linux/drivers/char/ipmi/ipmi_si_intf.c
   1/*
   2 * ipmi_si.c
   3 *
   4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
   5 * BT).
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
  13 *
  14 *  This program is free software; you can redistribute it and/or modify it
  15 *  under the terms of the GNU General Public License as published by the
  16 *  Free Software Foundation; either version 2 of the License, or (at your
  17 *  option) any later version.
  18 *
  19 *
  20 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  21 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  22 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  23 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  24 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  25 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  26 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  27 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  28 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  29 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30 *
  31 *  You should have received a copy of the GNU General Public License along
  32 *  with this program; if not, write to the Free Software Foundation, Inc.,
  33 *  675 Mass Ave, Cambridge, MA 02139, USA.
  34 */
  35
  36/*
  37 * This file holds the "policy" for the interface to the SMI state
  38 * machine.  It does the configuration, handles timers and interrupts,
  39 * and drives the real SMI state machine.
  40 */
  41
  42#include <linux/module.h>
  43#include <linux/moduleparam.h>
  44#include <linux/sched.h>
  45#include <linux/seq_file.h>
  46#include <linux/timer.h>
  47#include <linux/errno.h>
  48#include <linux/spinlock.h>
  49#include <linux/slab.h>
  50#include <linux/delay.h>
  51#include <linux/list.h>
  52#include <linux/notifier.h>
  53#include <linux/mutex.h>
  54#include <linux/kthread.h>
  55#include <asm/irq.h>
  56#include <linux/interrupt.h>
  57#include <linux/rcupdate.h>
  58#include <linux/ipmi.h>
  59#include <linux/ipmi_smi.h>
  60#include "ipmi_si.h"
  61#include <linux/string.h>
  62#include <linux/ctype.h>
  63
  64#define PFX "ipmi_si: "
  65
  66/* Measure times between events in the driver. */
  67#undef DEBUG_TIMING
  68
  69/* Call every 10 ms. */
  70#define SI_TIMEOUT_TIME_USEC    10000
  71#define SI_USEC_PER_JIFFY       (1000000/HZ)
  72#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
   73#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
   74                                      short timeout */
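     /*
      * For example, with HZ=250 a jiffy is 4000 us, so SI_TIMEOUT_JIFFIES
      * above works out to 10000/4000 = 2 jiffies, i.e. about 8 ms between
      * timer ticks.
      */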
  75
  76enum si_intf_state {
  77        SI_NORMAL,
  78        SI_GETTING_FLAGS,
  79        SI_GETTING_EVENTS,
  80        SI_CLEARING_FLAGS,
  81        SI_GETTING_MESSAGES,
  82        SI_CHECKING_ENABLES,
  83        SI_SETTING_ENABLES
  84        /* FIXME - add watchdog stuff. */
  85};
  86
  87/* Some BT-specific defines we need here. */
  88#define IPMI_BT_INTMASK_REG             2
  89#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
  90#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
  91
  92static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
  93
  94static int initialized;
  95
  96/*
  97 * Indexes into stats[] in smi_info below.
  98 */
  99enum si_stat_indexes {
 100        /*
 101         * Number of times the driver requested a timer while an operation
 102         * was in progress.
 103         */
 104        SI_STAT_short_timeouts = 0,
 105
 106        /*
 107         * Number of times the driver requested a timer while nothing was in
 108         * progress.
 109         */
 110        SI_STAT_long_timeouts,
 111
 112        /* Number of times the interface was idle while being polled. */
 113        SI_STAT_idles,
 114
 115        /* Number of interrupts the driver handled. */
 116        SI_STAT_interrupts,
 117
  118        /* Number of times the driver got an ATTN from the hardware. */
 119        SI_STAT_attentions,
 120
 121        /* Number of times the driver requested flags from the hardware. */
 122        SI_STAT_flag_fetches,
 123
 124        /* Number of times the hardware didn't follow the state machine. */
 125        SI_STAT_hosed_count,
 126
 127        /* Number of completed messages. */
 128        SI_STAT_complete_transactions,
 129
 130        /* Number of IPMI events received from the hardware. */
 131        SI_STAT_events,
 132
 133        /* Number of watchdog pretimeouts. */
 134        SI_STAT_watchdog_pretimeouts,
 135
 136        /* Number of asynchronous messages received. */
 137        SI_STAT_incoming_messages,
 138
 139
 140        /* This *must* remain last, add new values above this. */
 141        SI_NUM_STATS
 142};
 143
 144struct smi_info {
 145        int                    intf_num;
 146        ipmi_smi_t             intf;
 147        struct si_sm_data      *si_sm;
 148        const struct si_sm_handlers *handlers;
 149        spinlock_t             si_lock;
 150        struct ipmi_smi_msg    *waiting_msg;
 151        struct ipmi_smi_msg    *curr_msg;
 152        enum si_intf_state     si_state;
 153
 154        /*
 155         * Used to handle the various types of I/O that can occur with
 156         * IPMI
 157         */
 158        struct si_sm_io io;
 159
 160        /*
 161         * Per-OEM handler, called from handle_flags().  Returns 1
 162         * when handle_flags() needs to be re-run or 0 indicating it
 163         * set si_state itself.
 164         */
 165        int (*oem_data_avail_handler)(struct smi_info *smi_info);
 166
 167        /*
 168         * Flags from the last GET_MSG_FLAGS command, used when an ATTN
 169         * is set to hold the flags until we are done handling everything
 170         * from the flags.
 171         */
 172#define RECEIVE_MSG_AVAIL       0x01
 173#define EVENT_MSG_BUFFER_FULL   0x02
 174#define WDT_PRE_TIMEOUT_INT     0x08
 175#define OEM0_DATA_AVAIL     0x20
 176#define OEM1_DATA_AVAIL     0x40
 177#define OEM2_DATA_AVAIL     0x80
 178#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
 179                             OEM1_DATA_AVAIL | \
 180                             OEM2_DATA_AVAIL)
 181        unsigned char       msg_flags;
 182
 183        /* Does the BMC have an event buffer? */
 184        bool                has_event_buffer;
 185
 186        /*
 187         * If set to true, this will request events the next time the
 188         * state machine is idle.
 189         */
 190        atomic_t            req_events;
 191
 192        /*
 193         * If true, run the state machine to completion on every send
 194         * call.  Generally used after a panic to make sure stuff goes
 195         * out.
 196         */
 197        bool                run_to_completion;
 198
 199        /* The timer for this si. */
 200        struct timer_list   si_timer;
 201
  202        /* This flag is set if the timer can be started. */
 203        bool                timer_can_start;
 204
  205        /* This flag is set if the timer is running (timer_pending() isn't enough). */
 206        bool                timer_running;
 207
 208        /* The time (in jiffies) the last timeout occurred at. */
 209        unsigned long       last_timeout_jiffies;
 210
  211        /* Are we waiting for events, pretimeouts, or received msgs? */
 212        atomic_t            need_watch;
 213
 214        /*
 215         * The driver will disable interrupts when it gets into a
 216         * situation where it cannot handle messages due to lack of
 217         * memory.  Once that situation clears up, it will re-enable
 218         * interrupts.
 219         */
 220        bool interrupt_disabled;
 221
 222        /*
 223         * Does the BMC support events?
 224         */
 225        bool supports_event_msg_buff;
 226
  227        /*
  228         * Can we disable interrupts by clearing the receive irq bit in
  229         * the global enables?  There are currently two forms of
  230         * brokenness: some systems cannot clear the bit (which is
  231         * technically within the spec but a bad idea) and some systems
  232         * have the bit forced to zero even though interrupts work
  233         * (which is clearly outside the spec).  This bool and the next
  234         * tell which form of brokenness is present.
  235         */
 236        bool cannot_disable_irq;
 237
 238        /*
 239         * Some systems are broken and cannot set the irq enable
 240         * bit, even if they support interrupts.
 241         */
 242        bool irq_enable_broken;
 243
 244        /*
 245         * Did we get an attention that we did not handle?
 246         */
 247        bool got_attn;
 248
 249        /* From the get device id response... */
 250        struct ipmi_device_id device_id;
 251
 252        /* Default driver model device. */
 253        struct platform_device *pdev;
 254
 255        /* Counters and things for the proc filesystem. */
 256        atomic_t stats[SI_NUM_STATS];
 257
 258        struct task_struct *thread;
 259
 260        struct list_head link;
 261};
 262
 263#define smi_inc_stat(smi, stat) \
 264        atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
 265#define smi_get_stat(smi, stat) \
 266        ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
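     /*
      * The macros above paste the stat name onto SI_STAT_, so e.g.
      * smi_inc_stat(smi, attentions) increments stats[SI_STAT_attentions].
      */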
 267
 268#define IPMI_MAX_INTFS 4
 269static int force_kipmid[IPMI_MAX_INTFS];
 270static int num_force_kipmid;
 271
 272static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
 273static int num_max_busy_us;
 274
 275static bool unload_when_empty = true;
 276
 277static int try_smi_init(struct smi_info *smi);
 278static void cleanup_one_si(struct smi_info *to_clean);
 279static void cleanup_ipmi_si(void);
 280
 281#ifdef DEBUG_TIMING
 282void debug_timestamp(char *msg)
 283{
 284        struct timespec64 t;
 285
 286        getnstimeofday64(&t);
 287        pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
 288}
 289#else
 290#define debug_timestamp(x)
 291#endif
 292
 293static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 294static int register_xaction_notifier(struct notifier_block *nb)
 295{
 296        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 297}
 298
 299static void deliver_recv_msg(struct smi_info *smi_info,
 300                             struct ipmi_smi_msg *msg)
 301{
 302        /* Deliver the message to the upper layer. */
 303        if (smi_info->intf)
 304                ipmi_smi_msg_received(smi_info->intf, msg);
 305        else
 306                ipmi_free_smi_msg(msg);
 307}
 308
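     /*
      * Turn the current request into a minimal error response: flip the
      * netfn to its response form, keep the command byte, append the given
      * (or a default) completion code, and deliver it to the upper layer.
      */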
 309static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 310{
 311        struct ipmi_smi_msg *msg = smi_info->curr_msg;
 312
 313        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
 314                cCode = IPMI_ERR_UNSPECIFIED;
 315        /* else use it as is */
 316
 317        /* Make it a response */
 318        msg->rsp[0] = msg->data[0] | 4;
 319        msg->rsp[1] = msg->data[1];
 320        msg->rsp[2] = cCode;
 321        msg->rsp_size = 3;
 322
 323        smi_info->curr_msg = NULL;
 324        deliver_recv_msg(smi_info, msg);
 325}
 326
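     /*
      * If a message is queued in waiting_msg, make it the current message
      * and start the transaction (after running the transaction notifier
      * chain).  Returns SI_SM_IDLE when there is nothing to send.
      */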
 327static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 328{
 329        int              rv;
 330
 331        if (!smi_info->waiting_msg) {
 332                smi_info->curr_msg = NULL;
 333                rv = SI_SM_IDLE;
 334        } else {
 335                int err;
 336
 337                smi_info->curr_msg = smi_info->waiting_msg;
 338                smi_info->waiting_msg = NULL;
 339                debug_timestamp("Start2");
 340                err = atomic_notifier_call_chain(&xaction_notifier_list,
 341                                0, smi_info);
 342                if (err & NOTIFY_STOP_MASK) {
 343                        rv = SI_SM_CALL_WITHOUT_DELAY;
 344                        goto out;
 345                }
 346                err = smi_info->handlers->start_transaction(
 347                        smi_info->si_sm,
 348                        smi_info->curr_msg->data,
 349                        smi_info->curr_msg->data_size);
 350                if (err)
 351                        return_hosed_msg(smi_info, err);
 352
 353                rv = SI_SM_CALL_WITHOUT_DELAY;
 354        }
 355out:
 356        return rv;
 357}
 358
 359static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 360{
 361        if (!smi_info->timer_can_start)
 362                return;
 363        smi_info->last_timeout_jiffies = jiffies;
 364        mod_timer(&smi_info->si_timer, new_val);
 365        smi_info->timer_running = true;
 366}
 367
 368/*
 369 * Start a new message and (re)start the timer and thread.
 370 */
 371static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
 372                          unsigned int size)
 373{
 374        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 375
 376        if (smi_info->thread)
 377                wake_up_process(smi_info->thread);
 378
 379        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 380}
 381
 382static void start_check_enables(struct smi_info *smi_info)
 383{
 384        unsigned char msg[2];
 385
 386        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 387        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 388
 389        start_new_msg(smi_info, msg, 2);
 390        smi_info->si_state = SI_CHECKING_ENABLES;
 391}
 392
 393static void start_clear_flags(struct smi_info *smi_info)
 394{
 395        unsigned char msg[3];
 396
 397        /* Make sure the watchdog pre-timeout flag is not set at startup. */
 398        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 399        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 400        msg[2] = WDT_PRE_TIMEOUT_INT;
 401
 402        start_new_msg(smi_info, msg, 3);
 403        smi_info->si_state = SI_CLEARING_FLAGS;
 404}
 405
 406static void start_getting_msg_queue(struct smi_info *smi_info)
 407{
 408        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 409        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
 410        smi_info->curr_msg->data_size = 2;
 411
 412        start_new_msg(smi_info, smi_info->curr_msg->data,
 413                      smi_info->curr_msg->data_size);
 414        smi_info->si_state = SI_GETTING_MESSAGES;
 415}
 416
 417static void start_getting_events(struct smi_info *smi_info)
 418{
 419        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 420        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 421        smi_info->curr_msg->data_size = 2;
 422
 423        start_new_msg(smi_info, smi_info->curr_msg->data,
 424                      smi_info->curr_msg->data_size);
 425        smi_info->si_state = SI_GETTING_EVENTS;
 426}
 427
 428/*
  429 * When we have a situation where we run out of memory and cannot
 430 * allocate messages, we just leave them in the BMC and run the system
 431 * polled until we can allocate some memory.  Once we have some
 432 * memory, we will re-enable the interrupt.
 433 *
 434 * Note that we cannot just use disable_irq(), since the interrupt may
 435 * be shared.
 436 */
 437static inline bool disable_si_irq(struct smi_info *smi_info)
 438{
 439        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
 440                smi_info->interrupt_disabled = true;
 441                start_check_enables(smi_info);
 442                return true;
 443        }
 444        return false;
 445}
 446
 447static inline bool enable_si_irq(struct smi_info *smi_info)
 448{
 449        if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
 450                smi_info->interrupt_disabled = false;
 451                start_check_enables(smi_info);
 452                return true;
 453        }
 454        return false;
 455}
 456
 457/*
 458 * Allocate a message.  If unable to allocate, start the interrupt
 459 * disable process and return NULL.  If able to allocate but
 460 * interrupts are disabled, free the message and return NULL after
 461 * starting the interrupt enable process.
 462 */
 463static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 464{
 465        struct ipmi_smi_msg *msg;
 466
 467        msg = ipmi_alloc_smi_msg();
 468        if (!msg) {
 469                if (!disable_si_irq(smi_info))
 470                        smi_info->si_state = SI_NORMAL;
 471        } else if (enable_si_irq(smi_info)) {
 472                ipmi_free_smi_msg(msg);
 473                msg = NULL;
 474        }
 475        return msg;
 476}
 477
 478static void handle_flags(struct smi_info *smi_info)
 479{
 480retry:
 481        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
 482                /* Watchdog pre-timeout */
 483                smi_inc_stat(smi_info, watchdog_pretimeouts);
 484
 485                start_clear_flags(smi_info);
 486                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 487                if (smi_info->intf)
 488                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
 489        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 490                /* Messages available. */
 491                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 492                if (!smi_info->curr_msg)
 493                        return;
 494
 495                start_getting_msg_queue(smi_info);
 496        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
 497                /* Events available. */
 498                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 499                if (!smi_info->curr_msg)
 500                        return;
 501
 502                start_getting_events(smi_info);
 503        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
 504                   smi_info->oem_data_avail_handler) {
 505                if (smi_info->oem_data_avail_handler(smi_info))
 506                        goto retry;
 507        } else
 508                smi_info->si_state = SI_NORMAL;
 509}
 510
 511/*
 512 * Global enables we care about.
 513 */
 514#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
 515                             IPMI_BMC_EVT_MSG_INTR)
 516
 517static u8 current_global_enables(struct smi_info *smi_info, u8 base,
 518                                 bool *irq_on)
 519{
 520        u8 enables = 0;
 521
 522        if (smi_info->supports_event_msg_buff)
 523                enables |= IPMI_BMC_EVT_MSG_BUFF;
 524
 525        if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
 526             smi_info->cannot_disable_irq) &&
 527            !smi_info->irq_enable_broken)
 528                enables |= IPMI_BMC_RCV_MSG_INTR;
 529
 530        if (smi_info->supports_event_msg_buff &&
 531            smi_info->io.irq && !smi_info->interrupt_disabled &&
 532            !smi_info->irq_enable_broken)
 533                enables |= IPMI_BMC_EVT_MSG_INTR;
 534
 535        *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
 536
 537        return enables;
 538}
 539
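     /*
      * Make the BT interface's dedicated interrupt-enable bit match the
      * desired irq state (BT has its own enable bit in addition to the
      * global enables).
      */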
 540static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
 541{
 542        u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
 543
 544        irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
 545
 546        if ((bool)irqstate == irq_on)
 547                return;
 548
 549        if (irq_on)
 550                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
 551                                     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
 552        else
 553                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
 554}
 555
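     /*
      * Called when the state machine reports a completed transaction.
      * Pull the result out of the state machine and act on it according
      * to the state that issued the request.
      */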
 556static void handle_transaction_done(struct smi_info *smi_info)
 557{
 558        struct ipmi_smi_msg *msg;
 559
 560        debug_timestamp("Done");
 561        switch (smi_info->si_state) {
 562        case SI_NORMAL:
 563                if (!smi_info->curr_msg)
 564                        break;
 565
 566                smi_info->curr_msg->rsp_size
 567                        = smi_info->handlers->get_result(
 568                                smi_info->si_sm,
 569                                smi_info->curr_msg->rsp,
 570                                IPMI_MAX_MSG_LENGTH);
 571
 572                /*
  573                 * Do this here because deliver_recv_msg() releases the
 574                 * lock, and a new message can be put in during the
 575                 * time the lock is released.
 576                 */
 577                msg = smi_info->curr_msg;
 578                smi_info->curr_msg = NULL;
 579                deliver_recv_msg(smi_info, msg);
 580                break;
 581
 582        case SI_GETTING_FLAGS:
 583        {
 584                unsigned char msg[4];
 585                unsigned int  len;
 586
 587                /* We got the flags from the SMI, now handle them. */
 588                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 589                if (msg[2] != 0) {
 590                        /* Error fetching flags, just give up for now. */
 591                        smi_info->si_state = SI_NORMAL;
 592                } else if (len < 4) {
 593                        /*
 594                         * Hmm, no flags.  That's technically illegal, but
 595                         * don't use uninitialized data.
 596                         */
 597                        smi_info->si_state = SI_NORMAL;
 598                } else {
 599                        smi_info->msg_flags = msg[3];
 600                        handle_flags(smi_info);
 601                }
 602                break;
 603        }
 604
 605        case SI_CLEARING_FLAGS:
 606        {
 607                unsigned char msg[3];
 608
 609                /* We cleared the flags. */
 610                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
 611                if (msg[2] != 0) {
 612                        /* Error clearing flags */
 613                        dev_warn(smi_info->io.dev,
 614                                 "Error clearing flags: %2.2x\n", msg[2]);
 615                }
 616                smi_info->si_state = SI_NORMAL;
 617                break;
 618        }
 619
 620        case SI_GETTING_EVENTS:
 621        {
 622                smi_info->curr_msg->rsp_size
 623                        = smi_info->handlers->get_result(
 624                                smi_info->si_sm,
 625                                smi_info->curr_msg->rsp,
 626                                IPMI_MAX_MSG_LENGTH);
 627
 628                /*
  629                 * Do this here because deliver_recv_msg() releases the
 630                 * lock, and a new message can be put in during the
 631                 * time the lock is released.
 632                 */
 633                msg = smi_info->curr_msg;
 634                smi_info->curr_msg = NULL;
 635                if (msg->rsp[2] != 0) {
 636                        /* Error getting event, probably done. */
 637                        msg->done(msg);
 638
 639                        /* Take off the event flag. */
 640                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
 641                        handle_flags(smi_info);
 642                } else {
 643                        smi_inc_stat(smi_info, events);
 644
 645                        /*
 646                         * Do this before we deliver the message
 647                         * because delivering the message releases the
 648                         * lock and something else can mess with the
 649                         * state.
 650                         */
 651                        handle_flags(smi_info);
 652
 653                        deliver_recv_msg(smi_info, msg);
 654                }
 655                break;
 656        }
 657
 658        case SI_GETTING_MESSAGES:
 659        {
 660                smi_info->curr_msg->rsp_size
 661                        = smi_info->handlers->get_result(
 662                                smi_info->si_sm,
 663                                smi_info->curr_msg->rsp,
 664                                IPMI_MAX_MSG_LENGTH);
 665
 666                /*
  667                 * Do this here because deliver_recv_msg() releases the
 668                 * lock, and a new message can be put in during the
 669                 * time the lock is released.
 670                 */
 671                msg = smi_info->curr_msg;
 672                smi_info->curr_msg = NULL;
 673                if (msg->rsp[2] != 0) {
  674                        /* Error getting message, probably done. */
 675                        msg->done(msg);
 676
 677                        /* Take off the msg flag. */
 678                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
 679                        handle_flags(smi_info);
 680                } else {
 681                        smi_inc_stat(smi_info, incoming_messages);
 682
 683                        /*
 684                         * Do this before we deliver the message
 685                         * because delivering the message releases the
 686                         * lock and something else can mess with the
 687                         * state.
 688                         */
 689                        handle_flags(smi_info);
 690
 691                        deliver_recv_msg(smi_info, msg);
 692                }
 693                break;
 694        }
 695
 696        case SI_CHECKING_ENABLES:
 697        {
 698                unsigned char msg[4];
 699                u8 enables;
 700                bool irq_on;
 701
  702                /* We got the global enables from the BMC, now handle them. */
 703                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 704                if (msg[2] != 0) {
 705                        dev_warn(smi_info->io.dev,
 706                                 "Couldn't get irq info: %x.\n", msg[2]);
 707                        dev_warn(smi_info->io.dev,
 708                                 "Maybe ok, but ipmi might run very slowly.\n");
 709                        smi_info->si_state = SI_NORMAL;
 710                        break;
 711                }
 712                enables = current_global_enables(smi_info, 0, &irq_on);
 713                if (smi_info->io.si_type == SI_BT)
 714                        /* BT has its own interrupt enable bit. */
 715                        check_bt_irq(smi_info, irq_on);
 716                if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
 717                        /* Enables are not correct, fix them. */
 718                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 719                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
 720                        msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
 721                        smi_info->handlers->start_transaction(
 722                                smi_info->si_sm, msg, 3);
 723                        smi_info->si_state = SI_SETTING_ENABLES;
 724                } else if (smi_info->supports_event_msg_buff) {
 725                        smi_info->curr_msg = ipmi_alloc_smi_msg();
 726                        if (!smi_info->curr_msg) {
 727                                smi_info->si_state = SI_NORMAL;
 728                                break;
 729                        }
 730                        start_getting_events(smi_info);
 731                } else {
 732                        smi_info->si_state = SI_NORMAL;
 733                }
 734                break;
 735        }
 736
 737        case SI_SETTING_ENABLES:
 738        {
 739                unsigned char msg[4];
 740
 741                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 742                if (msg[2] != 0)
 743                        dev_warn(smi_info->io.dev,
 744                                 "Could not set the global enables: 0x%x.\n",
 745                                 msg[2]);
 746
 747                if (smi_info->supports_event_msg_buff) {
 748                        smi_info->curr_msg = ipmi_alloc_smi_msg();
 749                        if (!smi_info->curr_msg) {
 750                                smi_info->si_state = SI_NORMAL;
 751                                break;
 752                        }
 753                        start_getting_events(smi_info);
 754                } else {
 755                        smi_info->si_state = SI_NORMAL;
 756                }
 757                break;
 758        }
 759        }
 760}
 761
 762/*
 763 * Called on timeouts and events.  Timeouts should pass the elapsed
 764 * time, interrupts should pass in zero.  Must be called with
 765 * si_lock held and interrupts disabled.
 766 */
 767static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 768                                           int time)
 769{
 770        enum si_sm_result si_sm_result;
 771
 772restart:
 773        /*
 774         * There used to be a loop here that waited a little while
 775         * (around 25us) before giving up.  That turned out to be
 776         * pointless, the minimum delays I was seeing were in the 300us
 777         * range, which is far too long to wait in an interrupt.  So
 778         * we just run until the state machine tells us something
 779         * happened or it needs a delay.
 780         */
 781        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
 782        time = 0;
 783        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
 784                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 785
 786        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
 787                smi_inc_stat(smi_info, complete_transactions);
 788
 789                handle_transaction_done(smi_info);
 790                goto restart;
 791        } else if (si_sm_result == SI_SM_HOSED) {
 792                smi_inc_stat(smi_info, hosed_count);
 793
 794                /*
  795                 * Do this before return_hosed_msg(), because that
  796                 * releases the lock.
 797                 */
 798                smi_info->si_state = SI_NORMAL;
 799                if (smi_info->curr_msg != NULL) {
 800                        /*
 801                         * If we were handling a user message, format
 802                         * a response to send to the upper layer to
 803                         * tell it about the error.
 804                         */
 805                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 806                }
 807                goto restart;
 808        }
 809
 810        /*
 811         * We prefer handling attn over new messages.  But don't do
 812         * this if there is not yet an upper layer to handle anything.
 813         */
 814        if (likely(smi_info->intf) &&
 815            (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
 816                unsigned char msg[2];
 817
 818                if (smi_info->si_state != SI_NORMAL) {
 819                        /*
 820                         * We got an ATTN, but we are doing something else.
 821                         * Handle the ATTN later.
 822                         */
 823                        smi_info->got_attn = true;
 824                } else {
 825                        smi_info->got_attn = false;
 826                        smi_inc_stat(smi_info, attentions);
 827
 828                        /*
  829                         * Got an attn, send down a get message flags to see
 830                         * what's causing it.  It would be better to handle
 831                         * this in the upper layer, but due to the way
 832                         * interrupts work with the SMI, that's not really
 833                         * possible.
 834                         */
 835                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 836                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 837
 838                        start_new_msg(smi_info, msg, 2);
 839                        smi_info->si_state = SI_GETTING_FLAGS;
 840                        goto restart;
 841                }
 842        }
 843
 844        /* If we are currently idle, try to start the next message. */
 845        if (si_sm_result == SI_SM_IDLE) {
 846                smi_inc_stat(smi_info, idles);
 847
 848                si_sm_result = start_next_msg(smi_info);
 849                if (si_sm_result != SI_SM_IDLE)
 850                        goto restart;
 851        }
 852
 853        if ((si_sm_result == SI_SM_IDLE)
 854            && (atomic_read(&smi_info->req_events))) {
 855                /*
  856                 * We are idle and the upper layer requested that we fetch
 857                 * events, so do so.
 858                 */
 859                atomic_set(&smi_info->req_events, 0);
 860
 861                /*
 862                 * Take this opportunity to check the interrupt and
 863                 * message enable state for the BMC.  The BMC can be
  864                 * asynchronously reset, and may thus have interrupts and
  865                 * message delivery disabled.
 866                 */
 867                if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
 868                        start_check_enables(smi_info);
 869                } else {
 870                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 871                        if (!smi_info->curr_msg)
 872                                goto out;
 873
 874                        start_getting_events(smi_info);
 875                }
 876                goto restart;
 877        }
 878
 879        if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
  880                /* OK, if it fails, the timer will just go off. */
 881                if (del_timer(&smi_info->si_timer))
 882                        smi_info->timer_running = false;
 883        }
 884
 885out:
 886        return si_sm_result;
 887}
 888
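     /*
      * If the interface is idle, (re)arm the timer, wake the kipmid
      * thread if there is one, and try to start the next queued message.
      */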
 889static void check_start_timer_thread(struct smi_info *smi_info)
 890{
 891        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
 892                smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 893
 894                if (smi_info->thread)
 895                        wake_up_process(smi_info->thread);
 896
 897                start_next_msg(smi_info);
 898                smi_event_handler(smi_info, 0);
 899        }
 900}
 901
 902static void flush_messages(void *send_info)
 903{
 904        struct smi_info *smi_info = send_info;
 905        enum si_sm_result result;
 906
 907        /*
 908         * Currently, this function is called only in run-to-completion
 909         * mode.  This means we are single-threaded, no need for locks.
 910         */
 911        result = smi_event_handler(smi_info, 0);
 912        while (result != SI_SM_IDLE) {
 913                udelay(SI_SHORT_TIMEOUT_USEC);
 914                result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
 915        }
 916}
 917
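     /*
      * Queue a message from the upper layer for transmission.  In
      * run-to-completion mode the upper layer will flush it out;
      * otherwise stash it in waiting_msg and kick the timer/thread.
      */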
 918static void sender(void                *send_info,
 919                   struct ipmi_smi_msg *msg)
 920{
 921        struct smi_info   *smi_info = send_info;
 922        unsigned long     flags;
 923
 924        debug_timestamp("Enqueue");
 925
 926        if (smi_info->run_to_completion) {
 927                /*
 928                 * If we are running to completion, start it.  Upper
 929                 * layer will call flush_messages to clear it out.
 930                 */
 931                smi_info->waiting_msg = msg;
 932                return;
 933        }
 934
 935        spin_lock_irqsave(&smi_info->si_lock, flags);
 936        /*
 937         * The following two lines don't need to be under the lock for
 938         * the lock's sake, but they do need SMP memory barriers to
 939         * avoid getting things out of order.  We are already claiming
 940         * the lock, anyway, so just do it under the lock to avoid the
 941         * ordering problem.
 942         */
 943        BUG_ON(smi_info->waiting_msg);
 944        smi_info->waiting_msg = msg;
 945        check_start_timer_thread(smi_info);
 946        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 947}
 948
 949static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 950{
 951        struct smi_info   *smi_info = send_info;
 952
 953        smi_info->run_to_completion = i_run_to_completion;
 954        if (i_run_to_completion)
 955                flush_messages(smi_info);
 956}
 957
 958/*
  959 * Use -1 in the nsec value of the busy-waiting timespec to indicate
  960 * that we are spinning in kipmid looking for something and not
  961 * delaying between checks.
 962 */
 963static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 964{
 965        ts->tv_nsec = -1;
 966}
 967static inline int ipmi_si_is_busy(struct timespec64 *ts)
 968{
 969        return ts->tv_nsec != -1;
 970}
 971
 972static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 973                                        const struct smi_info *smi_info,
 974                                        struct timespec64 *busy_until)
 975{
 976        unsigned int max_busy_us = 0;
 977
 978        if (smi_info->intf_num < num_max_busy_us)
 979                max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
 980        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
 981                ipmi_si_set_not_busy(busy_until);
 982        else if (!ipmi_si_is_busy(busy_until)) {
 983                getnstimeofday64(busy_until);
 984                timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
 985        } else {
 986                struct timespec64 now;
 987
 988                getnstimeofday64(&now);
 989                if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
 990                        ipmi_si_set_not_busy(busy_until);
 991                        return 0;
 992                }
 993        }
 994        return 1;
 995}
 996
 997
 998/*
 999 * A busy-waiting loop for speeding up IPMI operation.
1000 *
1001 * Lousy hardware makes this hard.  This is only enabled for systems
 1002 * that are not BT and do not have interrupts.  It keeps spinning
 1003 * until an operation is complete or until max_busy tells it to stop
 1004 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 1005 * Documentation/IPMI.txt for details.
1006 */
1007static int ipmi_thread(void *data)
1008{
1009        struct smi_info *smi_info = data;
1010        unsigned long flags;
1011        enum si_sm_result smi_result;
1012        struct timespec64 busy_until;
1013
1014        ipmi_si_set_not_busy(&busy_until);
1015        set_user_nice(current, MAX_NICE);
1016        while (!kthread_should_stop()) {
1017                int busy_wait;
1018
1019                spin_lock_irqsave(&(smi_info->si_lock), flags);
1020                smi_result = smi_event_handler(smi_info, 0);
1021
1022                /*
1023                 * If the driver is doing something, there is a possible
 1024                 * race with the timer.  If the timer handler sees idle,
1025                 * and the thread here sees something else, the timer
1026                 * handler won't restart the timer even though it is
1027                 * required.  So start it here if necessary.
1028                 */
1029                if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
1030                        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1031
1032                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1033                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1034                                                  &busy_until);
1035                if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1036                        ; /* do nothing */
1037                else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1038                        schedule();
1039                else if (smi_result == SI_SM_IDLE) {
1040                        if (atomic_read(&smi_info->need_watch)) {
1041                                schedule_timeout_interruptible(100);
1042                        } else {
1043                                /* Wait to be woken up when we are needed. */
1044                                __set_current_state(TASK_INTERRUPTIBLE);
1045                                schedule();
1046                        }
1047                } else
1048                        schedule_timeout_interruptible(1);
1049        }
1050        return 0;
1051}
1052
1053
1054static void poll(void *send_info)
1055{
1056        struct smi_info *smi_info = send_info;
1057        unsigned long flags = 0;
1058        bool run_to_completion = smi_info->run_to_completion;
1059
1060        /*
1061         * Make sure there is some delay in the poll loop so we can
1062         * drive time forward and timeout things.
1063         */
1064        udelay(10);
1065        if (!run_to_completion)
1066                spin_lock_irqsave(&smi_info->si_lock, flags);
1067        smi_event_handler(smi_info, 10);
1068        if (!run_to_completion)
1069                spin_unlock_irqrestore(&smi_info->si_lock, flags);
1070}
1071
1072static void request_events(void *send_info)
1073{
1074        struct smi_info *smi_info = send_info;
1075
1076        if (!smi_info->has_event_buffer)
1077                return;
1078
1079        atomic_set(&smi_info->req_events, 1);
1080}
1081
1082static void set_need_watch(void *send_info, bool enable)
1083{
1084        struct smi_info *smi_info = send_info;
1085        unsigned long flags;
1086
1087        atomic_set(&smi_info->need_watch, enable);
1088        spin_lock_irqsave(&smi_info->si_lock, flags);
1089        check_start_timer_thread(smi_info);
1090        spin_unlock_irqrestore(&smi_info->si_lock, flags);
1091}
1092
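     /*
      * Timer callback that drives the state machine when interrupts are
      * not doing the work.  It reschedules itself with a long or short
      * timeout depending on what the state machine asks for.
      */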
1093static void smi_timeout(struct timer_list *t)
1094{
1095        struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
1096        enum si_sm_result smi_result;
1097        unsigned long     flags;
1098        unsigned long     jiffies_now;
1099        long              time_diff;
1100        long              timeout;
1101
1102        spin_lock_irqsave(&(smi_info->si_lock), flags);
1103        debug_timestamp("Timer");
1104
1105        jiffies_now = jiffies;
1106        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1107                     * SI_USEC_PER_JIFFY);
1108        smi_result = smi_event_handler(smi_info, time_diff);
1109
1110        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
1111                /* Running with interrupts, only do long timeouts. */
1112                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1113                smi_inc_stat(smi_info, long_timeouts);
1114                goto do_mod_timer;
1115        }
1116
1117        /*
1118         * If the state machine asks for a short delay, then shorten
1119         * the timer timeout.
1120         */
1121        if (smi_result == SI_SM_CALL_WITH_DELAY) {
1122                smi_inc_stat(smi_info, short_timeouts);
1123                timeout = jiffies + 1;
1124        } else {
1125                smi_inc_stat(smi_info, long_timeouts);
1126                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1127        }
1128
1129do_mod_timer:
1130        if (smi_result != SI_SM_IDLE)
1131                smi_mod_timer(smi_info, timeout);
1132        else
1133                smi_info->timer_running = false;
1134        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1135}
1136
1137irqreturn_t ipmi_si_irq_handler(int irq, void *data)
1138{
1139        struct smi_info *smi_info = data;
1140        unsigned long   flags;
1141
1142        if (smi_info->io.si_type == SI_BT)
1143                /* We need to clear the IRQ flag for the BT interface. */
1144                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1145                                     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1146                                     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1147
1148        spin_lock_irqsave(&(smi_info->si_lock), flags);
1149
1150        smi_inc_stat(smi_info, interrupts);
1151
1152        debug_timestamp("Interrupt");
1153
1154        smi_event_handler(smi_info, 0);
1155        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1156        return IRQ_HANDLED;
1157}
1158
1159static int smi_start_processing(void       *send_info,
1160                                ipmi_smi_t intf)
1161{
1162        struct smi_info *new_smi = send_info;
1163        int             enable = 0;
1164
1165        new_smi->intf = intf;
1166
1167        /* Set up the timer that drives the interface. */
1168        timer_setup(&new_smi->si_timer, smi_timeout, 0);
1169        new_smi->timer_can_start = true;
1170        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1171
1172        /* Try to claim any interrupts. */
1173        if (new_smi->io.irq_setup) {
1174                new_smi->io.irq_handler_data = new_smi;
1175                new_smi->io.irq_setup(&new_smi->io);
1176        }
1177
1178        /*
1179         * Check if the user forcefully enabled the daemon.
1180         */
1181        if (new_smi->intf_num < num_force_kipmid)
1182                enable = force_kipmid[new_smi->intf_num];
1183        /*
1184         * The BT interface is efficient enough to not need a thread,
1185         * and there is no need for a thread if we have interrupts.
1186         */
1187        else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
1188                enable = 1;
1189
1190        if (enable) {
1191                new_smi->thread = kthread_run(ipmi_thread, new_smi,
1192                                              "kipmi%d", new_smi->intf_num);
1193                if (IS_ERR(new_smi->thread)) {
1194                        dev_notice(new_smi->io.dev, "Could not start"
1195                                   " kernel thread due to error %ld, only using"
1196                                   " timers to drive the interface\n",
1197                                   PTR_ERR(new_smi->thread));
1198                        new_smi->thread = NULL;
1199                }
1200        }
1201
1202        return 0;
1203}
1204
1205static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1206{
1207        struct smi_info *smi = send_info;
1208
1209        data->addr_src = smi->io.addr_source;
1210        data->dev = smi->io.dev;
1211        data->addr_info = smi->io.addr_info;
1212        get_device(smi->io.dev);
1213
1214        return 0;
1215}
1216
1217static void set_maintenance_mode(void *send_info, bool enable)
1218{
1219        struct smi_info   *smi_info = send_info;
1220
1221        if (!enable)
1222                atomic_set(&smi_info->req_events, 0);
1223}
1224
1225static const struct ipmi_smi_handlers handlers = {
1226        .owner                  = THIS_MODULE,
1227        .start_processing       = smi_start_processing,
1228        .get_smi_info           = get_smi_info,
1229        .sender                 = sender,
1230        .request_events         = request_events,
1231        .set_need_watch         = set_need_watch,
1232        .set_maintenance_mode   = set_maintenance_mode,
1233        .set_run_to_completion  = set_run_to_completion,
1234        .flush_messages         = flush_messages,
1235        .poll                   = poll,
1236};
1237
1238static LIST_HEAD(smi_infos);
1239static DEFINE_MUTEX(smi_infos_lock);
1240static int smi_num; /* Used to sequence the SMIs */
1241
1242static const char * const addr_space_to_str[] = { "i/o", "mem" };
1243
1244module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1245MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1246                 " disabled(0).  Normally the IPMI driver auto-detects"
1247                 " this, but the value may be overridden by this parm.");
1248module_param(unload_when_empty, bool, 0);
1249MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1250                 " specified or found, default is 1.  Setting to 0"
1251                 " is useful for hot add of devices using hotmod.");
1252module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1253MODULE_PARM_DESC(kipmid_max_busy_us,
1254                 "Max time (in microseconds) to busy-wait for IPMI data before"
1255                 " sleeping. 0 (default) means to wait forever. Set to 100-500"
1256                 " if kipmid is using up a lot of CPU time.");
1257
1258void ipmi_irq_finish_setup(struct si_sm_io *io)
1259{
1260        if (io->si_type == SI_BT)
1261                /* Enable the interrupt in the BT interface. */
1262                io->outputb(io, IPMI_BT_INTMASK_REG,
1263                            IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1264}
1265
1266void ipmi_irq_start_cleanup(struct si_sm_io *io)
1267{
1268        if (io->si_type == SI_BT)
1269                /* Disable the interrupt in the BT interface. */
1270                io->outputb(io, IPMI_BT_INTMASK_REG, 0);
1271}
1272
1273static void std_irq_cleanup(struct si_sm_io *io)
1274{
1275        ipmi_irq_start_cleanup(io);
1276        free_irq(io->irq, io->irq_handler_data);
1277}
1278
1279int ipmi_std_irq_setup(struct si_sm_io *io)
1280{
1281        int rv;
1282
1283        if (!io->irq)
1284                return 0;
1285
1286        rv = request_irq(io->irq,
1287                         ipmi_si_irq_handler,
1288                         IRQF_SHARED,
1289                         DEVICE_NAME,
1290                         io->irq_handler_data);
1291        if (rv) {
1292                dev_warn(io->dev, "%s unable to claim interrupt %d,"
1293                         " running polled\n",
1294                         DEVICE_NAME, io->irq);
1295                io->irq = 0;
1296        } else {
1297                io->irq_cleanup = std_irq_cleanup;
1298                ipmi_irq_finish_setup(io);
1299                dev_info(io->dev, "Using irq %d\n", io->irq);
1300        }
1301
1302        return rv;
1303}
1304
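     /*
      * Synchronously run the state machine until the current transaction
      * finishes.  Used by the synchronous init-time commands below, before
      * the timer and interrupt paths are active.
      */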
1305static int wait_for_msg_done(struct smi_info *smi_info)
1306{
1307        enum si_sm_result     smi_result;
1308
1309        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1310        for (;;) {
1311                if (smi_result == SI_SM_CALL_WITH_DELAY ||
1312                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1313                        schedule_timeout_uninterruptible(1);
1314                        smi_result = smi_info->handlers->event(
1315                                smi_info->si_sm, jiffies_to_usecs(1));
1316                } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
1317                        smi_result = smi_info->handlers->event(
1318                                smi_info->si_sm, 0);
1319                } else
1320                        break;
1321        }
1322        if (smi_result == SI_SM_HOSED)
1323                /*
1324                 * We couldn't get the state machine to run, so whatever's at
1325                 * the port is probably not an IPMI SMI interface.
1326                 */
1327                return -ENODEV;
1328
1329        return 0;
1330}
1331
1332static int try_get_dev_id(struct smi_info *smi_info)
1333{
1334        unsigned char         msg[2];
1335        unsigned char         *resp;
1336        unsigned long         resp_len;
1337        int                   rv = 0;
1338
1339        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1340        if (!resp)
1341                return -ENOMEM;
1342
1343        /*
1344         * Do a Get Device ID command, since it comes back with some
1345         * useful info.
1346         */
1347        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1348        msg[1] = IPMI_GET_DEVICE_ID_CMD;
1349        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1350
1351        rv = wait_for_msg_done(smi_info);
1352        if (rv)
1353                goto out;
1354
1355        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1356                                                  resp, IPMI_MAX_MSG_LENGTH);
1357
1358        /* Check and record info from the get device id, in case we need it. */
1359        rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
1360                        resp + 2, resp_len - 2, &smi_info->device_id);
1361
1362out:
1363        kfree(resp);
1364        return rv;
1365}
1366
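     /*
      * Synchronously issue a Get BMC Global Enables command and return
      * the enables byte through *enables.  Returns a negative errno on
      * failure or a malformed response.
      */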
1367static int get_global_enables(struct smi_info *smi_info, u8 *enables)
1368{
1369        unsigned char         msg[3];
1370        unsigned char         *resp;
1371        unsigned long         resp_len;
1372        int                   rv;
1373
1374        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1375        if (!resp)
1376                return -ENOMEM;
1377
1378        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1379        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1380        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1381
1382        rv = wait_for_msg_done(smi_info);
1383        if (rv) {
1384                dev_warn(smi_info->io.dev,
1385                         "Error getting response from get global enables command: %d\n",
1386                         rv);
1387                goto out;
1388        }
1389
1390        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1391                                                  resp, IPMI_MAX_MSG_LENGTH);
1392
1393        if (resp_len < 4 ||
1394                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1395                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1396                        resp[2] != 0) {
1397                dev_warn(smi_info->io.dev,
1398                         "Invalid return from get global enables command: %ld %x %x %x\n",
1399                         resp_len, resp[0], resp[1], resp[2]);
1400                rv = -EINVAL;
1401                goto out;
1402        } else {
1403                *enables = resp[3];
1404        }
1405
1406out:
1407        kfree(resp);
1408        return rv;
1409}
1410
1411/*
1412 * Returns 1 if it gets an error from the command.
1413 */
1414static int set_global_enables(struct smi_info *smi_info, u8 enables)
1415{
1416        unsigned char         msg[3];
1417        unsigned char         *resp;
1418        unsigned long         resp_len;
1419        int                   rv;
1420
1421        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1422        if (!resp)
1423                return -ENOMEM;
1424
1425        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1426        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1427        msg[2] = enables;
1428        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1429
1430        rv = wait_for_msg_done(smi_info);
1431        if (rv) {
1432                dev_warn(smi_info->io.dev,
1433                         "Error getting response from set global enables command: %d\n",
1434                         rv);
1435                goto out;
1436        }
1437
1438        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1439                                                  resp, IPMI_MAX_MSG_LENGTH);
1440
1441        if (resp_len < 3 ||
1442                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1443                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1444                dev_warn(smi_info->io.dev,
1445                         "Invalid return from set global enables command: %ld %x %x\n",
1446                         resp_len, resp[0], resp[1]);
1447                rv = -EINVAL;
1448                goto out;
1449        }
1450
1451        if (resp[2] != 0)
1452                rv = 1;
1453
1454out:
1455        kfree(resp);
1456        return rv;
1457}
1458
1459/*
1460 * Some BMCs do not support clearing the receive irq bit in the global
1461 * enables (even if they don't support interrupts on the BMC).  Check
1462 * for this and handle it properly.
1463 */
1464static void check_clr_rcv_irq(struct smi_info *smi_info)
1465{
1466        u8 enables = 0;
1467        int rv;
1468
1469        rv = get_global_enables(smi_info, &enables);
1470        if (!rv) {
1471                if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
1472                        /* Already clear, should work ok. */
1473                        return;
1474
1475                enables &= ~IPMI_BMC_RCV_MSG_INTR;
1476                rv = set_global_enables(smi_info, enables);
1477        }
1478
1479        if (rv < 0) {
1480                dev_err(smi_info->io.dev,
1481                        "Cannot check clearing the rcv irq: %d\n", rv);
1482                return;
1483        }
1484
1485        if (rv) {
1486                /*
1487                 * An error when clearing the receive message interrupt
1488                 * bit means clearing the bit is not supported.
1489                 */
1490                dev_warn(smi_info->io.dev,
1491                         "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1492                smi_info->cannot_disable_irq = true;
1493        }
1494}
1495
1496/*
1497 * Some BMCs do not support setting the interrupt bits in the global
1498 * enables even if they support interrupts.  Clearly bad, but we can
1499 * compensate.
1500 */
1501static void check_set_rcv_irq(struct smi_info *smi_info)
1502{
1503        u8 enables = 0;
1504        int rv;
1505
1506        if (!smi_info->io.irq)
1507                return;
1508
1509        rv = get_global_enables(smi_info, &enables);
1510        if (!rv) {
1511                enables |= IPMI_BMC_RCV_MSG_INTR;
1512                rv = set_global_enables(smi_info, enables);
1513        }
1514
1515        if (rv < 0) {
1516                dev_err(smi_info->io.dev,
1517                        "Cannot check setting the rcv irq: %d\n", rv);
1518                return;
1519        }
1520
1521        if (rv) {
1522                /*
1523                 * An error when setting the receive message interrupt
1524                 * bit means setting the bit is not supported.
1525                 */
1526                dev_warn(smi_info->io.dev,
1527                         "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1528                smi_info->cannot_disable_irq = true;
1529                smi_info->irq_enable_broken = true;
1530        }
1531}
1532
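    /*
     * Read the global enables and, if the event message buffer is not
     * already on, try to turn it on.  Sets supports_event_msg_buff on
     * success; returns -ENOENT if the BMC does not support the buffer.
     */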
1533static int try_enable_event_buffer(struct smi_info *smi_info)
1534{
1535        unsigned char         msg[3];
1536        unsigned char         *resp;
1537        unsigned long         resp_len;
1538        int                   rv = 0;
1539
1540        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1541        if (!resp)
1542                return -ENOMEM;
1543
1544        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1545        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1546        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1547
1548        rv = wait_for_msg_done(smi_info);
1549        if (rv) {
1550                pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
1551                goto out;
1552        }
1553
1554        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1555                                                  resp, IPMI_MAX_MSG_LENGTH);
1556
1557        if (resp_len < 4 ||
1558                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1559                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1560                        resp[2] != 0) {
1561                pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
1562                rv = -EINVAL;
1563                goto out;
1564        }
1565
1566        if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
1567                /* buffer is already enabled, nothing to do. */
1568                smi_info->supports_event_msg_buff = true;
1569                goto out;
1570        }
1571
1572        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1573        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1574        msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
1575        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1576
1577        rv = wait_for_msg_done(smi_info);
1578        if (rv) {
1579                pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");
1580                goto out;
1581        }
1582
1583        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1584                                                  resp, IPMI_MAX_MSG_LENGTH);
1585
1586        if (resp_len < 3 ||
1587                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1588                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1589                pr_warn(PFX "Invalid return from set global enables command, cannot enable the event buffer.\n");
1590                rv = -EINVAL;
1591                goto out;
1592        }
1593
1594        if (resp[2] != 0)
1595                /*
1596                 * An error when setting the event buffer bit means
1597                 * that the event buffer is not supported.
1598                 */
1599                rv = -ENOENT;
1600        else
1601                smi_info->supports_event_msg_buff = true;
1602
1603out:
1604        kfree(resp);
1605        return rv;
1606}
1607
1608#ifdef CONFIG_IPMI_PROC_INTERFACE
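    /*
     * Legacy /proc entries for each interface: "type", "si_stats" and
     * "params" report the same information as the sysfs attributes
     * defined below.
     */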
1609static int smi_type_proc_show(struct seq_file *m, void *v)
1610{
1611        struct smi_info *smi = m->private;
1612
1613        seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);
1614
1615        return 0;
1616}
1617
1618static int smi_type_proc_open(struct inode *inode, struct file *file)
1619{
1620        return single_open(file, smi_type_proc_show, PDE_DATA(inode));
1621}
1622
1623static const struct file_operations smi_type_proc_ops = {
1624        .open           = smi_type_proc_open,
1625        .read           = seq_read,
1626        .llseek         = seq_lseek,
1627        .release        = single_release,
1628};
1629
1630static int smi_si_stats_proc_show(struct seq_file *m, void *v)
1631{
1632        struct smi_info *smi = m->private;
1633
1634        seq_printf(m, "interrupts_enabled:    %d\n",
1635                       smi->io.irq && !smi->interrupt_disabled);
1636        seq_printf(m, "short_timeouts:        %u\n",
1637                       smi_get_stat(smi, short_timeouts));
1638        seq_printf(m, "long_timeouts:         %u\n",
1639                       smi_get_stat(smi, long_timeouts));
1640        seq_printf(m, "idles:                 %u\n",
1641                       smi_get_stat(smi, idles));
1642        seq_printf(m, "interrupts:            %u\n",
1643                       smi_get_stat(smi, interrupts));
1644        seq_printf(m, "attentions:            %u\n",
1645                       smi_get_stat(smi, attentions));
1646        seq_printf(m, "flag_fetches:          %u\n",
1647                       smi_get_stat(smi, flag_fetches));
1648        seq_printf(m, "hosed_count:           %u\n",
1649                       smi_get_stat(smi, hosed_count));
1650        seq_printf(m, "complete_transactions: %u\n",
1651                       smi_get_stat(smi, complete_transactions));
1652        seq_printf(m, "events:                %u\n",
1653                       smi_get_stat(smi, events));
1654        seq_printf(m, "watchdog_pretimeouts:  %u\n",
1655                       smi_get_stat(smi, watchdog_pretimeouts));
1656        seq_printf(m, "incoming_messages:     %u\n",
1657                       smi_get_stat(smi, incoming_messages));
1658        return 0;
1659}
1660
1661static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
1662{
1663        return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
1664}
1665
1666static const struct file_operations smi_si_stats_proc_ops = {
1667        .open           = smi_si_stats_proc_open,
1668        .read           = seq_read,
1669        .llseek         = seq_lseek,
1670        .release        = single_release,
1671};
1672
1673static int smi_params_proc_show(struct seq_file *m, void *v)
1674{
1675        struct smi_info *smi = m->private;
1676
1677        seq_printf(m,
1678                   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
1679                   si_to_str[smi->io.si_type],
1680                   addr_space_to_str[smi->io.addr_type],
1681                   smi->io.addr_data,
1682                   smi->io.regspacing,
1683                   smi->io.regsize,
1684                   smi->io.regshift,
1685                   smi->io.irq,
1686                   smi->io.slave_addr);
1687
1688        return 0;
1689}
1690
1691static int smi_params_proc_open(struct inode *inode, struct file *file)
1692{
1693        return single_open(file, smi_params_proc_show, PDE_DATA(inode));
1694}
1695
1696static const struct file_operations smi_params_proc_ops = {
1697        .open           = smi_params_proc_open,
1698        .read           = seq_read,
1699        .llseek         = seq_lseek,
1700        .release        = single_release,
1701};
1702#endif
1703
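    /*
     * Generate a read-only sysfs attribute that reports one of the si
     * statistics counters as a decimal value.
     */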
1704#define IPMI_SI_ATTR(name) \
1705static ssize_t ipmi_##name##_show(struct device *dev,                   \
1706                                  struct device_attribute *attr,        \
1707                                  char *buf)                            \
1708{                                                                       \
1709        struct smi_info *smi_info = dev_get_drvdata(dev);               \
1710                                                                        \
1711        return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
1712}                                                                       \
1713static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
1714
1715static ssize_t ipmi_type_show(struct device *dev,
1716                              struct device_attribute *attr,
1717                              char *buf)
1718{
1719        struct smi_info *smi_info = dev_get_drvdata(dev);
1720
1721        return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
1722}
1723static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
1724
1725static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
1726                                            struct device_attribute *attr,
1727                                            char *buf)
1728{
1729        struct smi_info *smi_info = dev_get_drvdata(dev);
1730        int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
1731
1732        return snprintf(buf, 10, "%d\n", enabled);
1733}
1734static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
1735                   ipmi_interrupts_enabled_show, NULL);
1736
1737IPMI_SI_ATTR(short_timeouts);
1738IPMI_SI_ATTR(long_timeouts);
1739IPMI_SI_ATTR(idles);
1740IPMI_SI_ATTR(interrupts);
1741IPMI_SI_ATTR(attentions);
1742IPMI_SI_ATTR(flag_fetches);
1743IPMI_SI_ATTR(hosed_count);
1744IPMI_SI_ATTR(complete_transactions);
1745IPMI_SI_ATTR(events);
1746IPMI_SI_ATTR(watchdog_pretimeouts);
1747IPMI_SI_ATTR(incoming_messages);
1748
1749static ssize_t ipmi_params_show(struct device *dev,
1750                                struct device_attribute *attr,
1751                                char *buf)
1752{
1753        struct smi_info *smi_info = dev_get_drvdata(dev);
1754
1755        return snprintf(buf, 200,
1756                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
1757                        si_to_str[smi_info->io.si_type],
1758                        addr_space_to_str[smi_info->io.addr_type],
1759                        smi_info->io.addr_data,
1760                        smi_info->io.regspacing,
1761                        smi_info->io.regsize,
1762                        smi_info->io.regshift,
1763                        smi_info->io.irq,
1764                        smi_info->io.slave_addr);
1765}
1766static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
1767
1768static struct attribute *ipmi_si_dev_attrs[] = {
1769        &dev_attr_type.attr,
1770        &dev_attr_interrupts_enabled.attr,
1771        &dev_attr_short_timeouts.attr,
1772        &dev_attr_long_timeouts.attr,
1773        &dev_attr_idles.attr,
1774        &dev_attr_interrupts.attr,
1775        &dev_attr_attentions.attr,
1776        &dev_attr_flag_fetches.attr,
1777        &dev_attr_hosed_count.attr,
1778        &dev_attr_complete_transactions.attr,
1779        &dev_attr_events.attr,
1780        &dev_attr_watchdog_pretimeouts.attr,
1781        &dev_attr_incoming_messages.attr,
1782        &dev_attr_params.attr,
1783        NULL
1784};
1785
1786static const struct attribute_group ipmi_si_dev_attr_group = {
1787        .attrs          = ipmi_si_dev_attrs,
1788};
1789
1790/*
1791 * oem_data_avail_to_receive_msg_avail
1792 * @info - smi_info structure with msg_flags set
1793 *
1794 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1795 * Returns 1 indicating need to re-run handle_flags().
1796 */
1797static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1798{
1799        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1800                               RECEIVE_MSG_AVAIL);
1801        return 1;
1802}
1803
1804/*
1805 * setup_dell_poweredge_oem_data_handler
1806 * @info - smi_info.device_id must be populated
1807 *
1808 * Systems that match, but have firmware version < 1.40 may assert
1809 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
1810 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
1811 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
1812 * as RECEIVE_MSG_AVAIL instead.
1813 *
1814 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
1815 * assert the OEM[012] bits, and if it did, the driver would have to
1816 * change to handle that properly, we don't actually check for the
1817 * firmware version.
1818 * Device ID = 0x20                BMC on PowerEdge 8G servers
1819 * Device Revision = 0x80
1820 * Firmware Revision1 = 0x01       BMC version 1.40
1821 * Firmware Revision2 = 0x40       BCD encoded
1822 * IPMI Version = 0x51             IPMI 1.5
1823 * Manufacturer ID = A2 02 00      Dell IANA
1824 *
1825 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
1826 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
1827 *
1828 */
1829#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
1830#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
1831#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
1832#define DELL_IANA_MFR_ID 0x0002a2
1833static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
1834{
1835        struct ipmi_device_id *id = &smi_info->device_id;
1836        if (id->manufacturer_id == DELL_IANA_MFR_ID) {
1837                if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
1838                    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
1839                    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
1840                        smi_info->oem_data_avail_handler =
1841                                oem_data_avail_to_receive_msg_avail;
1842                } else if (ipmi_version_major(id) < 1 ||
1843                           (ipmi_version_major(id) == 1 &&
1844                            ipmi_version_minor(id) < 5)) {
1845                        smi_info->oem_data_avail_handler =
1846                                oem_data_avail_to_receive_msg_avail;
1847                }
1848        }
1849}
1850
1851#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
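    /*
     * Turn the current request into a local error response with
     * completion code 0xCA (cannot return requested length) and
     * deliver it back to the upper layer.
     */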
1852static void return_hosed_msg_badsize(struct smi_info *smi_info)
1853{
1854        struct ipmi_smi_msg *msg = smi_info->curr_msg;
1855
1856        /* Make it a response */
1857        msg->rsp[0] = msg->data[0] | 4;
1858        msg->rsp[1] = msg->data[1];
1859        msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
1860        msg->rsp_size = 3;
1861        smi_info->curr_msg = NULL;
1862        deliver_recv_msg(smi_info, msg);
1863}
1864
1865/*
1866 * dell_poweredge_bt_xaction_handler
1867 * @info - smi_info.device_id must be populated
1868 *
1869 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
1870 * not respond to a Get SDR command if the length of the data
1871 * requested is exactly 0x3A, which leads to command timeouts and no
1872 * data returned.  This intercepts such commands, and causes userspace
1873 * callers to try again with a different-sized buffer, which succeeds.
1874 */
1875
1876#define STORAGE_NETFN 0x0A
1877#define STORAGE_CMD_GET_SDR 0x23
1878static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
1879                                             unsigned long unused,
1880                                             void *in)
1881{
1882        struct smi_info *smi_info = in;
1883        unsigned char *data = smi_info->curr_msg->data;
1884        unsigned int size   = smi_info->curr_msg->data_size;
1885        if (size >= 8 &&
1886            (data[0]>>2) == STORAGE_NETFN &&
1887            data[1] == STORAGE_CMD_GET_SDR &&
1888            data[7] == 0x3A) {
1889                return_hosed_msg_badsize(smi_info);
1890                return NOTIFY_STOP;
1891        }
1892        return NOTIFY_DONE;
1893}
1894
1895static struct notifier_block dell_poweredge_bt_xaction_notifier = {
1896        .notifier_call  = dell_poweredge_bt_xaction_handler,
1897};
1898
1899/*
1900 * setup_dell_poweredge_bt_xaction_handler
1901 * @info - smi_info.device_id must be filled in already
1902 *
1903 * Registers the Dell PowerEdge BT transaction notifier
1904 * when we know it is needed.
1905 */
1906static void
1907setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
1908{
1909        struct ipmi_device_id *id = &smi_info->device_id;
1910        if (id->manufacturer_id == DELL_IANA_MFR_ID &&
1911            smi_info->io.si_type == SI_BT)
1912                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
1913}
1914
1915/*
1916 * setup_oem_data_handler
1917 * @info - smi_info.device_id must be filled in already
1918 *
1919 * Fills in smi_info.oem_data_avail_handler
1920 * when we know what function to use there.
1921 */
1922
1923static void setup_oem_data_handler(struct smi_info *smi_info)
1924{
1925        setup_dell_poweredge_oem_data_handler(smi_info);
1926}
1927
1928static void setup_xaction_handlers(struct smi_info *smi_info)
1929{
1930        setup_dell_poweredge_bt_xaction_handler(smi_info);
1931}
1932
1933static void check_for_broken_irqs(struct smi_info *smi_info)
1934{
1935        check_clr_rcv_irq(smi_info);
1936        check_set_rcv_irq(smi_info);
1937}
1938
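    /*
     * Stop the polling kthread (if any) and the SI timer, and prevent
     * the timer from being started again.
     */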
1939static inline void stop_timer_and_thread(struct smi_info *smi_info)
1940{
1941        if (smi_info->thread != NULL)
1942                kthread_stop(smi_info->thread);
1943
1944        smi_info->timer_can_start = false;
1945        if (smi_info->timer_running)
1946                del_timer_sync(&smi_info->si_timer);
1947}
1948
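    /*
     * Look for an already-registered interface at the same address.
     * If one is found, fill in its slave address from the new entry
     * when the existing one lacks it, and return the existing entry.
     */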
1949static struct smi_info *find_dup_si(struct smi_info *info)
1950{
1951        struct smi_info *e;
1952
1953        list_for_each_entry(e, &smi_infos, link) {
1954                if (e->io.addr_type != info->io.addr_type)
1955                        continue;
1956                if (e->io.addr_data == info->io.addr_data) {
1957                        /*
1958                         * This is a cheap hack, ACPI doesn't have a defined
1959                         * slave address but SMBIOS does.  Pick it up from
1960                         * any source that has it available.
1961                         */
1962                        if (info->io.slave_addr && !e->io.slave_addr)
1963                                e->io.slave_addr = info->io.slave_addr;
1964                        return e;
1965                }
1966        }
1967
1968        return NULL;
1969}
1970
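    /*
     * Add a newly-discovered interface to the global list, preferring
     * an ACPI-supplied entry over an SMBIOS duplicate, and try to
     * start it right away if the driver is already initialized.
     */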
1971int ipmi_si_add_smi(struct si_sm_io *io)
1972{
1973        int rv = 0;
1974        struct smi_info *new_smi, *dup;
1975
1976        if (!io->io_setup) {
1977                if (io->addr_type == IPMI_IO_ADDR_SPACE) {
1978                        io->io_setup = ipmi_si_port_setup;
1979                } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
1980                        io->io_setup = ipmi_si_mem_setup;
1981                } else {
1982                        return -EINVAL;
1983                }
1984        }
1985
1986        new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
1987        if (!new_smi)
1988                return -ENOMEM;
1989        spin_lock_init(&new_smi->si_lock);
1990
1991        new_smi->io = *io;
1992
1993        mutex_lock(&smi_infos_lock);
1994        dup = find_dup_si(new_smi);
1995        if (dup) {
1996                if (new_smi->io.addr_source == SI_ACPI &&
1997                    dup->io.addr_source == SI_SMBIOS) {
1998                        /* We prefer ACPI over SMBIOS. */
1999                        dev_info(dup->io.dev,
2000                                 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
2001                                 si_to_str[new_smi->io.si_type]);
2002                        cleanup_one_si(dup);
2003                } else {
2004                        dev_info(new_smi->io.dev,
2005                                 "%s-specified %s state machine: duplicate\n",
2006                                 ipmi_addr_src_to_str(new_smi->io.addr_source),
2007                                 si_to_str[new_smi->io.si_type]);
2008                        rv = -EBUSY;
2009                        kfree(new_smi);
2010                        goto out_err;
2011                }
2012        }
2013
2014        pr_info(PFX "Adding %s-specified %s state machine\n",
2015                ipmi_addr_src_to_str(new_smi->io.addr_source),
2016                si_to_str[new_smi->io.si_type]);
2017
2018        /* So we know not to free it unless we have allocated one. */
2019        new_smi->intf = NULL;
2020        new_smi->si_sm = NULL;
2021        new_smi->handlers = NULL;
2022
2023        list_add_tail(&new_smi->link, &smi_infos);
2024
2025        if (initialized) {
2026                rv = try_smi_init(new_smi);
2027                if (rv) {
2028                        mutex_unlock(&smi_infos_lock);
2029                        cleanup_one_si(new_smi);
2030                        return rv;
2031                }
2032        }
2033out_err:
2034        mutex_unlock(&smi_infos_lock);
2035        return rv;
2036}
2037
2038/*
2039 * Try to start up an interface.  Must be called with smi_infos_lock
2040 * held, primarily to keep smi_num consistent; we only want to do these
2041 * one at a time.
2042 */
2043static int try_smi_init(struct smi_info *new_smi)
2044{
2045        int rv = 0;
2046        int i;
2047        char *init_name = NULL;
2048
2049        pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
2050                ipmi_addr_src_to_str(new_smi->io.addr_source),
2051                si_to_str[new_smi->io.si_type],
2052                addr_space_to_str[new_smi->io.addr_type],
2053                new_smi->io.addr_data,
2054                new_smi->io.slave_addr, new_smi->io.irq);
2055
2056        switch (new_smi->io.si_type) {
2057        case SI_KCS:
2058                new_smi->handlers = &kcs_smi_handlers;
2059                break;
2060
2061        case SI_SMIC:
2062                new_smi->handlers = &smic_smi_handlers;
2063                break;
2064
2065        case SI_BT:
2066                new_smi->handlers = &bt_smi_handlers;
2067                break;
2068
2069        default:
2070                /* No support for anything else yet. */
2071                rv = -EIO;
2072                goto out_err;
2073        }
2074
2075        new_smi->intf_num = smi_num;
2076
2077        /* Do this early so it's available for logs. */
2078        if (!new_smi->io.dev) {
2079                init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
2080                                      new_smi->intf_num);
2081
2082                /*
2083                 * If we don't already have a device from something
2084                 * else (like PCI), then register a new one.
2085                 */
2086                new_smi->pdev = platform_device_alloc("ipmi_si",
2087                                                      new_smi->intf_num);
2088                if (!new_smi->pdev) {
2089                        pr_err(PFX "Unable to allocate platform device\n");
                            rv = -ENOMEM;
2090                        goto out_err;
2091                }
2092                new_smi->io.dev = &new_smi->pdev->dev;
2093                new_smi->io.dev->driver = &ipmi_platform_driver.driver;
2094                /* Nulled by device_add() */
2095                new_smi->io.dev->init_name = init_name;
2096        }
2097
2098        /* Allocate the state machine's data and initialize it. */
2099        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2100        if (!new_smi->si_sm) {
2101                rv = -ENOMEM;
2102                goto out_err;
2103        }
2104        new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
2105                                                           &new_smi->io);
2106
2107        /* Now that we know the I/O size, we can set up the I/O. */
2108        rv = new_smi->io.io_setup(&new_smi->io);
2109        if (rv) {
2110                dev_err(new_smi->io.dev, "Could not set up I/O space\n");
2111                goto out_err;
2112        }
2113
2114        /* Do low-level detection first. */
2115        if (new_smi->handlers->detect(new_smi->si_sm)) {
2116                if (new_smi->io.addr_source)
2117                        dev_err(new_smi->io.dev,
2118                                "Interface detection failed\n");
2119                rv = -ENODEV;
2120                goto out_err;
2121        }
2122
2123        /*
2124         * Attempt a get device id command.  If it fails, we probably
2125         * don't have a BMC here.
2126         */
2127        rv = try_get_dev_id(new_smi);
2128        if (rv) {
2129                if (new_smi->io.addr_source)
2130                        dev_err(new_smi->io.dev,
2131                               "There appears to be no BMC at this location\n");
2132                goto out_err;
2133        }
2134
2135        setup_oem_data_handler(new_smi);
2136        setup_xaction_handlers(new_smi);
2137        check_for_broken_irqs(new_smi);
2138
2139        new_smi->waiting_msg = NULL;
2140        new_smi->curr_msg = NULL;
2141        atomic_set(&new_smi->req_events, 0);
2142        new_smi->run_to_completion = false;
2143        for (i = 0; i < SI_NUM_STATS; i++)
2144                atomic_set(&new_smi->stats[i], 0);
2145
2146        new_smi->interrupt_disabled = true;
2147        atomic_set(&new_smi->need_watch, 0);
2148
2149        rv = try_enable_event_buffer(new_smi);
2150        if (rv == 0)
2151                new_smi->has_event_buffer = true;
2152
2153        /*
2154         * Start clearing the flags before we enable interrupts or the
2155         * timer to avoid racing with the timer.
2156         */
2157        start_clear_flags(new_smi);
2158
2159        /*
2160         * IRQ is defined to be set when non-zero.  req_events will
2161         * cause a global flags check that will enable interrupts.
2162         */
2163        if (new_smi->io.irq) {
2164                new_smi->interrupt_disabled = false;
2165                atomic_set(&new_smi->req_events, 1);
2166        }
2167
2168        if (new_smi->pdev) {
2169                rv = platform_device_add(new_smi->pdev);
2170                if (rv) {
2171                        dev_err(new_smi->io.dev,
2172                                "Unable to register system interface device: %d\n",
2173                                rv);
2174                        goto out_err;
2175                }
2176        }
2177
2178        dev_set_drvdata(new_smi->io.dev, new_smi);
2179        rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
2180        if (rv) {
2181                dev_err(new_smi->io.dev,
2182                        "Unable to add device attributes: error %d\n",
2183                        rv);
2184                goto out_err_stop_timer;
2185        }
2186
2187        rv = ipmi_register_smi(&handlers,
2188                               new_smi,
2189                               new_smi->io.dev,
2190                               new_smi->io.slave_addr);
2191        if (rv) {
2192                dev_err(new_smi->io.dev,
2193                        "Unable to register device: error %d\n",
2194                        rv);
2195                goto out_err_remove_attrs;
2196        }
2197
2198#ifdef CONFIG_IPMI_PROC_INTERFACE
2199        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2200                                     &smi_type_proc_ops,
2201                                     new_smi);
2202        if (rv) {
2203                dev_err(new_smi->io.dev,
2204                        "Unable to create proc entry: %d\n", rv);
2205                goto out_err_stop_timer;
2206        }
2207
2208        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2209                                     &smi_si_stats_proc_ops,
2210                                     new_smi);
2211        if (rv) {
2212                dev_err(new_smi->io.dev,
2213                        "Unable to create proc entry: %d\n", rv);
2214                goto out_err_stop_timer;
2215        }
2216
2217        rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2218                                     &smi_params_proc_ops,
2219                                     new_smi);
2220        if (rv) {
2221                dev_err(new_smi->io.dev,
2222                        "Unable to create proc entry: %d\n", rv);
2223                goto out_err_stop_timer;
2224        }
2225#endif
2226
2227        /* Don't increment till we know we have succeeded. */
2228        smi_num++;
2229
2230        dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
2231                 si_to_str[new_smi->io.si_type]);
2232
2233        WARN_ON(new_smi->io.dev->init_name != NULL);
2234        kfree(init_name);
2235
2236        return 0;
2237
2238out_err_remove_attrs:
2239        device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
2240        dev_set_drvdata(new_smi->io.dev, NULL);
2241
2242out_err_stop_timer:
2243        stop_timer_and_thread(new_smi);
2244
2245out_err:
2246        new_smi->interrupt_disabled = true;
2247
2248        if (new_smi->intf) {
2249                ipmi_smi_t intf = new_smi->intf;
2250                new_smi->intf = NULL;
2251                ipmi_unregister_smi(intf);
2252        }
2253
2254        if (new_smi->io.irq_cleanup) {
2255                new_smi->io.irq_cleanup(&new_smi->io);
2256                new_smi->io.irq_cleanup = NULL;
2257        }
2258
2259        /*
2260         * Wait until we know that we are out of any interrupt
2261         * handlers that might have been running before we freed the
2262         * interrupt.
2263         */
2264        synchronize_sched();
2265
2266        if (new_smi->si_sm) {
2267                if (new_smi->handlers)
2268                        new_smi->handlers->cleanup(new_smi->si_sm);
2269                kfree(new_smi->si_sm);
2270                new_smi->si_sm = NULL;
2271        }
2272        if (new_smi->io.addr_source_cleanup) {
2273                new_smi->io.addr_source_cleanup(&new_smi->io);
2274                new_smi->io.addr_source_cleanup = NULL;
2275        }
2276        if (new_smi->io.io_cleanup) {
2277                new_smi->io.io_cleanup(&new_smi->io);
2278                new_smi->io.io_cleanup = NULL;
2279        }
2280
2281        if (new_smi->pdev) {
2282                platform_device_unregister(new_smi->pdev);
2283                new_smi->pdev = NULL;
2284        }
2287
2288        kfree(init_name);
2289
2290        return rv;
2291}
2292
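    /*
     * Module init: register any user-specified (hard-coded) interfaces
     * and, if none were given, scan the platform, PCI and PA-RISC
     * sources; then start what was found, preferring interfaces that
     * have an interrupt.
     */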
2293static int init_ipmi_si(void)
2294{
2295        struct smi_info *e;
2296        enum ipmi_addr_src type = SI_INVALID;
2297
2298        if (initialized)
2299                return 0;
2300
2301        pr_info("IPMI System Interface driver.\n");
2302
2303        /* If the user gave us a device, they presumably want us to use it */
2304        if (!ipmi_si_hardcode_find_bmc())
2305                goto do_scan;
2306
2307        ipmi_si_platform_init();
2308
2309        ipmi_si_pci_init();
2310
2311        ipmi_si_parisc_init();
2312
2313        /* We prefer devices with interrupts, but in the case of a machine
2314           with multiple BMCs we assume that there will be several instances
2315           of a given type so if we succeed in registering a type then also
2316           try to register everything else of the same type */
2317do_scan:
2318        mutex_lock(&smi_infos_lock);
2319        list_for_each_entry(e, &smi_infos, link) {
2320                /* Try to register a device if it has an IRQ and we either
2321                   haven't successfully registered a device yet or this
2322                   device has the same type as one we successfully registered */
2323                if (e->io.irq && (!type || e->io.addr_source == type)) {
2324                        if (!try_smi_init(e)) {
2325                                type = e->io.addr_source;
2326                        }
2327                }
2328        }
2329
2330        /* type will only have been set if we successfully registered an si */
2331        if (type)
2332                goto skip_fallback_noirq;
2333
2334        /* Fall back to interfaces that do not have an interrupt. */
2335
2336        list_for_each_entry(e, &smi_infos, link) {
2337                if (!e->io.irq && (!type || e->io.addr_source == type)) {
2338                        if (!try_smi_init(e)) {
2339                                type = e->io.addr_source;
2340                        }
2341                }
2342        }
2343
2344skip_fallback_noirq:
2345        initialized = 1;
2346        mutex_unlock(&smi_infos_lock);
2347
2348        if (type)
2349                return 0;
2350
2351        mutex_lock(&smi_infos_lock);
2352        if (unload_when_empty && list_empty(&smi_infos)) {
2353                mutex_unlock(&smi_infos_lock);
2354                cleanup_ipmi_si();
2355                pr_warn(PFX "Unable to find any System Interface(s)\n");
2356                return -ENODEV;
2357        } else {
2358                mutex_unlock(&smi_infos_lock);
2359                return 0;
2360        }
2361}
2362module_init(init_ipmi_si);
2363
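    /*
     * Tear down one interface: unregister it from the IPMI core, stop
     * the timer, thread and interrupts, drain any transaction in
     * progress, and free the state machine, I/O and platform device.
     */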
2364static void cleanup_one_si(struct smi_info *to_clean)
2365{
2366        int           rv = 0;
2367
2368        if (!to_clean)
2369                return;
2370
2371        if (to_clean->intf) {
2372                ipmi_smi_t intf = to_clean->intf;
2373
2374                to_clean->intf = NULL;
2375                rv = ipmi_unregister_smi(intf);
2376                if (rv) {
2377                        pr_err(PFX "Unable to unregister device: errno=%d\n",
2378                               rv);
2379                }
2380        }
2381
2382        device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
2383        dev_set_drvdata(to_clean->io.dev, NULL);
2384
2385        list_del(&to_clean->link);
2386
2387        /*
2388         * Make sure that interrupts, the timer and the thread are
2389         * stopped and will not run again.
2390         */
2391        if (to_clean->io.irq_cleanup)
2392                to_clean->io.irq_cleanup(&to_clean->io);
2393        stop_timer_and_thread(to_clean);
2394
2395        /*
2396         * Timeouts are stopped, now make sure the interrupts are off
2397         * in the BMC.  Note that timers and CPU interrupts are off,
2398         * so no need for locks.
2399         */
2400        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2401                poll(to_clean);
2402                schedule_timeout_uninterruptible(1);
2403        }
2404        if (to_clean->handlers)
2405                disable_si_irq(to_clean);
2406        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2407                poll(to_clean);
2408                schedule_timeout_uninterruptible(1);
2409        }
2410
2411        if (to_clean->handlers)
2412                to_clean->handlers->cleanup(to_clean->si_sm);
2413
2414        kfree(to_clean->si_sm);
2415
2416        if (to_clean->io.addr_source_cleanup)
2417                to_clean->io.addr_source_cleanup(&to_clean->io);
2418        if (to_clean->io.io_cleanup)
2419                to_clean->io.io_cleanup(&to_clean->io);
2420
2421        if (to_clean->pdev)
2422                platform_device_unregister(to_clean->pdev);
2423
2424        kfree(to_clean);
2425}
2426
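    /* Remove the interface, if any, that was registered for this device. */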
2427int ipmi_si_remove_by_dev(struct device *dev)
2428{
2429        struct smi_info *e;
2430        int rv = -ENOENT;
2431
2432        mutex_lock(&smi_infos_lock);
2433        list_for_each_entry(e, &smi_infos, link) {
2434                if (e->io.dev == dev) {
2435                        cleanup_one_si(e);
2436                        rv = 0;
2437                        break;
2438                }
2439        }
2440        mutex_unlock(&smi_infos_lock);
2441
2442        return rv;
2443}
2444
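    /*
     * Remove any interface that matches the given address space, SI
     * type and address.
     */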
2445void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
2446                            unsigned long addr)
2447{
2448        /* remove */
2449        struct smi_info *e, *tmp_e;
2450
2451        mutex_lock(&smi_infos_lock);
2452        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
2453                if (e->io.addr_type != addr_space)
2454                        continue;
2455                if (e->io.si_type != si_type)
2456                        continue;
2457                if (e->io.addr_data == addr)
2458                        cleanup_one_si(e);
2459        }
2460        mutex_unlock(&smi_infos_lock);
2461}
2462
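    /* Module exit: shut down the bus scanners and then every interface. */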
2463static void cleanup_ipmi_si(void)
2464{
2465        struct smi_info *e, *tmp_e;
2466
2467        if (!initialized)
2468                return;
2469
2470        ipmi_si_pci_shutdown();
2471
2472        ipmi_si_parisc_shutdown();
2473
2474        ipmi_si_platform_shutdown();
2475
2476        mutex_lock(&smi_infos_lock);
2477        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2478                cleanup_one_si(e);
2479        mutex_unlock(&smi_infos_lock);
2480}
2481module_exit(cleanup_ipmi_si);
2482
2483MODULE_ALIAS("platform:dmi-ipmi-si");
2484MODULE_LICENSE("GPL");
2485MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2486MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
2487                   " system interfaces.");
2488