linux/drivers/char/ipmi/ipmi_si_intf.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * ipmi_si.c
   4 *
   5 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
   6 * BT).
   7 *
   8 * Author: MontaVista Software, Inc.
   9 *         Corey Minyard <minyard@mvista.com>
  10 *         source@mvista.com
  11 *
  12 * Copyright 2002 MontaVista Software Inc.
  13 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
  14 */
  15
  16/*
  17 * This file holds the "policy" for the interface to the SMI state
  18 * machine.  It does the configuration, handles timers and interrupts,
  19 * and drives the real SMI state machine.
  20 */
  21
  22#define pr_fmt(fmt) "ipmi_si: " fmt
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <linux/sched.h>
  27#include <linux/seq_file.h>
  28#include <linux/timer.h>
  29#include <linux/errno.h>
  30#include <linux/spinlock.h>
  31#include <linux/slab.h>
  32#include <linux/delay.h>
  33#include <linux/list.h>
  34#include <linux/notifier.h>
  35#include <linux/mutex.h>
  36#include <linux/kthread.h>
  37#include <asm/irq.h>
  38#include <linux/interrupt.h>
  39#include <linux/rcupdate.h>
  40#include <linux/ipmi.h>
  41#include <linux/ipmi_smi.h>
  42#include "ipmi_si.h"
  43#include <linux/string.h>
  44#include <linux/ctype.h>
  45
  46/* Measure times between events in the driver. */
  47#undef DEBUG_TIMING
  48
  49/* Call every 10 ms. */
  50#define SI_TIMEOUT_TIME_USEC    10000
  51#define SI_USEC_PER_JIFFY       (1000000/HZ)
  52#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
   53#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
   54                                      short timeout */
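
/*
 * Worked example (HZ is config-dependent, so the numbers are
 * illustrative): with HZ=1000, SI_USEC_PER_JIFFY = 1000000/1000 = 1000
 * and SI_TIMEOUT_JIFFIES = 10000/1000 = 10 jiffies; with HZ=250,
 * SI_USEC_PER_JIFFY = 4000 and SI_TIMEOUT_JIFFIES = 10000/4000 = 2
 * (integer division), so the timeout fires every ~8 ms rather than 10.
 */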
  55
  56enum si_intf_state {
  57        SI_NORMAL,
  58        SI_GETTING_FLAGS,
  59        SI_GETTING_EVENTS,
  60        SI_CLEARING_FLAGS,
  61        SI_GETTING_MESSAGES,
  62        SI_CHECKING_ENABLES,
  63        SI_SETTING_ENABLES
  64        /* FIXME - add watchdog stuff. */
  65};
  66
  67/* Some BT-specific defines we need here. */
  68#define IPMI_BT_INTMASK_REG             2
  69#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
  70#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
  71
  72static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
  73
  74static bool initialized;
  75
  76/*
  77 * Indexes into stats[] in smi_info below.
  78 */
  79enum si_stat_indexes {
  80        /*
  81         * Number of times the driver requested a timer while an operation
  82         * was in progress.
  83         */
  84        SI_STAT_short_timeouts = 0,
  85
  86        /*
  87         * Number of times the driver requested a timer while nothing was in
  88         * progress.
  89         */
  90        SI_STAT_long_timeouts,
  91
  92        /* Number of times the interface was idle while being polled. */
  93        SI_STAT_idles,
  94
  95        /* Number of interrupts the driver handled. */
  96        SI_STAT_interrupts,
  97
  98        /* Number of time the driver got an ATTN from the hardware. */
  99        SI_STAT_attentions,
 100
 101        /* Number of times the driver requested flags from the hardware. */
 102        SI_STAT_flag_fetches,
 103
 104        /* Number of times the hardware didn't follow the state machine. */
 105        SI_STAT_hosed_count,
 106
 107        /* Number of completed messages. */
 108        SI_STAT_complete_transactions,
 109
 110        /* Number of IPMI events received from the hardware. */
 111        SI_STAT_events,
 112
 113        /* Number of watchdog pretimeouts. */
 114        SI_STAT_watchdog_pretimeouts,
 115
 116        /* Number of asynchronous messages received. */
 117        SI_STAT_incoming_messages,
 118
 119
 120        /* This *must* remain last, add new values above this. */
 121        SI_NUM_STATS
 122};
 123
 124struct smi_info {
 125        int                    si_num;
 126        struct ipmi_smi        *intf;
 127        struct si_sm_data      *si_sm;
 128        const struct si_sm_handlers *handlers;
 129        spinlock_t             si_lock;
 130        struct ipmi_smi_msg    *waiting_msg;
 131        struct ipmi_smi_msg    *curr_msg;
 132        enum si_intf_state     si_state;
 133
 134        /*
 135         * Used to handle the various types of I/O that can occur with
 136         * IPMI
 137         */
 138        struct si_sm_io io;
 139
 140        /*
 141         * Per-OEM handler, called from handle_flags().  Returns 1
 142         * when handle_flags() needs to be re-run or 0 indicating it
 143         * set si_state itself.
 144         */
 145        int (*oem_data_avail_handler)(struct smi_info *smi_info);
 146
 147        /*
 148         * Flags from the last GET_MSG_FLAGS command, used when an ATTN
 149         * is set to hold the flags until we are done handling everything
 150         * from the flags.
 151         */
 152#define RECEIVE_MSG_AVAIL       0x01
 153#define EVENT_MSG_BUFFER_FULL   0x02
 154#define WDT_PRE_TIMEOUT_INT     0x08
 155#define OEM0_DATA_AVAIL     0x20
 156#define OEM1_DATA_AVAIL     0x40
 157#define OEM2_DATA_AVAIL     0x80
 158#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
 159                             OEM1_DATA_AVAIL | \
 160                             OEM2_DATA_AVAIL)
 161        unsigned char       msg_flags;
 162
 163        /* Does the BMC have an event buffer? */
 164        bool                has_event_buffer;
 165
 166        /*
 167         * If set to true, this will request events the next time the
 168         * state machine is idle.
 169         */
 170        atomic_t            req_events;
 171
 172        /*
 173         * If true, run the state machine to completion on every send
 174         * call.  Generally used after a panic to make sure stuff goes
 175         * out.
 176         */
 177        bool                run_to_completion;
 178
 179        /* The timer for this si. */
 180        struct timer_list   si_timer;
 181
  182        /* This flag is set if the timer is allowed to be started. */
 183        bool                timer_can_start;
 184
  185        /* This flag is set if the timer is running (timer_pending() isn't enough). */
 186        bool                timer_running;
 187
 188        /* The time (in jiffies) the last timeout occurred at. */
 189        unsigned long       last_timeout_jiffies;
 190
  191        /* Are we waiting for events, pretimeouts, or received msgs? */
 192        atomic_t            need_watch;
 193
 194        /*
 195         * The driver will disable interrupts when it gets into a
 196         * situation where it cannot handle messages due to lack of
 197         * memory.  Once that situation clears up, it will re-enable
 198         * interrupts.
 199         */
 200        bool interrupt_disabled;
 201
 202        /*
 203         * Does the BMC support events?
 204         */
 205        bool supports_event_msg_buff;
 206
 207        /*
  208         * Can we disable interrupts via the global enables receive
  209         * irq bit?  There are currently two forms of brokenness, some
 210         * systems cannot disable the bit (which is technically within
 211         * the spec but a bad idea) and some systems have the bit
 212         * forced to zero even though interrupts work (which is
 213         * clearly outside the spec).  The next bool tells which form
 214         * of brokenness is present.
 215         */
 216        bool cannot_disable_irq;
 217
 218        /*
 219         * Some systems are broken and cannot set the irq enable
 220         * bit, even if they support interrupts.
 221         */
 222        bool irq_enable_broken;
 223
 224        /*
 225         * Did we get an attention that we did not handle?
 226         */
 227        bool got_attn;
 228
 229        /* From the get device id response... */
 230        struct ipmi_device_id device_id;
 231
 232        /* Have we added the device group to the device? */
 233        bool dev_group_added;
 234
 235        /* Counters and things for the proc filesystem. */
 236        atomic_t stats[SI_NUM_STATS];
 237
 238        struct task_struct *thread;
 239
 240        struct list_head link;
 241};
 242
 243#define smi_inc_stat(smi, stat) \
 244        atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
 245#define smi_get_stat(smi, stat) \
 246        ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
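
/*
 * Usage sketch (illustrative): smi_inc_stat(smi_info, attentions)
 * token-pastes to atomic_inc(&smi_info->stats[SI_STAT_attentions]),
 * so the name passed in must match an si_stat_indexes suffix exactly.
 */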
 247
 248#define IPMI_MAX_INTFS 4
 249static int force_kipmid[IPMI_MAX_INTFS];
 250static int num_force_kipmid;
 251
 252static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
 253static int num_max_busy_us;
 254
 255static bool unload_when_empty = true;
 256
 257static int try_smi_init(struct smi_info *smi);
 258static void cleanup_one_si(struct smi_info *smi_info);
 259static void cleanup_ipmi_si(void);
 260
 261#ifdef DEBUG_TIMING
 262void debug_timestamp(char *msg)
 263{
 264        struct timespec64 t;
 265
 266        ktime_get_ts64(&t);
 267        pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
 268}
 269#else
 270#define debug_timestamp(x)
 271#endif
 272
 273static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 274static int register_xaction_notifier(struct notifier_block *nb)
 275{
 276        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 277}
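
/*
 * A minimal sketch of a transaction notifier (hypothetical, not part
 * of this file): the chain is invoked from start_next_msg() below with
 * val == 0 and the struct smi_info as the data pointer; returning
 * NOTIFY_STOP makes start_next_msg() skip starting the transaction.
 */
#if 0   /* example only */
static int example_xaction_cb(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct smi_info *smi_info = data;       /* supplied by the caller */

        (void)smi_info;         /* a real handler would inspect it */
        return NOTIFY_DONE;     /* continue normal processing */
}

static struct notifier_block example_xaction_nb = {
        .notifier_call = example_xaction_cb,
};
/* Registered with: register_xaction_notifier(&example_xaction_nb); */
#endif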
 278
 279static void deliver_recv_msg(struct smi_info *smi_info,
 280                             struct ipmi_smi_msg *msg)
 281{
 282        /* Deliver the message to the upper layer. */
 283        ipmi_smi_msg_received(smi_info->intf, msg);
 284}
 285
 286static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 287{
 288        struct ipmi_smi_msg *msg = smi_info->curr_msg;
 289
 290        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
 291                cCode = IPMI_ERR_UNSPECIFIED;
 292        /* else use it as is */
 293
 294        /* Make it a response */
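        /*
         * (Response netfn is the request netfn | 1; byte 0 holds the
         * netfn shifted left two bits, and request netfns are even per
         * the IPMI spec, so OR-ing byte 0 with 4 forms the response
         * netfn in place.)
         */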
 295        msg->rsp[0] = msg->data[0] | 4;
 296        msg->rsp[1] = msg->data[1];
 297        msg->rsp[2] = cCode;
 298        msg->rsp_size = 3;
 299
 300        smi_info->curr_msg = NULL;
 301        deliver_recv_msg(smi_info, msg);
 302}
 303
 304static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 305{
 306        int              rv;
 307
 308        if (!smi_info->waiting_msg) {
 309                smi_info->curr_msg = NULL;
 310                rv = SI_SM_IDLE;
 311        } else {
 312                int err;
 313
 314                smi_info->curr_msg = smi_info->waiting_msg;
 315                smi_info->waiting_msg = NULL;
 316                debug_timestamp("Start2");
 317                err = atomic_notifier_call_chain(&xaction_notifier_list,
 318                                0, smi_info);
 319                if (err & NOTIFY_STOP_MASK) {
 320                        rv = SI_SM_CALL_WITHOUT_DELAY;
 321                        goto out;
 322                }
 323                err = smi_info->handlers->start_transaction(
 324                        smi_info->si_sm,
 325                        smi_info->curr_msg->data,
 326                        smi_info->curr_msg->data_size);
 327                if (err)
 328                        return_hosed_msg(smi_info, err);
 329
 330                rv = SI_SM_CALL_WITHOUT_DELAY;
 331        }
 332out:
 333        return rv;
 334}
 335
 336static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 337{
 338        if (!smi_info->timer_can_start)
 339                return;
 340        smi_info->last_timeout_jiffies = jiffies;
 341        mod_timer(&smi_info->si_timer, new_val);
 342        smi_info->timer_running = true;
 343}
 344
 345/*
 346 * Start a new message and (re)start the timer and thread.
 347 */
 348static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
 349                          unsigned int size)
 350{
 351        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 352
 353        if (smi_info->thread)
 354                wake_up_process(smi_info->thread);
 355
 356        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 357}
 358
 359static void start_check_enables(struct smi_info *smi_info)
 360{
 361        unsigned char msg[2];
 362
 363        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 364        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 365
 366        start_new_msg(smi_info, msg, 2);
 367        smi_info->si_state = SI_CHECKING_ENABLES;
 368}
 369
 370static void start_clear_flags(struct smi_info *smi_info)
 371{
 372        unsigned char msg[3];
 373
 374        /* Make sure the watchdog pre-timeout flag is not set at startup. */
 375        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 376        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 377        msg[2] = WDT_PRE_TIMEOUT_INT;
 378
 379        start_new_msg(smi_info, msg, 3);
 380        smi_info->si_state = SI_CLEARING_FLAGS;
 381}
 382
 383static void start_getting_msg_queue(struct smi_info *smi_info)
 384{
 385        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 386        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
 387        smi_info->curr_msg->data_size = 2;
 388
 389        start_new_msg(smi_info, smi_info->curr_msg->data,
 390                      smi_info->curr_msg->data_size);
 391        smi_info->si_state = SI_GETTING_MESSAGES;
 392}
 393
 394static void start_getting_events(struct smi_info *smi_info)
 395{
 396        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 397        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 398        smi_info->curr_msg->data_size = 2;
 399
 400        start_new_msg(smi_info, smi_info->curr_msg->data,
 401                      smi_info->curr_msg->data_size);
 402        smi_info->si_state = SI_GETTING_EVENTS;
 403}
 404
 405/*
  406 * When we have a situation where we run out of memory and cannot
 407 * allocate messages, we just leave them in the BMC and run the system
 408 * polled until we can allocate some memory.  Once we have some
 409 * memory, we will re-enable the interrupt.
 410 *
 411 * Note that we cannot just use disable_irq(), since the interrupt may
 412 * be shared.
 413 */
 414static inline bool disable_si_irq(struct smi_info *smi_info)
 415{
 416        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
 417                smi_info->interrupt_disabled = true;
 418                start_check_enables(smi_info);
 419                return true;
 420        }
 421        return false;
 422}
 423
 424static inline bool enable_si_irq(struct smi_info *smi_info)
 425{
 426        if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
 427                smi_info->interrupt_disabled = false;
 428                start_check_enables(smi_info);
 429                return true;
 430        }
 431        return false;
 432}
 433
 434/*
 435 * Allocate a message.  If unable to allocate, start the interrupt
 436 * disable process and return NULL.  If able to allocate but
 437 * interrupts are disabled, free the message and return NULL after
 438 * starting the interrupt enable process.
 439 */
 440static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 441{
 442        struct ipmi_smi_msg *msg;
 443
 444        msg = ipmi_alloc_smi_msg();
 445        if (!msg) {
 446                if (!disable_si_irq(smi_info))
 447                        smi_info->si_state = SI_NORMAL;
 448        } else if (enable_si_irq(smi_info)) {
 449                ipmi_free_smi_msg(msg);
 450                msg = NULL;
 451        }
 452        return msg;
 453}
 454
 455static void handle_flags(struct smi_info *smi_info)
 456{
 457retry:
 458        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
 459                /* Watchdog pre-timeout */
 460                smi_inc_stat(smi_info, watchdog_pretimeouts);
 461
 462                start_clear_flags(smi_info);
 463                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 464                ipmi_smi_watchdog_pretimeout(smi_info->intf);
 465        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 466                /* Messages available. */
 467                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 468                if (!smi_info->curr_msg)
 469                        return;
 470
 471                start_getting_msg_queue(smi_info);
 472        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
 473                /* Events available. */
 474                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 475                if (!smi_info->curr_msg)
 476                        return;
 477
 478                start_getting_events(smi_info);
 479        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
 480                   smi_info->oem_data_avail_handler) {
 481                if (smi_info->oem_data_avail_handler(smi_info))
 482                        goto retry;
 483        } else
 484                smi_info->si_state = SI_NORMAL;
 485}
 486
 487/*
 488 * Global enables we care about.
 489 */
 490#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
 491                             IPMI_BMC_EVT_MSG_INTR)
 492
 493static u8 current_global_enables(struct smi_info *smi_info, u8 base,
 494                                 bool *irq_on)
 495{
 496        u8 enables = 0;
 497
 498        if (smi_info->supports_event_msg_buff)
 499                enables |= IPMI_BMC_EVT_MSG_BUFF;
 500
 501        if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
 502             smi_info->cannot_disable_irq) &&
 503            !smi_info->irq_enable_broken)
 504                enables |= IPMI_BMC_RCV_MSG_INTR;
 505
 506        if (smi_info->supports_event_msg_buff &&
 507            smi_info->io.irq && !smi_info->interrupt_disabled &&
 508            !smi_info->irq_enable_broken)
 509                enables |= IPMI_BMC_EVT_MSG_INTR;
 510
 511        *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
 512
 513        return enables;
 514}
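
/*
 * Illustrative outcomes: with a working irq and event message buffer
 * support this returns IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR |
 * IPMI_BMC_EVT_MSG_INTR and sets *irq_on; with irq_enable_broken it
 * degrades to just IPMI_BMC_EVT_MSG_BUFF and *irq_on comes back false.
 */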
 515
 516static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
 517{
 518        u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
 519
 520        irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
 521
 522        if ((bool)irqstate == irq_on)
 523                return;
 524
 525        if (irq_on)
 526                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
 527                                     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
 528        else
 529                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
 530}
 531
 532static void handle_transaction_done(struct smi_info *smi_info)
 533{
 534        struct ipmi_smi_msg *msg;
 535
 536        debug_timestamp("Done");
 537        switch (smi_info->si_state) {
 538        case SI_NORMAL:
 539                if (!smi_info->curr_msg)
 540                        break;
 541
 542                smi_info->curr_msg->rsp_size
 543                        = smi_info->handlers->get_result(
 544                                smi_info->si_sm,
 545                                smi_info->curr_msg->rsp,
 546                                IPMI_MAX_MSG_LENGTH);
 547
 548                /*
  549                 * Do this here because deliver_recv_msg() releases the
 550                 * lock, and a new message can be put in during the
 551                 * time the lock is released.
 552                 */
 553                msg = smi_info->curr_msg;
 554                smi_info->curr_msg = NULL;
 555                deliver_recv_msg(smi_info, msg);
 556                break;
 557
 558        case SI_GETTING_FLAGS:
 559        {
 560                unsigned char msg[4];
 561                unsigned int  len;
 562
 563                /* We got the flags from the SMI, now handle them. */
 564                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 565                if (msg[2] != 0) {
 566                        /* Error fetching flags, just give up for now. */
 567                        smi_info->si_state = SI_NORMAL;
 568                } else if (len < 4) {
 569                        /*
 570                         * Hmm, no flags.  That's technically illegal, but
 571                         * don't use uninitialized data.
 572                         */
 573                        smi_info->si_state = SI_NORMAL;
 574                } else {
 575                        smi_info->msg_flags = msg[3];
 576                        handle_flags(smi_info);
 577                }
 578                break;
 579        }
 580
 581        case SI_CLEARING_FLAGS:
 582        {
 583                unsigned char msg[3];
 584
 585                /* We cleared the flags. */
 586                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
 587                if (msg[2] != 0) {
 588                        /* Error clearing flags */
 589                        dev_warn(smi_info->io.dev,
 590                                 "Error clearing flags: %2.2x\n", msg[2]);
 591                }
 592                smi_info->si_state = SI_NORMAL;
 593                break;
 594        }
 595
 596        case SI_GETTING_EVENTS:
 597        {
 598                smi_info->curr_msg->rsp_size
 599                        = smi_info->handlers->get_result(
 600                                smi_info->si_sm,
 601                                smi_info->curr_msg->rsp,
 602                                IPMI_MAX_MSG_LENGTH);
 603
 604                /*
  605                 * Do this here because deliver_recv_msg() releases the
 606                 * lock, and a new message can be put in during the
 607                 * time the lock is released.
 608                 */
 609                msg = smi_info->curr_msg;
 610                smi_info->curr_msg = NULL;
 611                if (msg->rsp[2] != 0) {
 612                        /* Error getting event, probably done. */
 613                        msg->done(msg);
 614
 615                        /* Take off the event flag. */
 616                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
 617                        handle_flags(smi_info);
 618                } else {
 619                        smi_inc_stat(smi_info, events);
 620
 621                        /*
 622                         * Do this before we deliver the message
 623                         * because delivering the message releases the
 624                         * lock and something else can mess with the
 625                         * state.
 626                         */
 627                        handle_flags(smi_info);
 628
 629                        deliver_recv_msg(smi_info, msg);
 630                }
 631                break;
 632        }
 633
 634        case SI_GETTING_MESSAGES:
 635        {
 636                smi_info->curr_msg->rsp_size
 637                        = smi_info->handlers->get_result(
 638                                smi_info->si_sm,
 639                                smi_info->curr_msg->rsp,
 640                                IPMI_MAX_MSG_LENGTH);
 641
 642                /*
  643                 * Do this here because deliver_recv_msg() releases the
 644                 * lock, and a new message can be put in during the
 645                 * time the lock is released.
 646                 */
 647                msg = smi_info->curr_msg;
 648                smi_info->curr_msg = NULL;
 649                if (msg->rsp[2] != 0) {
  650                        /* Error getting message, probably done. */
 651                        msg->done(msg);
 652
 653                        /* Take off the msg flag. */
 654                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
 655                        handle_flags(smi_info);
 656                } else {
 657                        smi_inc_stat(smi_info, incoming_messages);
 658
 659                        /*
 660                         * Do this before we deliver the message
 661                         * because delivering the message releases the
 662                         * lock and something else can mess with the
 663                         * state.
 664                         */
 665                        handle_flags(smi_info);
 666
 667                        deliver_recv_msg(smi_info, msg);
 668                }
 669                break;
 670        }
 671
 672        case SI_CHECKING_ENABLES:
 673        {
 674                unsigned char msg[4];
 675                u8 enables;
 676                bool irq_on;
 677
  678                /* We got the enables from the SMI, now handle them. */
 679                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 680                if (msg[2] != 0) {
 681                        dev_warn(smi_info->io.dev,
 682                                 "Couldn't get irq info: %x.\n", msg[2]);
 683                        dev_warn(smi_info->io.dev,
 684                                 "Maybe ok, but ipmi might run very slowly.\n");
 685                        smi_info->si_state = SI_NORMAL;
 686                        break;
 687                }
 688                enables = current_global_enables(smi_info, 0, &irq_on);
 689                if (smi_info->io.si_type == SI_BT)
 690                        /* BT has its own interrupt enable bit. */
 691                        check_bt_irq(smi_info, irq_on);
 692                if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
 693                        /* Enables are not correct, fix them. */
 694                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 695                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
 696                        msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
 697                        smi_info->handlers->start_transaction(
 698                                smi_info->si_sm, msg, 3);
 699                        smi_info->si_state = SI_SETTING_ENABLES;
 700                } else if (smi_info->supports_event_msg_buff) {
 701                        smi_info->curr_msg = ipmi_alloc_smi_msg();
 702                        if (!smi_info->curr_msg) {
 703                                smi_info->si_state = SI_NORMAL;
 704                                break;
 705                        }
 706                        start_getting_events(smi_info);
 707                } else {
 708                        smi_info->si_state = SI_NORMAL;
 709                }
 710                break;
 711        }
 712
 713        case SI_SETTING_ENABLES:
 714        {
 715                unsigned char msg[4];
 716
 717                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 718                if (msg[2] != 0)
 719                        dev_warn(smi_info->io.dev,
 720                                 "Could not set the global enables: 0x%x.\n",
 721                                 msg[2]);
 722
 723                if (smi_info->supports_event_msg_buff) {
 724                        smi_info->curr_msg = ipmi_alloc_smi_msg();
 725                        if (!smi_info->curr_msg) {
 726                                smi_info->si_state = SI_NORMAL;
 727                                break;
 728                        }
 729                        start_getting_events(smi_info);
 730                } else {
 731                        smi_info->si_state = SI_NORMAL;
 732                }
 733                break;
 734        }
 735        }
 736}
 737
 738/*
 739 * Called on timeouts and events.  Timeouts should pass the elapsed
 740 * time, interrupts should pass in zero.  Must be called with
 741 * si_lock held and interrupts disabled.
 742 */
 743static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 744                                           int time)
 745{
 746        enum si_sm_result si_sm_result;
 747
 748restart:
 749        /*
 750         * There used to be a loop here that waited a little while
 751         * (around 25us) before giving up.  That turned out to be
 752         * pointless, the minimum delays I was seeing were in the 300us
 753         * range, which is far too long to wait in an interrupt.  So
 754         * we just run until the state machine tells us something
 755         * happened or it needs a delay.
 756         */
 757        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
 758        time = 0;
 759        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
 760                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 761
 762        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
 763                smi_inc_stat(smi_info, complete_transactions);
 764
 765                handle_transaction_done(smi_info);
 766                goto restart;
 767        } else if (si_sm_result == SI_SM_HOSED) {
 768                smi_inc_stat(smi_info, hosed_count);
 769
 770                /*
  771                 * Do this before return_hosed_msg(), because that
 772                 * releases the lock.
 773                 */
 774                smi_info->si_state = SI_NORMAL;
 775                if (smi_info->curr_msg != NULL) {
 776                        /*
 777                         * If we were handling a user message, format
 778                         * a response to send to the upper layer to
 779                         * tell it about the error.
 780                         */
 781                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 782                }
 783                goto restart;
 784        }
 785
 786        /*
 787         * We prefer handling attn over new messages.  But don't do
 788         * this if there is not yet an upper layer to handle anything.
 789         */
 790        if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
 791                unsigned char msg[2];
 792
 793                if (smi_info->si_state != SI_NORMAL) {
 794                        /*
 795                         * We got an ATTN, but we are doing something else.
 796                         * Handle the ATTN later.
 797                         */
 798                        smi_info->got_attn = true;
 799                } else {
 800                        smi_info->got_attn = false;
 801                        smi_inc_stat(smi_info, attentions);
 802
 803                        /*
  804                 * Got an attn, send down a get message flags to see
 805                         * what's causing it.  It would be better to handle
 806                         * this in the upper layer, but due to the way
 807                         * interrupts work with the SMI, that's not really
 808                         * possible.
 809                         */
 810                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 811                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 812
 813                        start_new_msg(smi_info, msg, 2);
 814                        smi_info->si_state = SI_GETTING_FLAGS;
 815                        goto restart;
 816                }
 817        }
 818
 819        /* If we are currently idle, try to start the next message. */
 820        if (si_sm_result == SI_SM_IDLE) {
 821                smi_inc_stat(smi_info, idles);
 822
 823                si_sm_result = start_next_msg(smi_info);
 824                if (si_sm_result != SI_SM_IDLE)
 825                        goto restart;
 826        }
 827
 828        if ((si_sm_result == SI_SM_IDLE)
 829            && (atomic_read(&smi_info->req_events))) {
 830                /*
  831                 * We are idle and the upper layer requested that we fetch
 832                 * events, so do so.
 833                 */
 834                atomic_set(&smi_info->req_events, 0);
 835
 836                /*
 837                 * Take this opportunity to check the interrupt and
 838                 * message enable state for the BMC.  The BMC can be
 839                 * asynchronously reset, and may thus get interrupts
  840         * disabled and messages disabled.
 841                 */
 842                if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
 843                        start_check_enables(smi_info);
 844                } else {
 845                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 846                        if (!smi_info->curr_msg)
 847                                goto out;
 848
 849                        start_getting_events(smi_info);
 850                }
 851                goto restart;
 852        }
 853
 854        if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
  855                /* OK if it fails, the timer will just go off. */
 856                if (del_timer(&smi_info->si_timer))
 857                        smi_info->timer_running = false;
 858        }
 859
 860out:
 861        return si_sm_result;
 862}
 863
 864static void check_start_timer_thread(struct smi_info *smi_info)
 865{
 866        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
 867                smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 868
 869                if (smi_info->thread)
 870                        wake_up_process(smi_info->thread);
 871
 872                start_next_msg(smi_info);
 873                smi_event_handler(smi_info, 0);
 874        }
 875}
 876
 877static void flush_messages(void *send_info)
 878{
 879        struct smi_info *smi_info = send_info;
 880        enum si_sm_result result;
 881
 882        /*
 883         * Currently, this function is called only in run-to-completion
 884         * mode.  This means we are single-threaded, no need for locks.
 885         */
 886        result = smi_event_handler(smi_info, 0);
 887        while (result != SI_SM_IDLE) {
 888                udelay(SI_SHORT_TIMEOUT_USEC);
 889                result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
 890        }
 891}
 892
 893static void sender(void                *send_info,
 894                   struct ipmi_smi_msg *msg)
 895{
 896        struct smi_info   *smi_info = send_info;
 897        unsigned long     flags;
 898
 899        debug_timestamp("Enqueue");
 900
 901        if (smi_info->run_to_completion) {
 902                /*
 903                 * If we are running to completion, start it.  Upper
 904                 * layer will call flush_messages to clear it out.
 905                 */
 906                smi_info->waiting_msg = msg;
 907                return;
 908        }
 909
 910        spin_lock_irqsave(&smi_info->si_lock, flags);
 911        /*
 912         * The following two lines don't need to be under the lock for
 913         * the lock's sake, but they do need SMP memory barriers to
 914         * avoid getting things out of order.  We are already claiming
 915         * the lock, anyway, so just do it under the lock to avoid the
 916         * ordering problem.
 917         */
 918        BUG_ON(smi_info->waiting_msg);
 919        smi_info->waiting_msg = msg;
 920        check_start_timer_thread(smi_info);
 921        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 922}
 923
 924static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 925{
 926        struct smi_info   *smi_info = send_info;
 927
 928        smi_info->run_to_completion = i_run_to_completion;
 929        if (i_run_to_completion)
 930                flush_messages(smi_info);
 931}
 932
 933/*
  934 * Use -1 in the nsec value of the busy-waiting timespec to indicate
  935 * that we are spinning in kipmid looking for something and not
  936 * delaying between checks.
 937 */
 938static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 939{
 940        ts->tv_nsec = -1;
 941}
 942static inline int ipmi_si_is_busy(struct timespec64 *ts)
 943{
 944        return ts->tv_nsec != -1;
 945}
 946
 947static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 948                                        const struct smi_info *smi_info,
 949                                        struct timespec64 *busy_until)
 950{
 951        unsigned int max_busy_us = 0;
 952
 953        if (smi_info->si_num < num_max_busy_us)
 954                max_busy_us = kipmid_max_busy_us[smi_info->si_num];
 955        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
 956                ipmi_si_set_not_busy(busy_until);
 957        else if (!ipmi_si_is_busy(busy_until)) {
 958                ktime_get_ts64(busy_until);
 959                timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
 960        } else {
 961                struct timespec64 now;
 962
 963                ktime_get_ts64(&now);
 964                if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
 965                        ipmi_si_set_not_busy(busy_until);
 966                        return 0;
 967                }
 968        }
 969        return 1;
 970}
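
/*
 * Sentinel protocol used above (illustrative): ipmi_thread keeps one
 * struct timespec64 busy_until across loop iterations, with tv_nsec ==
 * -1 meaning "not currently busy-waiting".  The first
 * SI_SM_CALL_WITH_DELAY result arms a deadline of now +
 * kipmid_max_busy_us; once that deadline passes this returns 0 and the
 * thread falls back to schedule_timeout_interruptible(1) (a real
 * sleep) instead of the schedule() yield used while busy-waiting.
 */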
 971
 972
 973/*
 974 * A busy-waiting loop for speeding up IPMI operation.
 975 *
 976 * Lousy hardware makes this hard.  This is only enabled for systems
  977 * that are not BT and do not have interrupts.  It spins until an
  978 * operation is complete or until max_busy tells it to stop
  979 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 980 * Documentation/IPMI.txt for details.
 981 */
 982static int ipmi_thread(void *data)
 983{
 984        struct smi_info *smi_info = data;
 985        unsigned long flags;
 986        enum si_sm_result smi_result;
 987        struct timespec64 busy_until;
 988
 989        ipmi_si_set_not_busy(&busy_until);
 990        set_user_nice(current, MAX_NICE);
 991        while (!kthread_should_stop()) {
 992                int busy_wait;
 993
 994                spin_lock_irqsave(&(smi_info->si_lock), flags);
 995                smi_result = smi_event_handler(smi_info, 0);
 996
 997                /*
 998                 * If the driver is doing something, there is a possible
  999                 * race with the timer.  If the timer handler sees idle,
1000                 * and the thread here sees something else, the timer
1001                 * handler won't restart the timer even though it is
1002                 * required.  So start it here if necessary.
1003                 */
1004                if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
1005                        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1006
1007                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1008                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1009                                                  &busy_until);
1010                if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1011                        ; /* do nothing */
1012                else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1013                        schedule();
1014                else if (smi_result == SI_SM_IDLE) {
1015                        if (atomic_read(&smi_info->need_watch)) {
1016                                schedule_timeout_interruptible(100);
1017                        } else {
1018                                /* Wait to be woken up when we are needed. */
1019                                __set_current_state(TASK_INTERRUPTIBLE);
1020                                schedule();
1021                        }
1022                } else
1023                        schedule_timeout_interruptible(1);
1024        }
1025        return 0;
1026}
1027
1028
1029static void poll(void *send_info)
1030{
1031        struct smi_info *smi_info = send_info;
1032        unsigned long flags = 0;
1033        bool run_to_completion = smi_info->run_to_completion;
1034
1035        /*
1036         * Make sure there is some delay in the poll loop so we can
 1037         * drive time forward and time things out.
1038         */
1039        udelay(10);
1040        if (!run_to_completion)
1041                spin_lock_irqsave(&smi_info->si_lock, flags);
1042        smi_event_handler(smi_info, 10);
1043        if (!run_to_completion)
1044                spin_unlock_irqrestore(&smi_info->si_lock, flags);
1045}
1046
1047static void request_events(void *send_info)
1048{
1049        struct smi_info *smi_info = send_info;
1050
1051        if (!smi_info->has_event_buffer)
1052                return;
1053
1054        atomic_set(&smi_info->req_events, 1);
1055}
1056
1057static void set_need_watch(void *send_info, unsigned int watch_mask)
1058{
1059        struct smi_info *smi_info = send_info;
1060        unsigned long flags;
1061        int enable;
1062
1063        enable = !!watch_mask;
1064
1065        atomic_set(&smi_info->need_watch, enable);
1066        spin_lock_irqsave(&smi_info->si_lock, flags);
1067        check_start_timer_thread(smi_info);
1068        spin_unlock_irqrestore(&smi_info->si_lock, flags);
1069}
1070
1071static void smi_timeout(struct timer_list *t)
1072{
1073        struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
1074        enum si_sm_result smi_result;
1075        unsigned long     flags;
1076        unsigned long     jiffies_now;
1077        long              time_diff;
1078        long              timeout;
1079
1080        spin_lock_irqsave(&(smi_info->si_lock), flags);
1081        debug_timestamp("Timer");
1082
1083        jiffies_now = jiffies;
1084        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1085                     * SI_USEC_PER_JIFFY);
1086        smi_result = smi_event_handler(smi_info, time_diff);
1087
1088        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
1089                /* Running with interrupts, only do long timeouts. */
1090                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1091                smi_inc_stat(smi_info, long_timeouts);
1092                goto do_mod_timer;
1093        }
1094
1095        /*
1096         * If the state machine asks for a short delay, then shorten
1097         * the timer timeout.
1098         */
1099        if (smi_result == SI_SM_CALL_WITH_DELAY) {
1100                smi_inc_stat(smi_info, short_timeouts);
1101                timeout = jiffies + 1;
1102        } else {
1103                smi_inc_stat(smi_info, long_timeouts);
1104                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1105        }
1106
1107do_mod_timer:
1108        if (smi_result != SI_SM_IDLE)
1109                smi_mod_timer(smi_info, timeout);
1110        else
1111                smi_info->timer_running = false;
1112        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1113}
1114
1115irqreturn_t ipmi_si_irq_handler(int irq, void *data)
1116{
1117        struct smi_info *smi_info = data;
1118        unsigned long   flags;
1119
1120        if (smi_info->io.si_type == SI_BT)
1121                /* We need to clear the IRQ flag for the BT interface. */
1122                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1123                                     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1124                                     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1125
1126        spin_lock_irqsave(&(smi_info->si_lock), flags);
1127
1128        smi_inc_stat(smi_info, interrupts);
1129
1130        debug_timestamp("Interrupt");
1131
1132        smi_event_handler(smi_info, 0);
1133        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1134        return IRQ_HANDLED;
1135}
1136
1137static int smi_start_processing(void            *send_info,
1138                                struct ipmi_smi *intf)
1139{
1140        struct smi_info *new_smi = send_info;
1141        int             enable = 0;
1142
1143        new_smi->intf = intf;
1144
1145        /* Set up the timer that drives the interface. */
1146        timer_setup(&new_smi->si_timer, smi_timeout, 0);
1147        new_smi->timer_can_start = true;
1148        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1149
1150        /* Try to claim any interrupts. */
1151        if (new_smi->io.irq_setup) {
1152                new_smi->io.irq_handler_data = new_smi;
1153                new_smi->io.irq_setup(&new_smi->io);
1154        }
1155
1156        /*
1157         * Check if the user forcefully enabled the daemon.
1158         */
1159        if (new_smi->si_num < num_force_kipmid)
1160                enable = force_kipmid[new_smi->si_num];
1161        /*
1162         * The BT interface is efficient enough to not need a thread,
1163         * and there is no need for a thread if we have interrupts.
1164         */
1165        else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
1166                enable = 1;
1167
1168        if (enable) {
1169                new_smi->thread = kthread_run(ipmi_thread, new_smi,
1170                                              "kipmi%d", new_smi->si_num);
1171                if (IS_ERR(new_smi->thread)) {
 1172                        dev_notice(new_smi->io.dev,
 1173                                   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
 1174                                   PTR_ERR(new_smi->thread));
1176                        new_smi->thread = NULL;
1177                }
1178        }
1179
1180        return 0;
1181}
1182
1183static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1184{
1185        struct smi_info *smi = send_info;
1186
1187        data->addr_src = smi->io.addr_source;
1188        data->dev = smi->io.dev;
1189        data->addr_info = smi->io.addr_info;
1190        get_device(smi->io.dev);
1191
1192        return 0;
1193}
1194
1195static void set_maintenance_mode(void *send_info, bool enable)
1196{
1197        struct smi_info   *smi_info = send_info;
1198
1199        if (!enable)
1200                atomic_set(&smi_info->req_events, 0);
1201}
1202
1203static void shutdown_smi(void *send_info);
1204static const struct ipmi_smi_handlers handlers = {
1205        .owner                  = THIS_MODULE,
1206        .start_processing       = smi_start_processing,
1207        .shutdown               = shutdown_smi,
1208        .get_smi_info           = get_smi_info,
1209        .sender                 = sender,
1210        .request_events         = request_events,
1211        .set_need_watch         = set_need_watch,
1212        .set_maintenance_mode   = set_maintenance_mode,
1213        .set_run_to_completion  = set_run_to_completion,
1214        .flush_messages         = flush_messages,
1215        .poll                   = poll,
1216};
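
/*
 * These callbacks are what the IPMI message-handler core invokes;
 * try_smi_init() (declared above, defined later in this file) hands
 * them to the core together with the per-interface struct smi_info,
 * which comes back to each handler as the opaque send_info pointer.
 */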
1217
1218static LIST_HEAD(smi_infos);
1219static DEFINE_MUTEX(smi_infos_lock);
1220static int smi_num; /* Used to sequence the SMIs */
1221
1222static const char * const addr_space_to_str[] = { "i/o", "mem" };
1223
1224module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1225MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1226                 " disabled(0).  Normally the IPMI driver auto-detects"
1227                 " this, but the value may be overridden by this parm.");
1228module_param(unload_when_empty, bool, 0);
1229MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1230                 " specified or found, default is 1.  Setting to 0"
1231                 " is useful for hot add of devices using hotmod.");
1232module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1233MODULE_PARM_DESC(kipmid_max_busy_us,
1234                 "Max time (in microseconds) to busy-wait for IPMI data before"
1235                 " sleeping. 0 (default) means to wait forever. Set to 100-500"
1236                 " if kipmid is using up a lot of CPU time.");
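
/*
 * Usage sketch (illustrative values): the parameters above are
 * per-interface arrays, so e.g.
 *
 *   modprobe ipmi_si force_kipmid=1 kipmid_max_busy_us=500
 *
 * forces the kipmid thread on for interface 0 and caps its
 * busy-waiting at 500 microseconds.
 */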
1237
1238void ipmi_irq_finish_setup(struct si_sm_io *io)
1239{
1240        if (io->si_type == SI_BT)
1241                /* Enable the interrupt in the BT interface. */
1242                io->outputb(io, IPMI_BT_INTMASK_REG,
1243                            IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1244}
1245
1246void ipmi_irq_start_cleanup(struct si_sm_io *io)
1247{
1248        if (io->si_type == SI_BT)
1249                /* Disable the interrupt in the BT interface. */
1250                io->outputb(io, IPMI_BT_INTMASK_REG, 0);
1251}
1252
1253static void std_irq_cleanup(struct si_sm_io *io)
1254{
1255        ipmi_irq_start_cleanup(io);
1256        free_irq(io->irq, io->irq_handler_data);
1257}
1258
1259int ipmi_std_irq_setup(struct si_sm_io *io)
1260{
1261        int rv;
1262
1263        if (!io->irq)
1264                return 0;
1265
1266        rv = request_irq(io->irq,
1267                         ipmi_si_irq_handler,
1268                         IRQF_SHARED,
1269                         DEVICE_NAME,
1270                         io->irq_handler_data);
1271        if (rv) {
 1272                dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
 1273                         DEVICE_NAME, io->irq);
1275                io->irq = 0;
1276        } else {
1277                io->irq_cleanup = std_irq_cleanup;
1278                ipmi_irq_finish_setup(io);
1279                dev_info(io->dev, "Using irq %d\n", io->irq);
1280        }
1281
1282        return rv;
1283}
1284
1285static int wait_for_msg_done(struct smi_info *smi_info)
1286{
1287        enum si_sm_result     smi_result;
1288
1289        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1290        for (;;) {
1291                if (smi_result == SI_SM_CALL_WITH_DELAY ||
1292                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1293                        schedule_timeout_uninterruptible(1);
1294                        smi_result = smi_info->handlers->event(
1295                                smi_info->si_sm, jiffies_to_usecs(1));
1296                } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
1297                        smi_result = smi_info->handlers->event(
1298                                smi_info->si_sm, 0);
1299                } else
1300                        break;
1301        }
1302        if (smi_result == SI_SM_HOSED)
1303                /*
1304                 * We couldn't get the state machine to run, so whatever's at
1305                 * the port is probably not an IPMI SMI interface.
1306                 */
1307                return -ENODEV;
1308
1309        return 0;
1310}
1311
1312static int try_get_dev_id(struct smi_info *smi_info)
1313{
1314        unsigned char         msg[2];
1315        unsigned char         *resp;
1316        unsigned long         resp_len;
1317        int                   rv = 0;
1318
1319        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1320        if (!resp)
1321                return -ENOMEM;
1322
1323        /*
1324         * Do a Get Device ID command, since it comes back with some
1325         * useful info.
1326         */
1327        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1328        msg[1] = IPMI_GET_DEVICE_ID_CMD;
1329        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1330
1331        rv = wait_for_msg_done(smi_info);
1332        if (rv)
1333                goto out;
1334
1335        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1336                                                  resp, IPMI_MAX_MSG_LENGTH);
1337
1338        /* Check and record info from the get device id, in case we need it. */
1339        rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
1340                        resp + 2, resp_len - 2, &smi_info->device_id);
1341
1342out:
1343        kfree(resp);
1344        return rv;
1345}
1346
1347static int get_global_enables(struct smi_info *smi_info, u8 *enables)
1348{
1349        unsigned char         msg[3];
1350        unsigned char         *resp;
1351        unsigned long         resp_len;
1352        int                   rv;
1353
1354        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1355        if (!resp)
1356                return -ENOMEM;
1357
1358        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1359        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1360        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1361
1362        rv = wait_for_msg_done(smi_info);
1363        if (rv) {
1364                dev_warn(smi_info->io.dev,
1365                         "Error getting response from get global enables command: %d\n",
1366                         rv);
1367                goto out;
1368        }
1369
1370        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1371                                                  resp, IPMI_MAX_MSG_LENGTH);
1372
1373        if (resp_len < 4 ||
1374                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1375                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1376                        resp[2] != 0) {
1377                dev_warn(smi_info->io.dev,
1378                         "Invalid return from get global enables command: %ld %x %x %x\n",
1379                         resp_len, resp[0], resp[1], resp[2]);
1380                rv = -EINVAL;
1381                goto out;
1382        } else {
1383                *enables = resp[3];
1384        }
1385
1386out:
1387        kfree(resp);
1388        return rv;
1389}
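
/*
 * Response layout assumed by the checks above: resp[0] is the response
 * netfn ((IPMI_NETFN_APP_REQUEST | 1) << 2), resp[1] echoes the
 * command, resp[2] is the completion code (0 on success) and resp[3]
 * carries the global enables byte.
 */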
1390
1391/*
1392 * Returns 1 if it gets an error from the command.
1393 */
1394static int set_global_enables(struct smi_info *smi_info, u8 enables)
1395{
1396        unsigned char         msg[3];
1397        unsigned char         *resp;
1398        unsigned long         resp_len;
1399        int                   rv;
1400
1401        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1402        if (!resp)
1403                return -ENOMEM;
1404
1405        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1406        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1407        msg[2] = enables;
1408        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1409
1410        rv = wait_for_msg_done(smi_info);
1411        if (rv) {
1412                dev_warn(smi_info->io.dev,
1413                         "Error getting response from set global enables command: %d\n",
1414                         rv);
1415                goto out;
1416        }
1417
1418        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1419                                                  resp, IPMI_MAX_MSG_LENGTH);
1420
1421        if (resp_len < 3 ||
1422                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1423                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1424                dev_warn(smi_info->io.dev,
1425                         "Invalid return from set global enables command: %ld %x %x\n",
1426                         resp_len, resp[0], resp[1]);
1427                rv = -EINVAL;
1428                goto out;
1429        }
1430
1431        if (resp[2] != 0)
1432                rv = 1;
1433
1434out:
1435        kfree(resp);
1436        return rv;
1437}
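
/*
 * Return convention relied on by the callers below: a negative errno
 * for transport problems, 1 when the BMC answered with a nonzero
 * completion code, and 0 on success.
 */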
1438
1439/*
1440 * Some BMCs do not support clearing the receive irq bit in the global
1441 * enables (even if they don't support interrupts on the BMC).  Check
1442 * for this and handle it properly.
1443 */
1444static void check_clr_rcv_irq(struct smi_info *smi_info)
1445{
1446        u8 enables = 0;
1447        int rv;
1448
1449        rv = get_global_enables(smi_info, &enables);
1450        if (!rv) {
1451                if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
1452                        /* Already clear, should work ok. */
1453                        return;
1454
1455                enables &= ~IPMI_BMC_RCV_MSG_INTR;
1456                rv = set_global_enables(smi_info, enables);
1457        }
1458
1459        if (rv < 0) {
1460                dev_err(smi_info->io.dev,
1461                        "Cannot check clearing the rcv irq: %d\n", rv);
1462                return;
1463        }
1464
1465        if (rv) {
1466                /*
 1467                 * An error when clearing the receive irq bit means
 1468                 * clearing the bit is not supported.
1469                 */
1470                dev_warn(smi_info->io.dev,
1471                         "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1472                smi_info->cannot_disable_irq = true;
1473        }
1474}
1475
1476/*
1477 * Some BMCs do not support setting the interrupt bits in the global
1478 * enables even if they support interrupts.  Clearly bad, but we can
1479 * compensate.
1480 */
1481static void check_set_rcv_irq(struct smi_info *smi_info)
1482{
1483        u8 enables = 0;
1484        int rv;
1485
1486        if (!smi_info->io.irq)
1487                return;
1488
1489        rv = get_global_enables(smi_info, &enables);
1490        if (!rv) {
1491                enables |= IPMI_BMC_RCV_MSG_INTR;
1492                rv = set_global_enables(smi_info, enables);
1493        }
1494
1495        if (rv < 0) {
1496                dev_err(smi_info->io.dev,
1497                        "Cannot check setting the rcv irq: %d\n", rv);
1498                return;
1499        }
1500
1501        if (rv) {
1502                /*
1503                 * An error when setting the receive irq bit means
1504                 * setting the bit is not supported.
1505                 */
1506                dev_warn(smi_info->io.dev,
1507                         "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1508                smi_info->cannot_disable_irq = true;
1509                smi_info->irq_enable_broken = true;
1510        }
1511}
1512
1513static int try_enable_event_buffer(struct smi_info *smi_info)
1514{
1515        unsigned char         msg[3];
1516        unsigned char         *resp;
1517        unsigned long         resp_len;
1518        int                   rv = 0;
1519
1520        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1521        if (!resp)
1522                return -ENOMEM;
1523
1524        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1525        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1526        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1527
1528        rv = wait_for_msg_done(smi_info);
1529        if (rv) {
1530                pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
1531                goto out;
1532        }
1533
1534        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1535                                                  resp, IPMI_MAX_MSG_LENGTH);
1536
1537        if (resp_len < 4 ||
1538                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1539                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1540                        resp[2] != 0) {
1541                pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
1542                rv = -EINVAL;
1543                goto out;
1544        }
1545
1546        if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
1547                /* buffer is already enabled, nothing to do. */
1548                smi_info->supports_event_msg_buff = true;
1549                goto out;
1550        }
1551
1552        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1553        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1554        msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
1555        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1556
1557        rv = wait_for_msg_done(smi_info);
1558        if (rv) {
1559                pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
1560                goto out;
1561        }
1562
1563        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1564                                                  resp, IPMI_MAX_MSG_LENGTH);
1565
1566        if (resp_len < 3 ||
1567                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1568                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1569                pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
1570                rv = -EINVAL;
1571                goto out;
1572        }
1573
1574        if (resp[2] != 0)
1575                /*
1576                 * An error when setting the event buffer bit means
1577                 * that the event buffer is not supported.
1578                 */
1579                rv = -ENOENT;
1580        else
1581                smi_info->supports_event_msg_buff = true;
1582
1583out:
1584        kfree(resp);
1585        return rv;
1586}
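    /*
     * For reference, a successful exchange above looks like this on the
     * wire, where 0x18 is IPMI_NETFN_APP_REQUEST << 2, 0x1c its response
     * form, <get>/<set> stand for the two global-enables command bytes,
     * and 0x04 is assumed to be IPMI_BMC_EVT_MSG_BUFF:
     *
     *   req { 0x18, <get> }                  rsp { 0x1c, <get>, 0x00, enables }
     *   req { 0x18, <set>, enables | 0x04 }  rsp { 0x1c, <set>, 0x00 }
     */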
1587
1588#define IPMI_SI_ATTR(name) \
1589static ssize_t ipmi_##name##_show(struct device *dev,                   \
1590                                  struct device_attribute *attr,        \
1591                                  char *buf)                            \
1592{                                                                       \
1593        struct smi_info *smi_info = dev_get_drvdata(dev);               \
1594                                                                        \
1595        return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
1596}                                                                       \
1597static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
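    /*
     * For example, IPMI_SI_ATTR(short_timeouts) expands to an
     * ipmi_short_timeouts_show() routine plus a read-only sysfs attribute
     * dev_attr_short_timeouts that prints that counter in decimal,
     * readable as <device sysfs dir>/short_timeouts.
     */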
1598
1599static ssize_t ipmi_type_show(struct device *dev,
1600                              struct device_attribute *attr,
1601                              char *buf)
1602{
1603        struct smi_info *smi_info = dev_get_drvdata(dev);
1604
1605        return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
1606}
1607static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
1608
1609static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
1610                                            struct device_attribute *attr,
1611                                            char *buf)
1612{
1613        struct smi_info *smi_info = dev_get_drvdata(dev);
1614        int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
1615
1616        return snprintf(buf, 10, "%d\n", enabled);
1617}
1618static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
1619                   ipmi_interrupts_enabled_show, NULL);
1620
1621IPMI_SI_ATTR(short_timeouts);
1622IPMI_SI_ATTR(long_timeouts);
1623IPMI_SI_ATTR(idles);
1624IPMI_SI_ATTR(interrupts);
1625IPMI_SI_ATTR(attentions);
1626IPMI_SI_ATTR(flag_fetches);
1627IPMI_SI_ATTR(hosed_count);
1628IPMI_SI_ATTR(complete_transactions);
1629IPMI_SI_ATTR(events);
1630IPMI_SI_ATTR(watchdog_pretimeouts);
1631IPMI_SI_ATTR(incoming_messages);
1632
1633static ssize_t ipmi_params_show(struct device *dev,
1634                                struct device_attribute *attr,
1635                                char *buf)
1636{
1637        struct smi_info *smi_info = dev_get_drvdata(dev);
1638
1639        return snprintf(buf, 200,
1640                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
1641                        si_to_str[smi_info->io.si_type],
1642                        addr_space_to_str[smi_info->io.addr_space],
1643                        smi_info->io.addr_data,
1644                        smi_info->io.regspacing,
1645                        smi_info->io.regsize,
1646                        smi_info->io.regshift,
1647                        smi_info->io.irq,
1648                        smi_info->io.slave_addr);
1649}
1650static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
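    /*
     * The single comma-separated line above resembles the driver's
     * hotmod/hardcode option syntax (type,space,address,rsp=,rsi=,rsh=,
     * irq=,ipmb=) and, assuming the option names match, can be fed back
     * to re-create the interface.
     */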
1651
1652static struct attribute *ipmi_si_dev_attrs[] = {
1653        &dev_attr_type.attr,
1654        &dev_attr_interrupts_enabled.attr,
1655        &dev_attr_short_timeouts.attr,
1656        &dev_attr_long_timeouts.attr,
1657        &dev_attr_idles.attr,
1658        &dev_attr_interrupts.attr,
1659        &dev_attr_attentions.attr,
1660        &dev_attr_flag_fetches.attr,
1661        &dev_attr_hosed_count.attr,
1662        &dev_attr_complete_transactions.attr,
1663        &dev_attr_events.attr,
1664        &dev_attr_watchdog_pretimeouts.attr,
1665        &dev_attr_incoming_messages.attr,
1666        &dev_attr_params.attr,
1667        NULL
1668};
1669
1670static const struct attribute_group ipmi_si_dev_attr_group = {
1671        .attrs          = ipmi_si_dev_attrs,
1672};
1673
1674/*
1675 * oem_data_avail_to_receive_msg_avail
1676 * @info - smi_info structure with msg_flags set
1677 *
1678 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1679 * Returns 1 to indicate that handle_flags() needs to be re-run.
1680 */
1681static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1682{
1683        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1684                               RECEIVE_MSG_AVAIL);
1685        return 1;
1686}
1687
1688/*
1689 * setup_dell_poweredge_oem_data_handler
1690 * @info - smi_info.device_id must be populated
1691 *
1692 * Systems that match but have firmware version < 1.40 may assert
1693 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
1694 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
1695 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
1696 * as RECEIVE_MSG_AVAIL instead.
1697 *
1698 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
1699 * asserts the OEM[012] bits, and if it did, the driver would have to
1700 * change to handle that properly, we don't actually check for the
1701 * firmware version.
1702 * Device ID = 0x20                BMC on PowerEdge 8G servers
1703 * Device Revision = 0x80
1704 * Firmware Revision1 = 0x01       BMC version 1.40
1705 * Firmware Revision2 = 0x40       BCD encoded
1706 * IPMI Version = 0x51             IPMI 1.5
1707 * Manufacturer ID = A2 02 00      Dell IANA
1708 *
1709 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
1710 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
1711 *
1712 */
1713#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
1714#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
1715#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
1716#define DELL_IANA_MFR_ID 0x0002a2
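    /* 0x0002a2 is the "A2 02 00" Dell IANA number above, read little-endian. */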
1717static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
1718{
1719        struct ipmi_device_id *id = &smi_info->device_id;
1720        if (id->manufacturer_id == DELL_IANA_MFR_ID) {
1721                if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
1722                    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
1723                    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
1724                        smi_info->oem_data_avail_handler =
1725                                oem_data_avail_to_receive_msg_avail;
1726                } else if (ipmi_version_major(id) < 1 ||
1727                           (ipmi_version_major(id) == 1 &&
1728                            ipmi_version_minor(id) < 5)) {
1729                        smi_info->oem_data_avail_handler =
1730                                oem_data_avail_to_receive_msg_avail;
1731                }
1732        }
1733}
1734
1735#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
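    /* 0xCA is the standard IPMI completion code for this exact condition. */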
1736static void return_hosed_msg_badsize(struct smi_info *smi_info)
1737{
1738        struct ipmi_smi_msg *msg = smi_info->curr_msg;
1739
1740        /* Make it a response */
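            /* (ORing with 4 turns (netfn << 2) into the response form, ((netfn | 1) << 2)) */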
1741        msg->rsp[0] = msg->data[0] | 4;
1742        msg->rsp[1] = msg->data[1];
1743        msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
1744        msg->rsp_size = 3;
1745        smi_info->curr_msg = NULL;
1746        deliver_recv_msg(smi_info, msg);
1747}
1748
1749/*
1750 * dell_poweredge_bt_xaction_handler
1751 * @info - smi_info.device_id must be populated
1752 *
1753 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
1754 * not respond to a Get SDR command if the length of the data
1755 * requested is exactly 0x3A, which leads to command timeouts and no
1756 * data returned.  This intercepts such commands, and causes userspace
1757 * callers to try again with a different-sized buffer, which succeeds.
1758 */
1759
1760#define STORAGE_NETFN 0x0A
1761#define STORAGE_CMD_GET_SDR 0x23
1762static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
1763                                             unsigned long unused,
1764                                             void *in)
1765{
1766        struct smi_info *smi_info = in;
1767        unsigned char *data = smi_info->curr_msg->data;
1768        unsigned int size   = smi_info->curr_msg->data_size;
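            /*
             * Per the IPMI spec, data[7] here is the Get SDR "bytes to
             * read" field; 0x3A is the request length these BMCs mishandle.
             */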
1769        if (size >= 8 &&
1770            (data[0]>>2) == STORAGE_NETFN &&
1771            data[1] == STORAGE_CMD_GET_SDR &&
1772            data[7] == 0x3A) {
1773                return_hosed_msg_badsize(smi_info);
1774                return NOTIFY_STOP;
1775        }
1776        return NOTIFY_DONE;
1777}
1778
1779static struct notifier_block dell_poweredge_bt_xaction_notifier = {
1780        .notifier_call  = dell_poweredge_bt_xaction_handler,
1781};
1782
1783/*
1784 * setup_dell_poweredge_bt_xaction_handler
1785 * @info - smi_info.device_id must be filled in already
1786 *
1787 * Registers the Dell PowerEdge BT transaction notifier
1788 * when we know the hardware needs it.
1789 */
1790static void
1791setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
1792{
1793        struct ipmi_device_id *id = &smi_info->device_id;
1794        if (id->manufacturer_id == DELL_IANA_MFR_ID &&
1795            smi_info->io.si_type == SI_BT)
1796                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
1797}
1798
1799/*
1800 * setup_oem_data_handler
1801 * @info - smi_info.device_id must be filled in already
1802 *
1803 * Fills in smi_info.oem_data_avail_handler
1804 * when we know what function to use there.
1805 */
1806
1807static void setup_oem_data_handler(struct smi_info *smi_info)
1808{
1809        setup_dell_poweredge_oem_data_handler(smi_info);
1810}
1811
1812static void setup_xaction_handlers(struct smi_info *smi_info)
1813{
1814        setup_dell_poweredge_bt_xaction_handler(smi_info);
1815}
1816
1817static void check_for_broken_irqs(struct smi_info *smi_info)
1818{
1819        check_clr_rcv_irq(smi_info);
1820        check_set_rcv_irq(smi_info);
1821}
1822
1823static inline void stop_timer_and_thread(struct smi_info *smi_info)
1824{
1825        if (smi_info->thread != NULL) {
1826                kthread_stop(smi_info->thread);
1827                smi_info->thread = NULL;
1828        }
1829
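            /*
             * Clear timer_can_start first: the timer-start path checks this
             * flag, so the timer cannot be rearmed once del_timer_sync()
             * has stopped it.
             */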
1830        smi_info->timer_can_start = false;
1831        if (smi_info->timer_running)
1832                del_timer_sync(&smi_info->si_timer);
1833}
1834
1835static struct smi_info *find_dup_si(struct smi_info *info)
1836{
1837        struct smi_info *e;
1838
1839        list_for_each_entry(e, &smi_infos, link) {
1840                if (e->io.addr_space != info->io.addr_space)
1841                        continue;
1842                if (e->io.addr_data == info->io.addr_data) {
1843                        /*
1844                         * This is a cheap hack, ACPI doesn't have a defined
1845                         * slave address but SMBIOS does.  Pick it up from
1846                         * any source that has it available.
1847                         */
1848                        if (info->io.slave_addr && !e->io.slave_addr)
1849                                e->io.slave_addr = info->io.slave_addr;
1850                        return e;
1851                }
1852        }
1853
1854        return NULL;
1855}
1856
1857int ipmi_si_add_smi(struct si_sm_io *io)
1858{
1859        int rv = 0;
1860        struct smi_info *new_smi, *dup;
1861
1862        /*
1863         * If the user gave us a hard-coded device at the same
1864         * address, they presumably want us to use it and not what is
1865         * in the firmware.
1866         */
1867        if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
1868            ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
1869                dev_info(io->dev,
1870                         "Hard-coded device at this address already exists\n");
1871                return -ENODEV;
1872        }
1873
1874        if (!io->io_setup) {
1875                if (io->addr_space == IPMI_IO_ADDR_SPACE) {
1876                        io->io_setup = ipmi_si_port_setup;
1877                } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
1878                        io->io_setup = ipmi_si_mem_setup;
1879                } else {
1880                        return -EINVAL;
1881                }
1882        }
1883
1884        new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
1885        if (!new_smi)
1886                return -ENOMEM;
1887        spin_lock_init(&new_smi->si_lock);
1888
1889        new_smi->io = *io;
1890
1891        mutex_lock(&smi_infos_lock);
1892        dup = find_dup_si(new_smi);
1893        if (dup) {
1894                if (new_smi->io.addr_source == SI_ACPI &&
1895                    dup->io.addr_source == SI_SMBIOS) {
1896                        /* We prefer ACPI over SMBIOS. */
1897                        dev_info(dup->io.dev,
1898                                 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
1899                                 si_to_str[new_smi->io.si_type]);
1900                        cleanup_one_si(dup);
1901                } else {
1902                        dev_info(new_smi->io.dev,
1903                                 "%s-specified %s state machine: duplicate\n",
1904                                 ipmi_addr_src_to_str(new_smi->io.addr_source),
1905                                 si_to_str[new_smi->io.si_type]);
1906                        rv = -EBUSY;
1907                        kfree(new_smi);
1908                        goto out_err;
1909                }
1910        }
1911
1912        pr_info("Adding %s-specified %s state machine\n",
1913                ipmi_addr_src_to_str(new_smi->io.addr_source),
1914                si_to_str[new_smi->io.si_type]);
1915
1916        list_add_tail(&new_smi->link, &smi_infos);
1917
1918        if (initialized)
1919                rv = try_smi_init(new_smi);
1920out_err:
1921        mutex_unlock(&smi_infos_lock);
1922        return rv;
1923}
1924
1925/*
1926 * Try to start up an interface.  Must be called with smi_infos_lock
1927 * held, primarily to keep smi_num consistent; we only want to do
1928 * these one at a time.
1929 */
1930static int try_smi_init(struct smi_info *new_smi)
1931{
1932        int rv = 0;
1933        int i;
1934
1935        pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
1936                ipmi_addr_src_to_str(new_smi->io.addr_source),
1937                si_to_str[new_smi->io.si_type],
1938                addr_space_to_str[new_smi->io.addr_space],
1939                new_smi->io.addr_data,
1940                new_smi->io.slave_addr, new_smi->io.irq);
1941
1942        switch (new_smi->io.si_type) {
1943        case SI_KCS:
1944                new_smi->handlers = &kcs_smi_handlers;
1945                break;
1946
1947        case SI_SMIC:
1948                new_smi->handlers = &smic_smi_handlers;
1949                break;
1950
1951        case SI_BT:
1952                new_smi->handlers = &bt_smi_handlers;
1953                break;
1954
1955        default:
1956                /* No support for anything else yet. */
1957                rv = -EIO;
1958                goto out_err;
1959        }
1960
1961        new_smi->si_num = smi_num;
1962
1963        /* Do this early so it's available for logs. */
1964        if (!new_smi->io.dev) {
1965                pr_err("IPMI interface added with no device\n");
1966                rv = -EIO;
1967                goto out_err;
1968        }
1969
1970        /* Allocate the state machine's data and initialize it. */
1971        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
1972        if (!new_smi->si_sm) {
1973                rv = -ENOMEM;
1974                goto out_err;
1975        }
1976        new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
1977                                                           &new_smi->io);
1978
1979        /* Now that we know the I/O size, we can set up the I/O. */
1980        rv = new_smi->io.io_setup(&new_smi->io);
1981        if (rv) {
1982                dev_err(new_smi->io.dev, "Could not set up I/O space\n");
1983                goto out_err;
1984        }
1985
1986        /* Do low-level detection first. */
1987        if (new_smi->handlers->detect(new_smi->si_sm)) {
1988                if (new_smi->io.addr_source)
1989                        dev_err(new_smi->io.dev,
1990                                "Interface detection failed\n");
1991                rv = -ENODEV;
1992                goto out_err;
1993        }
1994
1995        /*
1996         * Attempt a get device id command.  If it fails, we probably
1997         * don't have a BMC here.
1998         */
1999        rv = try_get_dev_id(new_smi);
2000        if (rv) {
2001                if (new_smi->io.addr_source)
2002                        dev_err(new_smi->io.dev,
2003                               "There appears to be no BMC at this location\n");
2004                goto out_err;
2005        }
2006
2007        setup_oem_data_handler(new_smi);
2008        setup_xaction_handlers(new_smi);
2009        check_for_broken_irqs(new_smi);
2010
2011        new_smi->waiting_msg = NULL;
2012        new_smi->curr_msg = NULL;
2013        atomic_set(&new_smi->req_events, 0);
2014        new_smi->run_to_completion = false;
2015        for (i = 0; i < SI_NUM_STATS; i++)
2016                atomic_set(&new_smi->stats[i], 0);
2017
2018        new_smi->interrupt_disabled = true;
2019        atomic_set(&new_smi->need_watch, 0);
2020
2021        rv = try_enable_event_buffer(new_smi);
2022        if (rv == 0)
2023                new_smi->has_event_buffer = true;
2024
2025        /*
2026         * Start clearing the flags before we enable interrupts or the
2027         * timer to avoid racing with the timer.
2028         */
2029        start_clear_flags(new_smi);
2030
2031        /*
2032         * An IRQ is considered present when io.irq is non-zero.  Setting
2033         * req_events will cause a global flags check that enables interrupts.
2034         */
2035        if (new_smi->io.irq) {
2036                new_smi->interrupt_disabled = false;
2037                atomic_set(&new_smi->req_events, 1);
2038        }
2039
2040        dev_set_drvdata(new_smi->io.dev, new_smi);
2041        rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
2042        if (rv) {
2043                dev_err(new_smi->io.dev,
2044                        "Unable to add device attributes: error %d\n",
2045                        rv);
2046                goto out_err;
2047        }
2048        new_smi->dev_group_added = true;
2049
2050        rv = ipmi_register_smi(&handlers,
2051                               new_smi,
2052                               new_smi->io.dev,
2053                               new_smi->io.slave_addr);
2054        if (rv) {
2055                dev_err(new_smi->io.dev,
2056                        "Unable to register device: error %d\n",
2057                        rv);
2058                goto out_err;
2059        }
2060
2061        /* Don't increment till we know we have succeeded. */
2062        smi_num++;
2063
2064        dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
2065                 si_to_str[new_smi->io.si_type]);
2066
2067        WARN_ON(new_smi->io.dev->init_name != NULL);
2068
2069 out_err:
2070        if (rv && new_smi->io.io_cleanup) {
2071                new_smi->io.io_cleanup(&new_smi->io);
2072                new_smi->io.io_cleanup = NULL;
2073        }
2074
2075        return rv;
2076}
2077
2078static int __init init_ipmi_si(void)
2079{
2080        struct smi_info *e;
2081        enum ipmi_addr_src type = SI_INVALID;
2082
2083        if (initialized)
2084                return 0;
2085
2086        ipmi_hardcode_init();
2087
2088        pr_info("IPMI System Interface driver\n");
2089
2090        ipmi_si_platform_init();
2091
2092        ipmi_si_pci_init();
2093
2094        ipmi_si_parisc_init();
2095
2096        /* We prefer devices with interrupts, but in the case of a machine
2097         * with multiple BMCs we assume that there will be several instances
2098         * of a given type, so if we succeed in registering a type then also
2099         * try to register everything else of the same type. */
2100        mutex_lock(&smi_infos_lock);
2101        list_for_each_entry(e, &smi_infos, link) {
2102                /* Try to register a device if it has an IRQ and we either
2103                 * haven't successfully registered a device yet or this
2104                 * device has the same type as one we successfully registered. */
2105                if (e->io.irq && (!type || e->io.addr_source == type)) {
2106                        if (!try_smi_init(e)) {
2107                                type = e->io.addr_source;
2108                        }
2109                }
2110        }
2111
2112        /* type will only have been set if we successfully registered an si */
2113        if (type)
2114                goto skip_fallback_noirq;
2115
2116        /* Fall back to devices without interrupts */
2117
2118        list_for_each_entry(e, &smi_infos, link) {
2119                if (!e->io.irq && (!type || e->io.addr_source == type)) {
2120                        if (!try_smi_init(e)) {
2121                                type = e->io.addr_source;
2122                        }
2123                }
2124        }
2125
2126skip_fallback_noirq:
2127        initialized = true;
2128        mutex_unlock(&smi_infos_lock);
2129
2130        if (type)
2131                return 0;
2132
2133        mutex_lock(&smi_infos_lock);
2134        if (unload_when_empty && list_empty(&smi_infos)) {
2135                mutex_unlock(&smi_infos_lock);
2136                cleanup_ipmi_si();
2137                pr_warn("Unable to find any System Interface(s)\n");
2138                return -ENODEV;
2139        } else {
2140                mutex_unlock(&smi_infos_lock);
2141                return 0;
2142        }
2143}
2144module_init(init_ipmi_si);
2145
2146static void shutdown_smi(void *send_info)
2147{
2148        struct smi_info *smi_info = send_info;
2149
2150        if (smi_info->dev_group_added) {
2151                device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
2152                smi_info->dev_group_added = false;
2153        }
2154        if (smi_info->io.dev)
2155                dev_set_drvdata(smi_info->io.dev, NULL);
2156
2157        /*
2158         * Make sure that interrupts, the timer and the thread are
2159         * stopped and will not run again.
2160         */
2161        smi_info->interrupt_disabled = true;
2162        if (smi_info->io.irq_cleanup) {
2163                smi_info->io.irq_cleanup(&smi_info->io);
2164                smi_info->io.irq_cleanup = NULL;
2165        }
2166        stop_timer_and_thread(smi_info);
2167
2168        /*
2169         * Wait until we know that we are out of any interrupt
2170         * handlers that might have been running before we freed the
2171         * interrupt.
2172         */
2173        synchronize_rcu();
2174
2175        /*
2176         * Timeouts are stopped, now make sure the interrupts are off
2177         * in the BMC.  Note that timers and CPU interrupts are off,
2178         * so no need for locks.
2179         */
2180        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
2181                poll(smi_info);
2182                schedule_timeout_uninterruptible(1);
2183        }
2184        if (smi_info->handlers)
2185                disable_si_irq(smi_info);
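            /*
             * disable_si_irq() may queue one more transaction to turn
             * interrupts off in the BMC, so drain the state machine again
             * until that message completes.
             */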
2186        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
2187                poll(smi_info);
2188                schedule_timeout_uninterruptible(1);
2189        }
2190        if (smi_info->handlers)
2191                smi_info->handlers->cleanup(smi_info->si_sm);
2192
2193        if (smi_info->io.addr_source_cleanup) {
2194                smi_info->io.addr_source_cleanup(&smi_info->io);
2195                smi_info->io.addr_source_cleanup = NULL;
2196        }
2197        if (smi_info->io.io_cleanup) {
2198                smi_info->io.io_cleanup(&smi_info->io);
2199                smi_info->io.io_cleanup = NULL;
2200        }
2201
2202        kfree(smi_info->si_sm);
2203        smi_info->si_sm = NULL;
2204
2205        smi_info->intf = NULL;
2206}
2207
2208/*
2209 * Must be called with smi_infos_lock held, to serialize the
2210 * smi_info->intf check.
2211 */
2212static void cleanup_one_si(struct smi_info *smi_info)
2213{
2214        if (!smi_info)
2215                return;
2216
2217        list_del(&smi_info->link);
2218
2219        if (smi_info->intf)
2220                ipmi_unregister_smi(smi_info->intf);
2221
2222        kfree(smi_info);
2223}
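    /*
     * Note that cleanup_one_si() only unlinks and unregisters the
     * interface; the heavy teardown is done by shutdown_smi(), which the
     * IPMI core calls back through ipmi_unregister_smi().
     */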
2224
2225int ipmi_si_remove_by_dev(struct device *dev)
2226{
2227        struct smi_info *e;
2228        int rv = -ENOENT;
2229
2230        mutex_lock(&smi_infos_lock);
2231        list_for_each_entry(e, &smi_infos, link) {
2232                if (e->io.dev == dev) {
2233                        cleanup_one_si(e);
2234                        rv = 0;
2235                        break;
2236                }
2237        }
2238        mutex_unlock(&smi_infos_lock);
2239
2240        return rv;
2241}
2242
2243struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
2244                                      unsigned long addr)
2245{
2246        /* remove */
2247        struct smi_info *e, *tmp_e;
2248        struct device *dev = NULL;
2249
2250        mutex_lock(&smi_infos_lock);
2251        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
2252                if (e->io.addr_space != addr_space)
2253                        continue;
2254                if (e->io.si_type != si_type)
2255                        continue;
2256                if (e->io.addr_data == addr) {
2257                        dev = get_device(e->io.dev);
2258                        cleanup_one_si(e);
2259                }
2260        }
2261        mutex_unlock(&smi_infos_lock);
2262
2263        return dev;
2264}
2265
2266static void cleanup_ipmi_si(void)
2267{
2268        struct smi_info *e, *tmp_e;
2269
2270        if (!initialized)
2271                return;
2272
2273        ipmi_si_pci_shutdown();
2274
2275        ipmi_si_parisc_shutdown();
2276
2277        ipmi_si_platform_shutdown();
2278
2279        mutex_lock(&smi_infos_lock);
2280        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2281                cleanup_one_si(e);
2282        mutex_unlock(&smi_infos_lock);
2283
2284        ipmi_si_hardcode_exit();
2285        ipmi_si_hotmod_exit();
2286}
2287module_exit(cleanup_ipmi_si);
2288
2289MODULE_ALIAS("platform:dmi-ipmi-si");
2290MODULE_LICENSE("GPL");
2291MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2292MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
2293                   " system interfaces.");
2294