linux/drivers/char/ipmi/ipmi_msghandler.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * ipmi_msghandler.c
   4 *
   5 * Incoming and outgoing message routing for an IPMI interface.
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 */
  13
  14#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
  15#define dev_fmt pr_fmt
  16
  17#include <linux/module.h>
  18#include <linux/errno.h>
  19#include <linux/poll.h>
  20#include <linux/sched.h>
  21#include <linux/seq_file.h>
  22#include <linux/spinlock.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
  25#include <linux/ipmi.h>
  26#include <linux/ipmi_smi.h>
  27#include <linux/notifier.h>
  28#include <linux/init.h>
  29#include <linux/proc_fs.h>
  30#include <linux/rcupdate.h>
  31#include <linux/interrupt.h>
  32#include <linux/moduleparam.h>
  33#include <linux/workqueue.h>
  34#include <linux/uuid.h>
  35#include <linux/nospec.h>
  36
  37#define IPMI_DRIVER_VERSION "39.2"
  38
  39static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
  40static int ipmi_init_msghandler(void);
  41static void smi_recv_tasklet(unsigned long);
  42static void handle_new_recv_msgs(struct ipmi_smi *intf);
  43static void need_waiter(struct ipmi_smi *intf);
  44static int handle_one_recv_msg(struct ipmi_smi *intf,
  45                               struct ipmi_smi_msg *msg);
  46
  47#ifdef DEBUG
  48static void ipmi_debug_msg(const char *title, unsigned char *data,
  49                           unsigned int len)
  50{
  51        int i, pos;
  52        char buf[100];
  53
   54        pos = scnprintf(buf, sizeof(buf), "%s: ", title);
   55        for (i = 0; i < len; i++)
   56                pos += scnprintf(buf + pos, sizeof(buf) - pos,
   57                                 " %2.2x", data[i]);
  58        pr_debug("%s\n", buf);
  59}
  60#else
  61static void ipmi_debug_msg(const char *title, unsigned char *data,
  62                           unsigned int len)
  63{ }
  64#endif
  65
  66static bool initialized;
  67static bool drvregistered;
  68
  69enum ipmi_panic_event_op {
  70        IPMI_SEND_PANIC_EVENT_NONE,
  71        IPMI_SEND_PANIC_EVENT,
  72        IPMI_SEND_PANIC_EVENT_STRING
  73};
  74#ifdef CONFIG_IPMI_PANIC_STRING
  75#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
  76#elif defined(CONFIG_IPMI_PANIC_EVENT)
  77#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
  78#else
  79#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
  80#endif
  81static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
  82
  83static int panic_op_write_handler(const char *val,
  84                                  const struct kernel_param *kp)
  85{
  86        char valcp[16];
  87        char *s;
  88
  89        strncpy(valcp, val, 15);
  90        valcp[15] = '\0';
  91
  92        s = strstrip(valcp);
  93
  94        if (strcmp(s, "none") == 0)
  95                ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
  96        else if (strcmp(s, "event") == 0)
  97                ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
  98        else if (strcmp(s, "string") == 0)
  99                ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
 100        else
 101                return -EINVAL;
 102
 103        return 0;
 104}
 105
 106static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
 107{
 108        switch (ipmi_send_panic_event) {
 109        case IPMI_SEND_PANIC_EVENT_NONE:
 110                strcpy(buffer, "none");
 111                break;
 112
 113        case IPMI_SEND_PANIC_EVENT:
 114                strcpy(buffer, "event");
 115                break;
 116
 117        case IPMI_SEND_PANIC_EVENT_STRING:
 118                strcpy(buffer, "string");
 119                break;
 120
 121        default:
 122                strcpy(buffer, "???");
 123                break;
 124        }
 125
 126        return strlen(buffer);
 127}
 128
 129static const struct kernel_param_ops panic_op_ops = {
 130        .set = panic_op_write_handler,
 131        .get = panic_op_read_handler
 132};
 133module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
  134MODULE_PARM_DESC(panic_op, "Sets whether the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' to disable, 'event' for a single event, or 'string' for a generic event plus the panic string in IPMI OEM events.");
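/*
 * Usage note (added for illustration; the sysfs path is an assumption
 * based on this file being built as the ipmi_msghandler module): the
 * behavior can be chosen at load time, e.g. with
 * "ipmi_msghandler.panic_op=string" on the kernel command line, or
 * changed at runtime through
 * /sys/module/ipmi_msghandler/parameters/panic_op, since the parameter
 * is registered above with mode 0600.
 */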
 135
 136
 137#define MAX_EVENTS_IN_QUEUE     25
 138
 139/* Remain in auto-maintenance mode for this amount of time (in ms). */
 140static unsigned long maintenance_mode_timeout_ms = 30000;
 141module_param(maintenance_mode_timeout_ms, ulong, 0644);
 142MODULE_PARM_DESC(maintenance_mode_timeout_ms,
 143                 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
 144
 145/*
  146 * Don't let a message sit in a queue forever; always time it with at least
 147 * the max message timer.  This is in milliseconds.
 148 */
 149#define MAX_MSG_TIMEOUT         60000
 150
 151/*
 152 * Timeout times below are in milliseconds, and are done off a 1
 153 * second timer.  So setting the value to 1000 would mean anything
 154 * between 0 and 1000ms.  So really the only reasonable minimum
  155 * setting is 2000ms, which is between 1 and 2 seconds.
 156 */
 157
 158/* The default timeout for message retries. */
 159static unsigned long default_retry_ms = 2000;
 160module_param(default_retry_ms, ulong, 0644);
 161MODULE_PARM_DESC(default_retry_ms,
 162                 "The time (milliseconds) between retry sends");
 163
 164/* The default timeout for maintenance mode message retries. */
 165static unsigned long default_maintenance_retry_ms = 3000;
 166module_param(default_maintenance_retry_ms, ulong, 0644);
 167MODULE_PARM_DESC(default_maintenance_retry_ms,
 168                 "The time (milliseconds) between retry sends in maintenance mode");
 169
 170/* The default maximum number of retries */
 171static unsigned int default_max_retries = 4;
 172module_param(default_max_retries, uint, 0644);
 173MODULE_PARM_DESC(default_max_retries,
  174                 "The maximum number of times a message send will be retried");
 175
 176/* Call every ~1000 ms. */
 177#define IPMI_TIMEOUT_TIME       1000
 178
 179/* How many jiffies does it take to get to the timeout time. */
 180#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
 181
 182/*
 183 * Request events from the queue every second (this is the number of
 184 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 185 * future, IPMI will add a way to know immediately if an event is in
 186 * the queue and this silliness can go away.
 187 */
 188#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
 189
 190/* How long should we cache dynamic device IDs? */
 191#define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
 192
 193/*
 194 * The main "user" data structure.
 195 */
 196struct ipmi_user {
 197        struct list_head link;
 198
 199        /*
  200         * A pointer to myself so srcu_dereference can be used on it;
  201         * set to NULL when the user is destroyed.
 202         */
 203        struct ipmi_user *self;
 204        struct srcu_struct release_barrier;
 205
 206        struct kref refcount;
 207
 208        /* The upper layer that handles receive messages. */
 209        const struct ipmi_user_hndl *handler;
 210        void             *handler_data;
 211
 212        /* The interface this user is bound to. */
 213        struct ipmi_smi *intf;
 214
 215        /* Does this interface receive IPMI events? */
 216        bool gets_events;
 217
 218        /* Free must run in process context for RCU cleanup. */
 219        struct work_struct remove_work;
 220};
 221
 222static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
 223        __acquires(user->release_barrier)
 224{
 225        struct ipmi_user *ruser;
 226
 227        *index = srcu_read_lock(&user->release_barrier);
 228        ruser = srcu_dereference(user->self, &user->release_barrier);
 229        if (!ruser)
 230                srcu_read_unlock(&user->release_barrier, *index);
 231        return ruser;
 232}
 233
 234static void release_ipmi_user(struct ipmi_user *user, int index)
 235{
 236        srcu_read_unlock(&user->release_barrier, index);
 237}
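/*
 * Illustrative sketch (added for clarity, not part of the original
 * source; the function names are hypothetical): every entry point that
 * is handed a struct ipmi_user revalidates it with the acquire/release
 * pair above, bails out with -ENODEV if the user has been destroyed,
 * and drops the SRCU read lock on every exit path, as the exported
 * calls later in this file do.
 *
 *	static int example_user_op(struct ipmi_user *user)
 *	{
 *		int index, rv;
 *
 *		user = acquire_ipmi_user(user, &index);
 *		if (!user)
 *			return -ENODEV;
 *
 *		rv = do_something_with(user->intf);
 *
 *		release_ipmi_user(user, index);
 *		return rv;
 *	}
 */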
 238
 239struct cmd_rcvr {
 240        struct list_head link;
 241
 242        struct ipmi_user *user;
 243        unsigned char netfn;
 244        unsigned char cmd;
 245        unsigned int  chans;
 246
 247        /*
  248         * This is used to form a linked list during mass deletion.
 249         * Since this is in an RCU list, we cannot use the link above
 250         * or change any data until the RCU period completes.  So we
 251         * use this next variable during mass deletion so we can have
 252         * a list and don't have to wait and restart the search on
 253         * every individual deletion of a command.
 254         */
 255        struct cmd_rcvr *next;
 256};
 257
 258struct seq_table {
 259        unsigned int         inuse : 1;
 260        unsigned int         broadcast : 1;
 261
 262        unsigned long        timeout;
 263        unsigned long        orig_timeout;
 264        unsigned int         retries_left;
 265
 266        /*
  267         * To verify that an incoming send message response matches
  268         * the message it is a response to, we keep a sequence id
 269         * and increment it every time we send a message.
 270         */
 271        long                 seqid;
 272
 273        /*
 274         * This is held so we can properly respond to the message on a
 275         * timeout, and it is used to hold the temporary data for
 276         * retransmission, too.
 277         */
 278        struct ipmi_recv_msg *recv_msg;
 279};
 280
 281/*
 282 * Store the information in a msgid (long) to allow us to find a
 283 * sequence table entry from the msgid.
 284 */
 285#define STORE_SEQ_IN_MSGID(seq, seqid) \
 286        ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
 287
 288#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
 289        do {                                                            \
 290                seq = (((msgid) >> 26) & 0x3f);                         \
 291                seqid = ((msgid) & 0x3ffffff);                          \
 292        } while (0)
 293
 294#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
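/*
 * Worked example (added for illustration): STORE_SEQ_IN_MSGID() packs the
 * 6-bit sequence table index into bits 26-31 and the 26-bit sequence id
 * into bits 0-25, and GET_SEQ_FROM_MSGID() undoes it:
 *
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x123456);   (msgid == 0x14123456)
 *	unsigned char seq;
 *	unsigned long seqid;
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);   (seq == 5, seqid == 0x123456)
 *
 * NEXT_SEQID() increments and wraps within the same 26 bits, so
 * NEXT_SEQID(0x3ffffff) is 0.
 */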
 295
 296#define IPMI_MAX_CHANNELS       16
 297struct ipmi_channel {
 298        unsigned char medium;
 299        unsigned char protocol;
 300};
 301
 302struct ipmi_channel_set {
 303        struct ipmi_channel c[IPMI_MAX_CHANNELS];
 304};
 305
 306struct ipmi_my_addrinfo {
 307        /*
 308         * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
 309         * but may be changed by the user.
 310         */
 311        unsigned char address;
 312
 313        /*
 314         * My LUN.  This should generally stay the SMS LUN, but just in
 315         * case...
 316         */
 317        unsigned char lun;
 318};
 319
 320/*
 321 * Note that the product id, manufacturer id, guid, and device id are
 322 * immutable in this structure, so dyn_mutex is not required for
 323 * accessing those.  If those change on a BMC, a new BMC is allocated.
 324 */
 325struct bmc_device {
 326        struct platform_device pdev;
 327        struct list_head       intfs; /* Interfaces on this BMC. */
 328        struct ipmi_device_id  id;
 329        struct ipmi_device_id  fetch_id;
 330        int                    dyn_id_set;
 331        unsigned long          dyn_id_expiry;
 332        struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
 333        guid_t                 guid;
 334        guid_t                 fetch_guid;
 335        int                    dyn_guid_set;
 336        struct kref            usecount;
 337        struct work_struct     remove_work;
 338};
 339#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 340
 341static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
 342                             struct ipmi_device_id *id,
 343                             bool *guid_set, guid_t *guid);
 344
 345/*
 346 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 347 * structure.
 348 */
 349enum ipmi_stat_indexes {
 350        /* Commands we got from the user that were invalid. */
 351        IPMI_STAT_sent_invalid_commands = 0,
 352
 353        /* Commands we sent to the MC. */
 354        IPMI_STAT_sent_local_commands,
 355
 356        /* Responses from the MC that were delivered to a user. */
 357        IPMI_STAT_handled_local_responses,
 358
 359        /* Responses from the MC that were not delivered to a user. */
 360        IPMI_STAT_unhandled_local_responses,
 361
 362        /* Commands we sent out to the IPMB bus. */
 363        IPMI_STAT_sent_ipmb_commands,
 364
 365        /* Commands sent on the IPMB that had errors on the SEND CMD */
 366        IPMI_STAT_sent_ipmb_command_errs,
 367
 368        /* Each retransmit increments this count. */
 369        IPMI_STAT_retransmitted_ipmb_commands,
 370
 371        /*
 372         * When a message times out (runs out of retransmits) this is
 373         * incremented.
 374         */
 375        IPMI_STAT_timed_out_ipmb_commands,
 376
 377        /*
 378         * This is like above, but for broadcasts.  Broadcasts are
 379         * *not* included in the above count (they are expected to
 380         * time out).
 381         */
 382        IPMI_STAT_timed_out_ipmb_broadcasts,
 383
 384        /* Responses I have sent to the IPMB bus. */
 385        IPMI_STAT_sent_ipmb_responses,
 386
 387        /* The response was delivered to the user. */
 388        IPMI_STAT_handled_ipmb_responses,
 389
 390        /* The response had invalid data in it. */
 391        IPMI_STAT_invalid_ipmb_responses,
 392
 393        /* The response didn't have anyone waiting for it. */
 394        IPMI_STAT_unhandled_ipmb_responses,
 395
  396        /* Commands we sent out on the LAN interface. */
 397        IPMI_STAT_sent_lan_commands,
 398
  399        /* Commands sent on the LAN that had errors on the SEND CMD */
 400        IPMI_STAT_sent_lan_command_errs,
 401
 402        /* Each retransmit increments this count. */
 403        IPMI_STAT_retransmitted_lan_commands,
 404
 405        /*
 406         * When a message times out (runs out of retransmits) this is
 407         * incremented.
 408         */
 409        IPMI_STAT_timed_out_lan_commands,
 410
  411        /* Responses I have sent on the LAN interface. */
 412        IPMI_STAT_sent_lan_responses,
 413
 414        /* The response was delivered to the user. */
 415        IPMI_STAT_handled_lan_responses,
 416
 417        /* The response had invalid data in it. */
 418        IPMI_STAT_invalid_lan_responses,
 419
 420        /* The response didn't have anyone waiting for it. */
 421        IPMI_STAT_unhandled_lan_responses,
 422
 423        /* The command was delivered to the user. */
 424        IPMI_STAT_handled_commands,
 425
 426        /* The command had invalid data in it. */
 427        IPMI_STAT_invalid_commands,
 428
 429        /* The command didn't have anyone waiting for it. */
 430        IPMI_STAT_unhandled_commands,
 431
 432        /* Invalid data in an event. */
 433        IPMI_STAT_invalid_events,
 434
 435        /* Events that were received with the proper format. */
 436        IPMI_STAT_events,
 437
 438        /* Retransmissions on IPMB that failed. */
 439        IPMI_STAT_dropped_rexmit_ipmb_commands,
 440
 441        /* Retransmissions on LAN that failed. */
 442        IPMI_STAT_dropped_rexmit_lan_commands,
 443
 444        /* This *must* remain last, add new values above this. */
 445        IPMI_NUM_STATS
 446};
 447
 448
 449#define IPMI_IPMB_NUM_SEQ       64
 450struct ipmi_smi {
 451        /* What interface number are we? */
 452        int intf_num;
 453
 454        struct kref refcount;
 455
 456        /* Set when the interface is being unregistered. */
 457        bool in_shutdown;
 458
 459        /* Used for a list of interfaces. */
 460        struct list_head link;
 461
 462        /*
 463         * The list of upper layers that are using me.  seq_lock write
 464         * protects this.  Read protection is with srcu.
 465         */
 466        struct list_head users;
 467        struct srcu_struct users_srcu;
 468
 469        /* Used for wake ups at startup. */
 470        wait_queue_head_t waitq;
 471
 472        /*
 473         * Prevents the interface from being unregistered when the
 474         * interface is used by being looked up through the BMC
 475         * structure.
 476         */
 477        struct mutex bmc_reg_mutex;
 478
 479        struct bmc_device tmp_bmc;
 480        struct bmc_device *bmc;
 481        bool bmc_registered;
 482        struct list_head bmc_link;
 483        char *my_dev_name;
 484        bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
 485        struct work_struct bmc_reg_work;
 486
 487        const struct ipmi_smi_handlers *handlers;
 488        void                     *send_info;
 489
 490        /* Driver-model device for the system interface. */
 491        struct device          *si_dev;
 492
 493        /*
 494         * A table of sequence numbers for this interface.  We use the
 495         * sequence numbers for IPMB messages that go out of the
 496         * interface to match them up with their responses.  A routine
 497         * is called periodically to time the items in this list.
 498         */
 499        spinlock_t       seq_lock;
 500        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
 501        int curr_seq;
 502
 503        /*
 504         * Messages queued for delivery.  If delivery fails (out of memory
  505         * for instance), they will stay in here to be processed later in a
 506         * periodic timer interrupt.  The tasklet is for handling received
 507         * messages directly from the handler.
 508         */
 509        spinlock_t       waiting_rcv_msgs_lock;
 510        struct list_head waiting_rcv_msgs;
 511        atomic_t         watchdog_pretimeouts_to_deliver;
 512        struct tasklet_struct recv_tasklet;
 513
 514        spinlock_t             xmit_msgs_lock;
 515        struct list_head       xmit_msgs;
 516        struct ipmi_smi_msg    *curr_msg;
 517        struct list_head       hp_xmit_msgs;
 518
 519        /*
 520         * The list of command receivers that are registered for commands
 521         * on this interface.
 522         */
 523        struct mutex     cmd_rcvrs_mutex;
 524        struct list_head cmd_rcvrs;
 525
 526        /*
  527         * Events that were queued because no one was there to receive
 528         * them.
 529         */
 530        spinlock_t       events_lock; /* For dealing with event stuff. */
 531        struct list_head waiting_events;
 532        unsigned int     waiting_events_count; /* How many events in queue? */
 533        char             delivering_events;
 534        char             event_msg_printed;
 535
 536        /* How many users are waiting for events? */
 537        atomic_t         event_waiters;
 538        unsigned int     ticks_to_req_ev;
 539
 540        spinlock_t       watch_lock; /* For dealing with watch stuff below. */
 541
 542        /* How many users are waiting for commands? */
 543        unsigned int     command_waiters;
 544
 545        /* How many users are waiting for watchdogs? */
 546        unsigned int     watchdog_waiters;
 547
 548        /* How many users are waiting for message responses? */
 549        unsigned int     response_waiters;
 550
 551        /*
 552         * Tells what the lower layer has last been asked to watch for,
 553         * messages and/or watchdogs.  Protected by watch_lock.
 554         */
 555        unsigned int     last_watch_mask;
 556
 557        /*
 558         * The event receiver for my BMC, only really used at panic
 559         * shutdown as a place to store this.
 560         */
 561        unsigned char event_receiver;
 562        unsigned char event_receiver_lun;
 563        unsigned char local_sel_device;
 564        unsigned char local_event_generator;
 565
 566        /* For handling of maintenance mode. */
 567        int maintenance_mode;
 568        bool maintenance_mode_enable;
 569        int auto_maintenance_timeout;
 570        spinlock_t maintenance_mode_lock; /* Used in a timer... */
 571
 572        /*
 573         * If we are doing maintenance on something on IPMB, extend
 574         * the timeout time to avoid timeouts writing firmware and
 575         * such.
 576         */
 577        int ipmb_maintenance_mode_timeout;
 578
 579        /*
  580         * A cheap hack: if this is non-NULL and a message to an
  581         * interface comes in with a NULL user, call this routine with
 582         * it.  Note that the message will still be freed by the
 583         * caller.  This only works on the system interface.
 584         *
 585         * Protected by bmc_reg_mutex.
 586         */
 587        void (*null_user_handler)(struct ipmi_smi *intf,
 588                                  struct ipmi_recv_msg *msg);
 589
 590        /*
 591         * When we are scanning the channels for an SMI, this will
 592         * tell which channel we are scanning.
 593         */
 594        int curr_channel;
 595
 596        /* Channel information */
 597        struct ipmi_channel_set *channel_list;
 598        unsigned int curr_working_cset; /* First index into the following. */
 599        struct ipmi_channel_set wchannels[2];
 600        struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
 601        bool channels_ready;
 602
 603        atomic_t stats[IPMI_NUM_STATS];
 604
 605        /*
  606         * run_to_completion duplicates the flag in the smb_info,
  607         * smi_info and ipmi_serial_info structures.  Used to decrease
  608         * the number of parameters passed by "low" level IPMI code.
 609         */
 610        int run_to_completion;
 611};
 612#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 613
 614static void __get_guid(struct ipmi_smi *intf);
 615static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
 616static int __ipmi_bmc_register(struct ipmi_smi *intf,
 617                               struct ipmi_device_id *id,
 618                               bool guid_set, guid_t *guid, int intf_num);
 619static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
 620
 621
  622/*
 623 * The driver model view of the IPMI messaging driver.
 624 */
 625static struct platform_driver ipmidriver = {
 626        .driver = {
 627                .name = "ipmi",
 628                .bus = &platform_bus_type
 629        }
 630};
 631/*
 632 * This mutex keeps us from adding the same BMC twice.
 633 */
 634static DEFINE_MUTEX(ipmidriver_mutex);
 635
 636static LIST_HEAD(ipmi_interfaces);
 637static DEFINE_MUTEX(ipmi_interfaces_mutex);
 638static struct srcu_struct ipmi_interfaces_srcu;
 639
 640/*
 641 * List of watchers that want to know when smi's are added and deleted.
 642 */
 643static LIST_HEAD(smi_watchers);
 644static DEFINE_MUTEX(smi_watchers_mutex);
 645
 646#define ipmi_inc_stat(intf, stat) \
 647        atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 648#define ipmi_get_stat(intf, stat) \
 649        ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
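/*
 * Illustrative example (not from the original source): the two macros
 * above paste the short stat name onto IPMI_STAT_, so callers write
 *
 *	ipmi_inc_stat(intf, sent_local_commands);
 *	pr_debug("sent %u local commands\n",
 *		 ipmi_get_stat(intf, sent_local_commands));
 *
 * which expands to atomic_inc()/atomic_read() on
 * intf->stats[IPMI_STAT_sent_local_commands].
 */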
 650
 651static const char * const addr_src_to_str[] = {
 652        "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
 653        "device-tree", "platform"
 654};
 655
 656const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
 657{
 658        if (src >= SI_LAST)
 659                src = 0; /* Invalid */
 660        return addr_src_to_str[src];
 661}
 662EXPORT_SYMBOL(ipmi_addr_src_to_str);
 663
 664static int is_lan_addr(struct ipmi_addr *addr)
 665{
 666        return addr->addr_type == IPMI_LAN_ADDR_TYPE;
 667}
 668
 669static int is_ipmb_addr(struct ipmi_addr *addr)
 670{
 671        return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
 672}
 673
 674static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
 675{
 676        return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
 677}
 678
 679static void free_recv_msg_list(struct list_head *q)
 680{
 681        struct ipmi_recv_msg *msg, *msg2;
 682
 683        list_for_each_entry_safe(msg, msg2, q, link) {
 684                list_del(&msg->link);
 685                ipmi_free_recv_msg(msg);
 686        }
 687}
 688
 689static void free_smi_msg_list(struct list_head *q)
 690{
 691        struct ipmi_smi_msg *msg, *msg2;
 692
 693        list_for_each_entry_safe(msg, msg2, q, link) {
 694                list_del(&msg->link);
 695                ipmi_free_smi_msg(msg);
 696        }
 697}
 698
 699static void clean_up_interface_data(struct ipmi_smi *intf)
 700{
 701        int              i;
 702        struct cmd_rcvr  *rcvr, *rcvr2;
 703        struct list_head list;
 704
 705        tasklet_kill(&intf->recv_tasklet);
 706
 707        free_smi_msg_list(&intf->waiting_rcv_msgs);
 708        free_recv_msg_list(&intf->waiting_events);
 709
 710        /*
 711         * Wholesale remove all the entries from the list in the
 712         * interface and wait for RCU to know that none are in use.
 713         */
 714        mutex_lock(&intf->cmd_rcvrs_mutex);
 715        INIT_LIST_HEAD(&list);
 716        list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
 717        mutex_unlock(&intf->cmd_rcvrs_mutex);
 718
 719        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
 720                kfree(rcvr);
 721
 722        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 723                if ((intf->seq_table[i].inuse)
 724                                        && (intf->seq_table[i].recv_msg))
 725                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 726        }
 727}
 728
 729static void intf_free(struct kref *ref)
 730{
 731        struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
 732
 733        clean_up_interface_data(intf);
 734        kfree(intf);
 735}
 736
 737struct watcher_entry {
 738        int              intf_num;
 739        struct ipmi_smi  *intf;
 740        struct list_head link;
 741};
 742
 743int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 744{
 745        struct ipmi_smi *intf;
 746        int index, rv;
 747
 748        /*
  749         * Make sure the driver is actually initialized; this handles
 750         * problems with initialization order.
 751         */
 752        rv = ipmi_init_msghandler();
 753        if (rv)
 754                return rv;
 755
 756        mutex_lock(&smi_watchers_mutex);
 757
 758        list_add(&watcher->link, &smi_watchers);
 759
 760        index = srcu_read_lock(&ipmi_interfaces_srcu);
 761        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 762                int intf_num = READ_ONCE(intf->intf_num);
 763
 764                if (intf_num == -1)
 765                        continue;
 766                watcher->new_smi(intf_num, intf->si_dev);
 767        }
 768        srcu_read_unlock(&ipmi_interfaces_srcu, index);
 769
 770        mutex_unlock(&smi_watchers_mutex);
 771
 772        return 0;
 773}
 774EXPORT_SYMBOL(ipmi_smi_watcher_register);
 775
 776int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 777{
 778        mutex_lock(&smi_watchers_mutex);
 779        list_del(&watcher->link);
 780        mutex_unlock(&smi_watchers_mutex);
 781        return 0;
 782}
 783EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
 784
 785/*
  786 * Takes and releases smi_watchers_mutex itself; do not call with it held.
 787 */
 788static void
 789call_smi_watchers(int i, struct device *dev)
 790{
 791        struct ipmi_smi_watcher *w;
 792
 793        mutex_lock(&smi_watchers_mutex);
 794        list_for_each_entry(w, &smi_watchers, link) {
 795                if (try_module_get(w->owner)) {
 796                        w->new_smi(i, dev);
 797                        module_put(w->owner);
 798                }
 799        }
 800        mutex_unlock(&smi_watchers_mutex);
 801}
 802
 803static int
 804ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
 805{
 806        if (addr1->addr_type != addr2->addr_type)
 807                return 0;
 808
 809        if (addr1->channel != addr2->channel)
 810                return 0;
 811
 812        if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 813                struct ipmi_system_interface_addr *smi_addr1
 814                    = (struct ipmi_system_interface_addr *) addr1;
 815                struct ipmi_system_interface_addr *smi_addr2
 816                    = (struct ipmi_system_interface_addr *) addr2;
 817                return (smi_addr1->lun == smi_addr2->lun);
 818        }
 819
 820        if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
 821                struct ipmi_ipmb_addr *ipmb_addr1
 822                    = (struct ipmi_ipmb_addr *) addr1;
 823                struct ipmi_ipmb_addr *ipmb_addr2
 824                    = (struct ipmi_ipmb_addr *) addr2;
 825
 826                return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
 827                        && (ipmb_addr1->lun == ipmb_addr2->lun));
 828        }
 829
 830        if (is_lan_addr(addr1)) {
 831                struct ipmi_lan_addr *lan_addr1
 832                        = (struct ipmi_lan_addr *) addr1;
 833                struct ipmi_lan_addr *lan_addr2
 834                    = (struct ipmi_lan_addr *) addr2;
 835
 836                return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
 837                        && (lan_addr1->local_SWID == lan_addr2->local_SWID)
 838                        && (lan_addr1->session_handle
 839                            == lan_addr2->session_handle)
 840                        && (lan_addr1->lun == lan_addr2->lun));
 841        }
 842
 843        return 1;
 844}
 845
 846int ipmi_validate_addr(struct ipmi_addr *addr, int len)
 847{
 848        if (len < sizeof(struct ipmi_system_interface_addr))
 849                return -EINVAL;
 850
 851        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 852                if (addr->channel != IPMI_BMC_CHANNEL)
 853                        return -EINVAL;
 854                return 0;
 855        }
 856
 857        if ((addr->channel == IPMI_BMC_CHANNEL)
 858            || (addr->channel >= IPMI_MAX_CHANNELS)
 859            || (addr->channel < 0))
 860                return -EINVAL;
 861
 862        if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
 863                if (len < sizeof(struct ipmi_ipmb_addr))
 864                        return -EINVAL;
 865                return 0;
 866        }
 867
 868        if (is_lan_addr(addr)) {
 869                if (len < sizeof(struct ipmi_lan_addr))
 870                        return -EINVAL;
 871                return 0;
 872        }
 873
 874        return -EINVAL;
 875}
 876EXPORT_SYMBOL(ipmi_validate_addr);
 877
 878unsigned int ipmi_addr_length(int addr_type)
 879{
 880        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 881                return sizeof(struct ipmi_system_interface_addr);
 882
 883        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
 884                        || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
 885                return sizeof(struct ipmi_ipmb_addr);
 886
 887        if (addr_type == IPMI_LAN_ADDR_TYPE)
 888                return sizeof(struct ipmi_lan_addr);
 889
 890        return 0;
 891}
 892EXPORT_SYMBOL(ipmi_addr_length);
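/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): a caller addressing the local BMC fills in a system-interface
 * address and can have it checked with ipmi_validate_addr(); the field
 * names follow the structure definitions in <linux/ipmi.h>.
 *
 *	struct ipmi_system_interface_addr smi_addr;
 *
 *	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	smi_addr.channel = IPMI_BMC_CHANNEL;
 *	smi_addr.lun = 0;
 *
 *	if (ipmi_validate_addr((struct ipmi_addr *) &smi_addr,
 *			       sizeof(smi_addr)))
 *		return -EINVAL;
 */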
 893
 894static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 895{
 896        int rv = 0;
 897
 898        if (!msg->user) {
 899                /* Special handling for NULL users. */
 900                if (intf->null_user_handler) {
 901                        intf->null_user_handler(intf, msg);
 902                } else {
 903                        /* No handler, so give up. */
 904                        rv = -EINVAL;
 905                }
 906                ipmi_free_recv_msg(msg);
 907        } else if (!oops_in_progress) {
 908                /*
  909                 * If we are running in the panic context, calling the
  910                 * receive handler has little meaning and carries a
  911                 * deadlock risk, so simply skip it in that case.
 912                 */
 913                int index;
 914                struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
 915
 916                if (user) {
 917                        user->handler->ipmi_recv_hndl(msg, user->handler_data);
 918                        release_ipmi_user(user, index);
 919                } else {
 920                        /* User went away, give up. */
 921                        ipmi_free_recv_msg(msg);
 922                        rv = -EINVAL;
 923                }
 924        }
 925
 926        return rv;
 927}
 928
 929static void deliver_local_response(struct ipmi_smi *intf,
 930                                   struct ipmi_recv_msg *msg)
 931{
 932        if (deliver_response(intf, msg))
 933                ipmi_inc_stat(intf, unhandled_local_responses);
 934        else
 935                ipmi_inc_stat(intf, handled_local_responses);
 936}
 937
 938static void deliver_err_response(struct ipmi_smi *intf,
 939                                 struct ipmi_recv_msg *msg, int err)
 940{
 941        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
 942        msg->msg_data[0] = err;
 943        msg->msg.netfn |= 1; /* Convert to a response. */
 944        msg->msg.data_len = 1;
 945        msg->msg.data = msg->msg_data;
 946        deliver_local_response(intf, msg);
 947}
 948
 949static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
 950{
 951        unsigned long iflags;
 952
 953        if (!intf->handlers->set_need_watch)
 954                return;
 955
 956        spin_lock_irqsave(&intf->watch_lock, iflags);
 957        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
 958                intf->response_waiters++;
 959
 960        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
 961                intf->watchdog_waiters++;
 962
 963        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
 964                intf->command_waiters++;
 965
 966        if ((intf->last_watch_mask & flags) != flags) {
 967                intf->last_watch_mask |= flags;
 968                intf->handlers->set_need_watch(intf->send_info,
 969                                               intf->last_watch_mask);
 970        }
 971        spin_unlock_irqrestore(&intf->watch_lock, iflags);
 972}
 973
 974static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
 975{
 976        unsigned long iflags;
 977
 978        if (!intf->handlers->set_need_watch)
 979                return;
 980
 981        spin_lock_irqsave(&intf->watch_lock, iflags);
 982        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
 983                intf->response_waiters--;
 984
 985        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
 986                intf->watchdog_waiters--;
 987
 988        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
 989                intf->command_waiters--;
 990
 991        flags = 0;
 992        if (intf->response_waiters)
 993                flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 994        if (intf->watchdog_waiters)
 995                flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
 996        if (intf->command_waiters)
 997                flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
 998
 999        if (intf->last_watch_mask != flags) {
1000                intf->last_watch_mask = flags;
1001                intf->handlers->set_need_watch(intf->send_info,
1002                                               intf->last_watch_mask);
1003        }
1004        spin_unlock_irqrestore(&intf->watch_lock, iflags);
1005}
1006
1007/*
1008 * Find the next sequence number not being used and add the given
1009 * message with the given timeout to the sequence table.  This must be
1010 * called with the interface's seq_lock held.
1011 */
1012static int intf_next_seq(struct ipmi_smi      *intf,
1013                         struct ipmi_recv_msg *recv_msg,
1014                         unsigned long        timeout,
1015                         int                  retries,
1016                         int                  broadcast,
1017                         unsigned char        *seq,
1018                         long                 *seqid)
1019{
1020        int          rv = 0;
1021        unsigned int i;
1022
1023        if (timeout == 0)
1024                timeout = default_retry_ms;
1025        if (retries < 0)
1026                retries = default_max_retries;
1027
1028        for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1029                                        i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1030                if (!intf->seq_table[i].inuse)
1031                        break;
1032        }
1033
1034        if (!intf->seq_table[i].inuse) {
1035                intf->seq_table[i].recv_msg = recv_msg;
1036
1037                /*
 1038                 * Start with the maximum timeout; when the send response
 1039                 * comes in we will start the real timer.
1040                 */
1041                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1042                intf->seq_table[i].orig_timeout = timeout;
1043                intf->seq_table[i].retries_left = retries;
1044                intf->seq_table[i].broadcast = broadcast;
1045                intf->seq_table[i].inuse = 1;
1046                intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1047                *seq = i;
1048                *seqid = intf->seq_table[i].seqid;
1049                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1050                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1051                need_waiter(intf);
1052        } else {
1053                rv = -EAGAIN;
1054        }
1055
1056        return rv;
1057}
1058
1059/*
1060 * Return the receive message for the given sequence number and
1061 * release the sequence number so it can be reused.  Some other data
1062 * is passed in to be sure the message matches up correctly (to help
 1064 * guard against messages coming in after their timeout and the
1064 * sequence number being reused).
1065 */
1066static int intf_find_seq(struct ipmi_smi      *intf,
1067                         unsigned char        seq,
1068                         short                channel,
1069                         unsigned char        cmd,
1070                         unsigned char        netfn,
1071                         struct ipmi_addr     *addr,
1072                         struct ipmi_recv_msg **recv_msg)
1073{
1074        int           rv = -ENODEV;
1075        unsigned long flags;
1076
1077        if (seq >= IPMI_IPMB_NUM_SEQ)
1078                return -EINVAL;
1079
1080        spin_lock_irqsave(&intf->seq_lock, flags);
1081        if (intf->seq_table[seq].inuse) {
1082                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1083
1084                if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1085                                && (msg->msg.netfn == netfn)
1086                                && (ipmi_addr_equal(addr, &msg->addr))) {
1087                        *recv_msg = msg;
1088                        intf->seq_table[seq].inuse = 0;
1089                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1090                        rv = 0;
1091                }
1092        }
1093        spin_unlock_irqrestore(&intf->seq_lock, flags);
1094
1095        return rv;
1096}
1097
1098
1099/* Start the timer for a specific sequence table entry. */
1100static int intf_start_seq_timer(struct ipmi_smi *intf,
1101                                long       msgid)
1102{
1103        int           rv = -ENODEV;
1104        unsigned long flags;
1105        unsigned char seq;
1106        unsigned long seqid;
1107
1108
1109        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1110
1111        spin_lock_irqsave(&intf->seq_lock, flags);
1112        /*
1113         * We do this verification because the user can be deleted
1114         * while a message is outstanding.
1115         */
1116        if ((intf->seq_table[seq].inuse)
1117                                && (intf->seq_table[seq].seqid == seqid)) {
1118                struct seq_table *ent = &intf->seq_table[seq];
1119                ent->timeout = ent->orig_timeout;
1120                rv = 0;
1121        }
1122        spin_unlock_irqrestore(&intf->seq_lock, flags);
1123
1124        return rv;
1125}
1126
1127/* Got an error for the send message for a specific sequence number. */
1128static int intf_err_seq(struct ipmi_smi *intf,
1129                        long         msgid,
1130                        unsigned int err)
1131{
1132        int                  rv = -ENODEV;
1133        unsigned long        flags;
1134        unsigned char        seq;
1135        unsigned long        seqid;
1136        struct ipmi_recv_msg *msg = NULL;
1137
1138
1139        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1140
1141        spin_lock_irqsave(&intf->seq_lock, flags);
1142        /*
1143         * We do this verification because the user can be deleted
1144         * while a message is outstanding.
1145         */
1146        if ((intf->seq_table[seq].inuse)
1147                                && (intf->seq_table[seq].seqid == seqid)) {
1148                struct seq_table *ent = &intf->seq_table[seq];
1149
1150                ent->inuse = 0;
1151                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1152                msg = ent->recv_msg;
1153                rv = 0;
1154        }
1155        spin_unlock_irqrestore(&intf->seq_lock, flags);
1156
1157        if (msg)
1158                deliver_err_response(intf, msg, err);
1159
1160        return rv;
1161}
1162
1163static void free_user_work(struct work_struct *work)
1164{
1165        struct ipmi_user *user = container_of(work, struct ipmi_user,
1166                                              remove_work);
1167
1168        cleanup_srcu_struct(&user->release_barrier);
1169        kfree(user);
1170}
1171
1172int ipmi_create_user(unsigned int          if_num,
1173                     const struct ipmi_user_hndl *handler,
1174                     void                  *handler_data,
1175                     struct ipmi_user      **user)
1176{
1177        unsigned long flags;
1178        struct ipmi_user *new_user;
1179        int           rv, index;
1180        struct ipmi_smi *intf;
1181
1182        /*
1183         * There is no module usecount here, because it's not
1184         * required.  Since this can only be used by and called from
1185         * other modules, they will implicitly use this module, and
1186         * thus this can't be removed unless the other modules are
1187         * removed.
1188         */
1189
1190        if (handler == NULL)
1191                return -EINVAL;
1192
1193        /*
 1194         * Make sure the driver is actually initialized; this handles
1195         * problems with initialization order.
1196         */
1197        rv = ipmi_init_msghandler();
1198        if (rv)
1199                return rv;
1200
1201        new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1202        if (!new_user)
1203                return -ENOMEM;
1204
1205        index = srcu_read_lock(&ipmi_interfaces_srcu);
1206        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1207                if (intf->intf_num == if_num)
1208                        goto found;
1209        }
1210        /* Not found, return an error */
1211        rv = -EINVAL;
1212        goto out_kfree;
1213
1214 found:
1215        INIT_WORK(&new_user->remove_work, free_user_work);
1216
1217        rv = init_srcu_struct(&new_user->release_barrier);
1218        if (rv)
1219                goto out_kfree;
1220
1221        /* Note that each existing user holds a refcount to the interface. */
1222        kref_get(&intf->refcount);
1223
1224        kref_init(&new_user->refcount);
1225        new_user->handler = handler;
1226        new_user->handler_data = handler_data;
1227        new_user->intf = intf;
1228        new_user->gets_events = false;
1229
1230        rcu_assign_pointer(new_user->self, new_user);
1231        spin_lock_irqsave(&intf->seq_lock, flags);
1232        list_add_rcu(&new_user->link, &intf->users);
1233        spin_unlock_irqrestore(&intf->seq_lock, flags);
1234        if (handler->ipmi_watchdog_pretimeout)
1235                /* User wants pretimeouts, so make sure to watch for them. */
1236                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1237        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1238        *user = new_user;
1239        return 0;
1240
1241out_kfree:
1242        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1243        kfree(new_user);
1244        return rv;
1245}
1246EXPORT_SYMBOL(ipmi_create_user);
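/*
 * Illustrative client sketch (not part of the original source; the names
 * my_recv, my_hndl and my_user are hypothetical): a kernel user supplies
 * a struct ipmi_user_hndl whose ipmi_recv_hndl() callback is handed each
 * delivered message and frees it with ipmi_free_recv_msg() when done,
 * and later tears the binding down with ipmi_destroy_user().
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	static struct ipmi_user *my_user;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
 */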
1247
1248int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1249{
1250        int rv, index;
1251        struct ipmi_smi *intf;
1252
1253        index = srcu_read_lock(&ipmi_interfaces_srcu);
1254        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1255                if (intf->intf_num == if_num)
1256                        goto found;
1257        }
1258        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1259
1260        /* Not found, return an error */
1261        return -EINVAL;
1262
1263found:
1264        if (!intf->handlers->get_smi_info)
1265                rv = -ENOTTY;
1266        else
1267                rv = intf->handlers->get_smi_info(intf->send_info, data);
1268        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1269
1270        return rv;
1271}
1272EXPORT_SYMBOL(ipmi_get_smi_info);
1273
1274static void free_user(struct kref *ref)
1275{
1276        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1277
1278        /* SRCU cleanup must happen in task context. */
1279        schedule_work(&user->remove_work);
1280}
1281
1282static void _ipmi_destroy_user(struct ipmi_user *user)
1283{
1284        struct ipmi_smi  *intf = user->intf;
1285        int              i;
1286        unsigned long    flags;
1287        struct cmd_rcvr  *rcvr;
1288        struct cmd_rcvr  *rcvrs = NULL;
1289
1290        if (!acquire_ipmi_user(user, &i)) {
1291                /*
1292                 * The user has already been cleaned up, just make sure
1293                 * nothing is using it and return.
1294                 */
1295                synchronize_srcu(&user->release_barrier);
1296                return;
1297        }
1298
1299        rcu_assign_pointer(user->self, NULL);
1300        release_ipmi_user(user, i);
1301
1302        synchronize_srcu(&user->release_barrier);
1303
1304        if (user->handler->shutdown)
1305                user->handler->shutdown(user->handler_data);
1306
1307        if (user->handler->ipmi_watchdog_pretimeout)
1308                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1309
1310        if (user->gets_events)
1311                atomic_dec(&intf->event_waiters);
1312
1313        /* Remove the user from the interface's sequence table. */
1314        spin_lock_irqsave(&intf->seq_lock, flags);
1315        list_del_rcu(&user->link);
1316
1317        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1318                if (intf->seq_table[i].inuse
1319                    && (intf->seq_table[i].recv_msg->user == user)) {
1320                        intf->seq_table[i].inuse = 0;
1321                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1322                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1323                }
1324        }
1325        spin_unlock_irqrestore(&intf->seq_lock, flags);
1326
1327        /*
1328         * Remove the user from the command receiver's table.  First
1329         * we build a list of everything (not using the standard link,
1330         * since other things may be using it till we do
1331         * synchronize_srcu()) then free everything in that list.
1332         */
1333        mutex_lock(&intf->cmd_rcvrs_mutex);
1334        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1335                if (rcvr->user == user) {
1336                        list_del_rcu(&rcvr->link);
1337                        rcvr->next = rcvrs;
1338                        rcvrs = rcvr;
1339                }
1340        }
1341        mutex_unlock(&intf->cmd_rcvrs_mutex);
1342        synchronize_rcu();
1343        while (rcvrs) {
1344                rcvr = rcvrs;
1345                rcvrs = rcvr->next;
1346                kfree(rcvr);
1347        }
1348
1349        kref_put(&intf->refcount, intf_free);
1350}
1351
1352int ipmi_destroy_user(struct ipmi_user *user)
1353{
1354        _ipmi_destroy_user(user);
1355
1356        kref_put(&user->refcount, free_user);
1357
1358        return 0;
1359}
1360EXPORT_SYMBOL(ipmi_destroy_user);
1361
1362int ipmi_get_version(struct ipmi_user *user,
1363                     unsigned char *major,
1364                     unsigned char *minor)
1365{
1366        struct ipmi_device_id id;
1367        int rv, index;
1368
1369        user = acquire_ipmi_user(user, &index);
1370        if (!user)
1371                return -ENODEV;
1372
1373        rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1374        if (!rv) {
1375                *major = ipmi_version_major(&id);
1376                *minor = ipmi_version_minor(&id);
1377        }
1378        release_ipmi_user(user, index);
1379
1380        return rv;
1381}
1382EXPORT_SYMBOL(ipmi_get_version);
1383
1384int ipmi_set_my_address(struct ipmi_user *user,
1385                        unsigned int  channel,
1386                        unsigned char address)
1387{
1388        int index, rv = 0;
1389
1390        user = acquire_ipmi_user(user, &index);
1391        if (!user)
1392                return -ENODEV;
1393
1394        if (channel >= IPMI_MAX_CHANNELS) {
1395                rv = -EINVAL;
1396        } else {
1397                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1398                user->intf->addrinfo[channel].address = address;
1399        }
1400        release_ipmi_user(user, index);
1401
1402        return rv;
1403}
1404EXPORT_SYMBOL(ipmi_set_my_address);
1405
1406int ipmi_get_my_address(struct ipmi_user *user,
1407                        unsigned int  channel,
1408                        unsigned char *address)
1409{
1410        int index, rv = 0;
1411
1412        user = acquire_ipmi_user(user, &index);
1413        if (!user)
1414                return -ENODEV;
1415
1416        if (channel >= IPMI_MAX_CHANNELS) {
1417                rv = -EINVAL;
1418        } else {
1419                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1420                *address = user->intf->addrinfo[channel].address;
1421        }
1422        release_ipmi_user(user, index);
1423
1424        return rv;
1425}
1426EXPORT_SYMBOL(ipmi_get_my_address);
1427
1428int ipmi_set_my_LUN(struct ipmi_user *user,
1429                    unsigned int  channel,
1430                    unsigned char LUN)
1431{
1432        int index, rv = 0;
1433
1434        user = acquire_ipmi_user(user, &index);
1435        if (!user)
1436                return -ENODEV;
1437
1438        if (channel >= IPMI_MAX_CHANNELS) {
1439                rv = -EINVAL;
1440        } else {
1441                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1442                user->intf->addrinfo[channel].lun = LUN & 0x3;
1443        }
1444        release_ipmi_user(user, index);
1445
1446        return rv;
1447}
1448EXPORT_SYMBOL(ipmi_set_my_LUN);
1449
1450int ipmi_get_my_LUN(struct ipmi_user *user,
1451                    unsigned int  channel,
1452                    unsigned char *address)
1453{
1454        int index, rv = 0;
1455
1456        user = acquire_ipmi_user(user, &index);
1457        if (!user)
1458                return -ENODEV;
1459
1460        if (channel >= IPMI_MAX_CHANNELS) {
1461                rv = -EINVAL;
1462        } else {
1463                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1464                *address = user->intf->addrinfo[channel].lun;
1465        }
1466        release_ipmi_user(user, index);
1467
1468        return rv;
1469}
1470EXPORT_SYMBOL(ipmi_get_my_LUN);
1471
1472int ipmi_get_maintenance_mode(struct ipmi_user *user)
1473{
1474        int mode, index;
1475        unsigned long flags;
1476
1477        user = acquire_ipmi_user(user, &index);
1478        if (!user)
1479                return -ENODEV;
1480
1481        spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1482        mode = user->intf->maintenance_mode;
1483        spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1484        release_ipmi_user(user, index);
1485
1486        return mode;
1487}
1488EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1489
1490static void maintenance_mode_update(struct ipmi_smi *intf)
1491{
1492        if (intf->handlers->set_maintenance_mode)
1493                intf->handlers->set_maintenance_mode(
1494                        intf->send_info, intf->maintenance_mode_enable);
1495}
1496
1497int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1498{
1499        int rv = 0, index;
1500        unsigned long flags;
1501        struct ipmi_smi *intf = user->intf;
1502
1503        user = acquire_ipmi_user(user, &index);
1504        if (!user)
1505                return -ENODEV;
1506
1507        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1508        if (intf->maintenance_mode != mode) {
1509                switch (mode) {
1510                case IPMI_MAINTENANCE_MODE_AUTO:
1511                        intf->maintenance_mode_enable
1512                                = (intf->auto_maintenance_timeout > 0);
1513                        break;
1514
1515                case IPMI_MAINTENANCE_MODE_OFF:
1516                        intf->maintenance_mode_enable = false;
1517                        break;
1518
1519                case IPMI_MAINTENANCE_MODE_ON:
1520                        intf->maintenance_mode_enable = true;
1521                        break;
1522
1523                default:
1524                        rv = -EINVAL;
1525                        goto out_unlock;
1526                }
1527                intf->maintenance_mode = mode;
1528
1529                maintenance_mode_update(intf);
1530        }
1531 out_unlock:
1532        spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1533        release_ipmi_user(user, index);
1534
1535        return rv;
1536}
1537EXPORT_SYMBOL(ipmi_set_maintenance_mode);
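/*
 * Illustrative example (not part of the original source): a user about
 * to send firmware-update style commands can force maintenance mode on
 * and hand control back to the automatic handling afterwards.
 *
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	(... issue the maintenance commands ...)
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */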
1538
1539int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1540{
1541        unsigned long        flags;
1542        struct ipmi_smi      *intf = user->intf;
1543        struct ipmi_recv_msg *msg, *msg2;
1544        struct list_head     msgs;
1545        int index;
1546
1547        user = acquire_ipmi_user(user, &index);
1548        if (!user)
1549                return -ENODEV;
1550
1551        INIT_LIST_HEAD(&msgs);
1552
1553        spin_lock_irqsave(&intf->events_lock, flags);
1554        if (user->gets_events == val)
1555                goto out;
1556
1557        user->gets_events = val;
1558
1559        if (val) {
1560                if (atomic_inc_return(&intf->event_waiters) == 1)
1561                        need_waiter(intf);
1562        } else {
1563                atomic_dec(&intf->event_waiters);
1564        }
1565
1566        if (intf->delivering_events)
1567                /*
1568                 * Another thread is delivering events for this, so
1569                 * let it handle any new events.
1570                 */
1571                goto out;
1572
1573        /* Deliver any queued events. */
1574        while (user->gets_events && !list_empty(&intf->waiting_events)) {
1575                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1576                        list_move_tail(&msg->link, &msgs);
1577                intf->waiting_events_count = 0;
1578                if (intf->event_msg_printed) {
1579                        dev_warn(intf->si_dev, "Event queue no longer full\n");
1580                        intf->event_msg_printed = 0;
1581                }
1582
1583                intf->delivering_events = 1;
1584                spin_unlock_irqrestore(&intf->events_lock, flags);
1585
1586                list_for_each_entry_safe(msg, msg2, &msgs, link) {
1587                        msg->user = user;
1588                        kref_get(&user->refcount);
1589                        deliver_local_response(intf, msg);
1590                }
1591
1592                spin_lock_irqsave(&intf->events_lock, flags);
1593                intf->delivering_events = 0;
1594        }
1595
1596 out:
1597        spin_unlock_irqrestore(&intf->events_lock, flags);
1598        release_ipmi_user(user, index);
1599
1600        return 0;
1601}
1602EXPORT_SYMBOL(ipmi_set_gets_events);
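
/*
 * Note on the delivery loop above: events_lock is dropped while the
 * queued messages are handed to the user, so new events may land on
 * waiting_events in the meantime; the outer while loop picks those up.
 * The delivering_events flag keeps a second caller that toggles
 * gets_events from walking the same queue concurrently.
 */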
1603
1604static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1605                                      unsigned char netfn,
1606                                      unsigned char cmd,
1607                                      unsigned char chan)
1608{
1609        struct cmd_rcvr *rcvr;
1610
1611        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1612                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1613                                        && (rcvr->chans & (1 << chan)))
1614                        return rcvr;
1615        }
1616        return NULL;
1617}
1618
1619static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1620                                 unsigned char netfn,
1621                                 unsigned char cmd,
1622                                 unsigned int  chans)
1623{
1624        struct cmd_rcvr *rcvr;
1625
1626        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1627                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1628                                        && (rcvr->chans & chans))
1629                        return 0;
1630        }
1631        return 1;
1632}
1633
1634int ipmi_register_for_cmd(struct ipmi_user *user,
1635                          unsigned char netfn,
1636                          unsigned char cmd,
1637                          unsigned int  chans)
1638{
1639        struct ipmi_smi *intf = user->intf;
1640        struct cmd_rcvr *rcvr;
1641        int rv = 0, index;
1642
1643        user = acquire_ipmi_user(user, &index);
1644        if (!user)
1645                return -ENODEV;
1646
1647        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1648        if (!rcvr) {
1649                rv = -ENOMEM;
1650                goto out_release;
1651        }
1652        rcvr->cmd = cmd;
1653        rcvr->netfn = netfn;
1654        rcvr->chans = chans;
1655        rcvr->user = user;
1656
1657        mutex_lock(&intf->cmd_rcvrs_mutex);
1658        /* Make sure the command/netfn is not already registered. */
1659        if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1660                rv = -EBUSY;
1661                goto out_unlock;
1662        }
1663
1664        smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1665
1666        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1667
1668out_unlock:
1669        mutex_unlock(&intf->cmd_rcvrs_mutex);
1670        if (rv)
1671                kfree(rcvr);
1672out_release:
1673        release_ipmi_user(user, index);
1674
1675        return rv;
1676}
1677EXPORT_SYMBOL(ipmi_register_for_cmd);
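
/*
 * Illustrative caller sketch (not part of the driver): claiming an
 * incoming command on channel 0 only.  The command value 0x01 is
 * hypothetical; chans is a bitmask of channel numbers.
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST, 0x01,
 *				   1 << 0);
 *	...
 *	ipmi_unregister_for_cmd(user, IPMI_NETFN_APP_REQUEST, 0x01, 1 << 0);
 *
 * Registration fails with -EBUSY if another user already owns that
 * netfn/cmd pair on any of the requested channels.
 */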
1678
1679int ipmi_unregister_for_cmd(struct ipmi_user *user,
1680                            unsigned char netfn,
1681                            unsigned char cmd,
1682                            unsigned int  chans)
1683{
1684        struct ipmi_smi *intf = user->intf;
1685        struct cmd_rcvr *rcvr;
1686        struct cmd_rcvr *rcvrs = NULL;
1687        int i, rv = -ENOENT, index;
1688
1689        user = acquire_ipmi_user(user, &index);
1690        if (!user)
1691                return -ENODEV;
1692
1693        mutex_lock(&intf->cmd_rcvrs_mutex);
1694        for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1695                if (((1 << i) & chans) == 0)
1696                        continue;
1697                rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1698                if (rcvr == NULL)
1699                        continue;
1700                if (rcvr->user == user) {
1701                        rv = 0;
1702                        rcvr->chans &= ~chans;
1703                        if (rcvr->chans == 0) {
1704                                list_del_rcu(&rcvr->link);
1705                                rcvr->next = rcvrs;
1706                                rcvrs = rcvr;
1707                        }
1708                }
1709        }
1710        mutex_unlock(&intf->cmd_rcvrs_mutex);
1711        synchronize_rcu();
1712        release_ipmi_user(user, index);
1713        while (rcvrs) {
1714                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1715                rcvr = rcvrs;
1716                rcvrs = rcvr->next;
1717                kfree(rcvr);
1718        }
1719
1720        return rv;
1721}
1722EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1723
1724static unsigned char
1725ipmb_checksum(unsigned char *data, int size)
1726{
1727        unsigned char csum = 0;
1728
1729        for (; size > 0; size--, data++)
1730                csum += *data;
1731
1732        return -csum;
1733}
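
/*
 * Illustrative example (not part of the driver): the IPMB checksum is
 * the two's complement of the byte sum, so adding it back to the bytes
 * it covers gives 0 modulo 256.  For a connection header of
 * { 0x20, 0x18 } (rsSA 0x20, App request netfn with LUN 0):
 *
 *	ipmb_checksum(hdr, 2) == 0x100 - (0x20 + 0x18) == 0xc8
 *	(0x20 + 0x18 + 0xc8) & 0xff == 0
 */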
1734
1735static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1736                                   struct kernel_ipmi_msg *msg,
1737                                   struct ipmi_ipmb_addr *ipmb_addr,
1738                                   long                  msgid,
1739                                   unsigned char         ipmb_seq,
1740                                   int                   broadcast,
1741                                   unsigned char         source_address,
1742                                   unsigned char         source_lun)
1743{
1744        int i = broadcast;
1745
1746        /* Format the IPMB header data. */
1747        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1748        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1749        smi_msg->data[2] = ipmb_addr->channel;
1750        if (broadcast)
1751                smi_msg->data[3] = 0;
1752        smi_msg->data[i+3] = ipmb_addr->slave_addr;
1753        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1754        smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1755        smi_msg->data[i+6] = source_address;
1756        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1757        smi_msg->data[i+8] = msg->cmd;
1758
1759        /* Now tack on the data to the message. */
1760        if (msg->data_len > 0)
1761                memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1762        smi_msg->data_size = msg->data_len + 9;
1763
1764        /* Now calculate the checksum and tack it on. */
1765        smi_msg->data[i+smi_msg->data_size]
1766                = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1767
1768        /*
1769         * Add on the checksum size and the offset from the
1770         * broadcast.
1771         */
1772        smi_msg->data_size += 1 + i;
1773
1774        smi_msg->msgid = msgid;
1775}
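
/*
 * Sketch of the buffer format_ipmb_msg() builds for the common
 * non-broadcast case (i == 0); the IPMB frame is wrapped in a Send
 * Message command to the local interface:
 *
 *	data[0]   IPMI_NETFN_APP_REQUEST << 2
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   destination channel
 *	data[3]   rsSA (slave_addr)
 *	data[4]   netfn << 2 | LUN
 *	data[5]   checksum over data[3..4]
 *	data[6]   rqSA (source_address)
 *	data[7]   rqSeq << 2 | source_lun
 *	data[8]   cmd
 *	data[9..] payload, followed by a checksum over data[6..]
 *
 * A broadcast stores a zero in data[3] and shifts the rest of the
 * frame along by one byte.
 */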
1776
1777static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1778                                  struct kernel_ipmi_msg *msg,
1779                                  struct ipmi_lan_addr  *lan_addr,
1780                                  long                  msgid,
1781                                  unsigned char         ipmb_seq,
1782                                  unsigned char         source_lun)
1783{
1784        /* Format the IPMB header data. */
1785        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1786        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1787        smi_msg->data[2] = lan_addr->channel;
1788        smi_msg->data[3] = lan_addr->session_handle;
1789        smi_msg->data[4] = lan_addr->remote_SWID;
1790        smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1791        smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1792        smi_msg->data[7] = lan_addr->local_SWID;
1793        smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1794        smi_msg->data[9] = msg->cmd;
1795
1796        /* Now tack on the data to the message. */
1797        if (msg->data_len > 0)
1798                memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1799        smi_msg->data_size = msg->data_len + 10;
1800
1801        /* Now calculate the checksum and tack it on. */
1802        smi_msg->data[smi_msg->data_size]
1803                = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1804
1805        /*
1806         * Add on the checksum size (there is no broadcast
1807         * offset for LAN messages).
1808         */
1809        smi_msg->data_size += 1;
1810
1811        smi_msg->msgid = msgid;
1812}
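
/*
 * The LAN variant above uses the same Send Message wrapper in
 * data[0..2], but carries session_handle, remote_SWID and local_SWID
 * instead of IPMB slave addresses, with checksums over data[4..5] and
 * over data[7] to the end of the payload.
 */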
1813
1814static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1815                                             struct ipmi_smi_msg *smi_msg,
1816                                             int priority)
1817{
1818        if (intf->curr_msg) {
1819                if (priority > 0)
1820                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1821                else
1822                        list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1823                smi_msg = NULL;
1824        } else {
1825                intf->curr_msg = smi_msg;
1826        }
1827
1828        return smi_msg;
1829}
1830
1831static void smi_send(struct ipmi_smi *intf,
1832                     const struct ipmi_smi_handlers *handlers,
1833                     struct ipmi_smi_msg *smi_msg, int priority)
1834{
1835        int run_to_completion = intf->run_to_completion;
1836        unsigned long flags = 0;
1837
1838        if (!run_to_completion)
1839                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1840        smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1841
1842        if (!run_to_completion)
1843                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1844
1845        if (smi_msg)
1846                handlers->sender(intf->send_info, smi_msg);
1847}
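
/*
 * Note on smi_send(): when run_to_completion is set (e.g. while
 * delivering a panic event) the transmit lock is skipped entirely,
 * since nothing else can be running.  smi_add_send_msg() returns the
 * message only when the interface was idle, in which case it is handed
 * straight to the low-level sender; otherwise it stays queued on
 * xmit_msgs/hp_xmit_msgs for later transmission.
 */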
1848
1849static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1850{
1851        return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1852                 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1853                     || (msg->cmd == IPMI_WARM_RESET_CMD)))
1854                || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1855}
1856
1857static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1858                              struct ipmi_addr       *addr,
1859                              long                   msgid,
1860                              struct kernel_ipmi_msg *msg,
1861                              struct ipmi_smi_msg    *smi_msg,
1862                              struct ipmi_recv_msg   *recv_msg,
1863                              int                    retries,
1864                              unsigned int           retry_time_ms)
1865{
1866        struct ipmi_system_interface_addr *smi_addr;
1867
1868        if (msg->netfn & 1)
1869                /* Responses are not allowed to the SMI. */
1870                return -EINVAL;
1871
1872        smi_addr = (struct ipmi_system_interface_addr *) addr;
1873        if (smi_addr->lun > 3) {
1874                ipmi_inc_stat(intf, sent_invalid_commands);
1875                return -EINVAL;
1876        }
1877
1878        memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1879
1880        if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1881            && ((msg->cmd == IPMI_SEND_MSG_CMD)
1882                || (msg->cmd == IPMI_GET_MSG_CMD)
1883                || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1884                /*
1885                 * We don't let the user do these, since we manage
1886                 * the sequence numbers.
1887                 */
1888                ipmi_inc_stat(intf, sent_invalid_commands);
1889                return -EINVAL;
1890        }
1891
1892        if (is_maintenance_mode_cmd(msg)) {
1893                unsigned long flags;
1894
1895                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1896                intf->auto_maintenance_timeout
1897                        = maintenance_mode_timeout_ms;
1898                if (!intf->maintenance_mode
1899                    && !intf->maintenance_mode_enable) {
1900                        intf->maintenance_mode_enable = true;
1901                        maintenance_mode_update(intf);
1902                }
1903                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1904                                       flags);
1905        }
1906
1907        if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1908                ipmi_inc_stat(intf, sent_invalid_commands);
1909                return -EMSGSIZE;
1910        }
1911
1912        smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1913        smi_msg->data[1] = msg->cmd;
1914        smi_msg->msgid = msgid;
1915        smi_msg->user_data = recv_msg;
1916        if (msg->data_len > 0)
1917                memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1918        smi_msg->data_size = msg->data_len + 2;
1919        ipmi_inc_stat(intf, sent_local_commands);
1920
1921        return 0;
1922}
1923
1924static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1925                           struct ipmi_addr       *addr,
1926                           long                   msgid,
1927                           struct kernel_ipmi_msg *msg,
1928                           struct ipmi_smi_msg    *smi_msg,
1929                           struct ipmi_recv_msg   *recv_msg,
1930                           unsigned char          source_address,
1931                           unsigned char          source_lun,
1932                           int                    retries,
1933                           unsigned int           retry_time_ms)
1934{
1935        struct ipmi_ipmb_addr *ipmb_addr;
1936        unsigned char ipmb_seq;
1937        long seqid;
1938        int broadcast = 0;
1939        struct ipmi_channel *chans;
1940        int rv = 0;
1941
1942        if (addr->channel >= IPMI_MAX_CHANNELS) {
1943                ipmi_inc_stat(intf, sent_invalid_commands);
1944                return -EINVAL;
1945        }
1946
1947        chans = READ_ONCE(intf->channel_list)->c;
1948
1949        if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1950                ipmi_inc_stat(intf, sent_invalid_commands);
1951                return -EINVAL;
1952        }
1953
1954        if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1955                /*
1956                 * Broadcasts add a zero at the beginning of the
1957                 * message, but are otherwise the same as an IPMB
1958                 * address.
1959                 */
1960                addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1961                broadcast = 1;
1962                retries = 0; /* Don't retry broadcasts. */
1963        }
1964
1965        /*
1966         * 9 for the header and 1 for the checksum, plus
1967         * possibly one for the broadcast.
1968         */
1969        if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1970                ipmi_inc_stat(intf, sent_invalid_commands);
1971                return -EMSGSIZE;
1972        }
1973
1974        ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1975        if (ipmb_addr->lun > 3) {
1976                ipmi_inc_stat(intf, sent_invalid_commands);
1977                return -EINVAL;
1978        }
1979
1980        memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1981
1982        if (recv_msg->msg.netfn & 0x1) {
1983                /*
1984                 * It's a response, so use the user's sequence
1985                 * from msgid.
1986                 */
1987                ipmi_inc_stat(intf, sent_ipmb_responses);
1988                format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1989                                msgid, broadcast,
1990                                source_address, source_lun);
1991
1992                /*
1993                 * Save the receive message so we can use it
1994                 * to deliver the response.
1995                 */
1996                smi_msg->user_data = recv_msg;
1997        } else {
1998                /* It's a command, so get a sequence for it. */
1999                unsigned long flags;
2000
2001                spin_lock_irqsave(&intf->seq_lock, flags);
2002
2003                if (is_maintenance_mode_cmd(msg))
2004                        intf->ipmb_maintenance_mode_timeout =
2005                                maintenance_mode_timeout_ms;
2006
2007                if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2008                        /* Different default in maintenance mode */
2009                        retry_time_ms = default_maintenance_retry_ms;
2010
2011                /*
2012                 * Allocate a sequence number, using the supplied
2013                 * timeout and retry count (or the driver defaults).
2014                 */
2015                rv = intf_next_seq(intf,
2016                                   recv_msg,
2017                                   retry_time_ms,
2018                                   retries,
2019                                   broadcast,
2020                                   &ipmb_seq,
2021                                   &seqid);
2022                if (rv)
2023                        /*
2024                         * We have probably used up all the
2025                         * sequence numbers, so abort.
2026                         */
2027                        goto out_err;
2028
2029                ipmi_inc_stat(intf, sent_ipmb_commands);
2030
2031                /*
2032                 * Store the sequence number in the message,
2033                 * so that when the send message response
2034                 * comes back we can start the timer.
2035                 */
2036                format_ipmb_msg(smi_msg, msg, ipmb_addr,
2037                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2038                                ipmb_seq, broadcast,
2039                                source_address, source_lun);
2040
2041                /*
2042                 * Copy the message into the recv message data, so we
2043                 * can retransmit it later if necessary.
2044                 */
2045                memcpy(recv_msg->msg_data, smi_msg->data,
2046                       smi_msg->data_size);
2047                recv_msg->msg.data = recv_msg->msg_data;
2048                recv_msg->msg.data_len = smi_msg->data_size;
2049
2050                /*
2051                 * We don't unlock until here, because we need
2052                 * to copy the completed message into the
2053                 * recv_msg before we release the lock.
2054                 * Otherwise, race conditions may bite us.  I
2055                 * know that's pretty paranoid, but I prefer
2056                 * to be correct.
2057                 */
2058out_err:
2059                spin_unlock_irqrestore(&intf->seq_lock, flags);
2060        }
2061
2062        return rv;
2063}
2064
2065static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2066                          struct ipmi_addr       *addr,
2067                          long                   msgid,
2068                          struct kernel_ipmi_msg *msg,
2069                          struct ipmi_smi_msg    *smi_msg,
2070                          struct ipmi_recv_msg   *recv_msg,
2071                          unsigned char          source_lun,
2072                          int                    retries,
2073                          unsigned int           retry_time_ms)
2074{
2075        struct ipmi_lan_addr  *lan_addr;
2076        unsigned char ipmb_seq;
2077        long seqid;
2078        struct ipmi_channel *chans;
2079        int rv = 0;
2080
2081        if (addr->channel >= IPMI_MAX_CHANNELS) {
2082                ipmi_inc_stat(intf, sent_invalid_commands);
2083                return -EINVAL;
2084        }
2085
2086        chans = READ_ONCE(intf->channel_list)->c;
2087
2088        if ((chans[addr->channel].medium
2089                                != IPMI_CHANNEL_MEDIUM_8023LAN)
2090                        && (chans[addr->channel].medium
2091                            != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2092                ipmi_inc_stat(intf, sent_invalid_commands);
2093                return -EINVAL;
2094        }
2095
2096        /* 11 for the header and 1 for the checksum. */
2097        if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2098                ipmi_inc_stat(intf, sent_invalid_commands);
2099                return -EMSGSIZE;
2100        }
2101
2102        lan_addr = (struct ipmi_lan_addr *) addr;
2103        if (lan_addr->lun > 3) {
2104                ipmi_inc_stat(intf, sent_invalid_commands);
2105                return -EINVAL;
2106        }
2107
2108        memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2109
2110        if (recv_msg->msg.netfn & 0x1) {
2111                /*
2112                 * It's a response, so use the user's sequence
2113                 * from msgid.
2114                 */
2115                ipmi_inc_stat(intf, sent_lan_responses);
2116                format_lan_msg(smi_msg, msg, lan_addr, msgid,
2117                               msgid, source_lun);
2118
2119                /*
2120                 * Save the receive message so we can use it
2121                 * to deliver the response.
2122                 */
2123                smi_msg->user_data = recv_msg;
2124        } else {
2125                /* It's a command, so get a sequence for it. */
2126                unsigned long flags;
2127
2128                spin_lock_irqsave(&intf->seq_lock, flags);
2129
2130                /*
2131                 * Allocate a sequence number, using the supplied
2132                 * timeout and retry count (or the driver defaults).
2133                 */
2134                rv = intf_next_seq(intf,
2135                                   recv_msg,
2136                                   retry_time_ms,
2137                                   retries,
2138                                   0,
2139                                   &ipmb_seq,
2140                                   &seqid);
2141                if (rv)
2142                        /*
2143                         * We have probably used up all the
2144                         * sequence numbers, so abort.
2145                         */
2146                        goto out_err;
2147
2148                ipmi_inc_stat(intf, sent_lan_commands);
2149
2150                /*
2151                 * Store the sequence number in the message,
2152                 * so that when the send message response
2153                 * comes back we can start the timer.
2154                 */
2155                format_lan_msg(smi_msg, msg, lan_addr,
2156                               STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2157                               ipmb_seq, source_lun);
2158
2159                /*
2160                 * Copy the message into the recv message data, so we
2161                 * can retransmit it later if necessary.
2162                 */
2163                memcpy(recv_msg->msg_data, smi_msg->data,
2164                       smi_msg->data_size);
2165                recv_msg->msg.data = recv_msg->msg_data;
2166                recv_msg->msg.data_len = smi_msg->data_size;
2167
2168                /*
2169                 * We don't unlock until here, because we need
2170                 * to copy the completed message into the
2171                 * recv_msg before we release the lock.
2172                 * Otherwise, race conditions may bite us.  I
2173                 * know that's pretty paranoid, but I prefer
2174                 * to be correct.
2175                 */
2176out_err:
2177                spin_unlock_irqrestore(&intf->seq_lock, flags);
2178        }
2179
2180        return rv;
2181}
2182
2183/*
2184 * Separate from ipmi_request so that the user does not have to be
2185 * supplied in certain circumstances (mainly at panic time).  If
2186 * messages are supplied, they will be freed, even if an error
2187 * occurs.
2188 */
2189static int i_ipmi_request(struct ipmi_user     *user,
2190                          struct ipmi_smi      *intf,
2191                          struct ipmi_addr     *addr,
2192                          long                 msgid,
2193                          struct kernel_ipmi_msg *msg,
2194                          void                 *user_msg_data,
2195                          void                 *supplied_smi,
2196                          struct ipmi_recv_msg *supplied_recv,
2197                          int                  priority,
2198                          unsigned char        source_address,
2199                          unsigned char        source_lun,
2200                          int                  retries,
2201                          unsigned int         retry_time_ms)
2202{
2203        struct ipmi_smi_msg *smi_msg;
2204        struct ipmi_recv_msg *recv_msg;
2205        int rv = 0;
2206
2207        if (supplied_recv)
2208                recv_msg = supplied_recv;
2209        else {
2210                recv_msg = ipmi_alloc_recv_msg();
2211                if (recv_msg == NULL) {
2212                        rv = -ENOMEM;
2213                        goto out;
2214                }
2215        }
2216        recv_msg->user_msg_data = user_msg_data;
2217
2218        if (supplied_smi)
2219                smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2220        else {
2221                smi_msg = ipmi_alloc_smi_msg();
2222                if (smi_msg == NULL) {
2223                        ipmi_free_recv_msg(recv_msg);
2224                        rv = -ENOMEM;
2225                        goto out;
2226                }
2227        }
2228
2229        rcu_read_lock();
2230        if (intf->in_shutdown) {
2231                rv = -ENODEV;
2232                goto out_err;
2233        }
2234
2235        recv_msg->user = user;
2236        if (user)
2237                /* The put happens when the message is freed. */
2238                kref_get(&user->refcount);
2239        recv_msg->msgid = msgid;
2240        /*
2241         * Store the message to send in the receive message so timeout
2242         * responses can get the proper response data.
2243         */
2244        recv_msg->msg = *msg;
2245
2246        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2247                rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2248                                        recv_msg, retries, retry_time_ms);
2249        } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2250                rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2251                                     source_address, source_lun,
2252                                     retries, retry_time_ms);
2253        } else if (is_lan_addr(addr)) {
2254                rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2255                                    source_lun, retries, retry_time_ms);
2256        } else {
2257                /* Unknown address type. */
2258                ipmi_inc_stat(intf, sent_invalid_commands);
2259                rv = -EINVAL;
2260        }
2261
2262        if (rv) {
2263out_err:
2264                ipmi_free_smi_msg(smi_msg);
2265                ipmi_free_recv_msg(recv_msg);
2266        } else {
2267                ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2268
2269                smi_send(intf, intf->handlers, smi_msg, priority);
2270        }
2271        rcu_read_unlock();
2272
2273out:
2274        return rv;
2275}
2276
2277static int check_addr(struct ipmi_smi  *intf,
2278                      struct ipmi_addr *addr,
2279                      unsigned char    *saddr,
2280                      unsigned char    *lun)
2281{
2282        if (addr->channel >= IPMI_MAX_CHANNELS)
2283                return -EINVAL;
2284        addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2285        *lun = intf->addrinfo[addr->channel].lun;
2286        *saddr = intf->addrinfo[addr->channel].address;
2287        return 0;
2288}
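
/*
 * check_addr() bounds-checks the channel and then clamps it with
 * array_index_nospec() so that a mis-speculated out-of-range value
 * cannot be used to index addrinfo[] past its end.
 */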
2289
2290int ipmi_request_settime(struct ipmi_user *user,
2291                         struct ipmi_addr *addr,
2292                         long             msgid,
2293                         struct kernel_ipmi_msg  *msg,
2294                         void             *user_msg_data,
2295                         int              priority,
2296                         int              retries,
2297                         unsigned int     retry_time_ms)
2298{
2299        unsigned char saddr = 0, lun = 0;
2300        int rv, index;
2301
2302        if (!user)
2303                return -EINVAL;
2304
2305        user = acquire_ipmi_user(user, &index);
2306        if (!user)
2307                return -ENODEV;
2308
2309        rv = check_addr(user->intf, addr, &saddr, &lun);
2310        if (!rv)
2311                rv = i_ipmi_request(user,
2312                                    user->intf,
2313                                    addr,
2314                                    msgid,
2315                                    msg,
2316                                    user_msg_data,
2317                                    NULL, NULL,
2318                                    priority,
2319                                    saddr,
2320                                    lun,
2321                                    retries,
2322                                    retry_time_ms);
2323
2324        release_ipmi_user(user, index);
2325        return rv;
2326}
2327EXPORT_SYMBOL(ipmi_request_settime);
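
/*
 * Illustrative caller sketch (not part of the driver): sending a Get
 * Device ID request to the local BMC.  user, msgid and my_cookie are
 * the caller's own; -1 retries and 0 ms ask for the driver defaults,
 * as the driver's own ID/GUID fetches further down do:
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd   = IPMI_GET_DEVICE_ID_CMD,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *)&si, msgid,
 *				  &msg, my_cookie, 0, -1, 0);
 *
 * The response comes back through the user's receive handler with
 * user_msg_data set to my_cookie.
 */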
2328
2329int ipmi_request_supply_msgs(struct ipmi_user     *user,
2330                             struct ipmi_addr     *addr,
2331                             long                 msgid,
2332                             struct kernel_ipmi_msg *msg,
2333                             void                 *user_msg_data,
2334                             void                 *supplied_smi,
2335                             struct ipmi_recv_msg *supplied_recv,
2336                             int                  priority)
2337{
2338        unsigned char saddr = 0, lun = 0;
2339        int rv, index;
2340
2341        if (!user)
2342                return -EINVAL;
2343
2344        user = acquire_ipmi_user(user, &index);
2345        if (!user)
2346                return -ENODEV;
2347
2348        rv = check_addr(user->intf, addr, &saddr, &lun);
2349        if (!rv)
2350                rv = i_ipmi_request(user,
2351                                    user->intf,
2352                                    addr,
2353                                    msgid,
2354                                    msg,
2355                                    user_msg_data,
2356                                    supplied_smi,
2357                                    supplied_recv,
2358                                    priority,
2359                                    saddr,
2360                                    lun,
2361                                    -1, 0);
2362
2363        release_ipmi_user(user, index);
2364        return rv;
2365}
2366EXPORT_SYMBOL(ipmi_request_supply_msgs);
2367
2368static void bmc_device_id_handler(struct ipmi_smi *intf,
2369                                  struct ipmi_recv_msg *msg)
2370{
2371        int rv;
2372
2373        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2374                        || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2375                        || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2376                dev_warn(intf->si_dev,
2377                         "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2378                         msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2379                return;
2380        }
2381
2382        rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2383                        msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2384        if (rv) {
2385                dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2386                intf->bmc->dyn_id_set = 0;
2387        } else {
2388                /*
2389                 * Make sure the id data is available before setting
2390                 * dyn_id_set.
2391                 */
2392                smp_wmb();
2393                intf->bmc->dyn_id_set = 1;
2394        }
2395
2396        wake_up(&intf->waitq);
2397}
2398
2399static int
2400send_get_device_id_cmd(struct ipmi_smi *intf)
2401{
2402        struct ipmi_system_interface_addr si;
2403        struct kernel_ipmi_msg msg;
2404
2405        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2406        si.channel = IPMI_BMC_CHANNEL;
2407        si.lun = 0;
2408
2409        msg.netfn = IPMI_NETFN_APP_REQUEST;
2410        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2411        msg.data = NULL;
2412        msg.data_len = 0;
2413
2414        return i_ipmi_request(NULL,
2415                              intf,
2416                              (struct ipmi_addr *) &si,
2417                              0,
2418                              &msg,
2419                              intf,
2420                              NULL,
2421                              NULL,
2422                              0,
2423                              intf->addrinfo[0].address,
2424                              intf->addrinfo[0].lun,
2425                              -1, 0);
2426}
2427
2428static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2429{
2430        int rv;
2431
2432        bmc->dyn_id_set = 2;
2433
2434        intf->null_user_handler = bmc_device_id_handler;
2435
2436        rv = send_get_device_id_cmd(intf);
2437        if (rv)
2438                return rv;
2439
2440        wait_event(intf->waitq, bmc->dyn_id_set != 2);
2441
2442        if (!bmc->dyn_id_set)
2443                rv = -EIO; /* Something went wrong in the fetch. */
2444
2445        /* dyn_id_set makes the id data available. */
2446        smp_rmb();
2447
2448        intf->null_user_handler = NULL;
2449
2450        return rv;
2451}
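
/*
 * dyn_id_set acts as a small state machine for the fetch above:
 * 2 means a fetch is in flight (set here), 1 means bmc->fetch_id is
 * valid (set by bmc_device_id_handler() after an smp_wmb()), and 0
 * means the fetch failed.  The smp_rmb() after wait_event() pairs with
 * that smp_wmb() so the id data is visible before dyn_id_set is seen
 * as 1.
 */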
2452
2453/*
2454 * Fetch the device id for the bmc/interface.  You must pass in either
2455 * bmc or intf, this code will get the other one.  If the data has
2456 * been recently fetched, this will just use the cached data.  Otherwise
2457 * it will run a new fetch.
2458 *
2459 * Except for the first time this is called (in ipmi_register_smi()),
2460 * this will always return good data.
2461 */
2462static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2463                               struct ipmi_device_id *id,
2464                               bool *guid_set, guid_t *guid, int intf_num)
2465{
2466        int rv = 0;
2467        int prev_dyn_id_set, prev_guid_set;
2468        bool intf_set = intf != NULL;
2469
2470        if (!intf) {
2471                mutex_lock(&bmc->dyn_mutex);
2472retry_bmc_lock:
2473                if (list_empty(&bmc->intfs)) {
2474                        mutex_unlock(&bmc->dyn_mutex);
2475                        return -ENOENT;
2476                }
2477                intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2478                                        bmc_link);
2479                kref_get(&intf->refcount);
2480                mutex_unlock(&bmc->dyn_mutex);
2481                mutex_lock(&intf->bmc_reg_mutex);
2482                mutex_lock(&bmc->dyn_mutex);
2483                if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2484                                             bmc_link)) {
2485                        mutex_unlock(&intf->bmc_reg_mutex);
2486                        kref_put(&intf->refcount, intf_free);
2487                        goto retry_bmc_lock;
2488                }
2489        } else {
2490                mutex_lock(&intf->bmc_reg_mutex);
2491                bmc = intf->bmc;
2492                mutex_lock(&bmc->dyn_mutex);
2493                kref_get(&intf->refcount);
2494        }
2495
2496        /* If we have a valid and current ID, just return that. */
2497        if (intf->in_bmc_register ||
2498            (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2499                goto out_noprocessing;
2500
2501        prev_guid_set = bmc->dyn_guid_set;
2502        __get_guid(intf);
2503
2504        prev_dyn_id_set = bmc->dyn_id_set;
2505        rv = __get_device_id(intf, bmc);
2506        if (rv)
2507                goto out;
2508
2509        /*
2510         * The guid, device id, manufacturer id, and product id should
2511         * not change on a BMC.  If any of them do, we have to do some dancing.
2512         */
2513        if (!intf->bmc_registered
2514            || (!prev_guid_set && bmc->dyn_guid_set)
2515            || (!prev_dyn_id_set && bmc->dyn_id_set)
2516            || (prev_guid_set && bmc->dyn_guid_set
2517                && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2518            || bmc->id.device_id != bmc->fetch_id.device_id
2519            || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2520            || bmc->id.product_id != bmc->fetch_id.product_id) {
2521                struct ipmi_device_id id = bmc->fetch_id;
2522                int guid_set = bmc->dyn_guid_set;
2523                guid_t guid;
2524
2525                guid = bmc->fetch_guid;
2526                mutex_unlock(&bmc->dyn_mutex);
2527
2528                __ipmi_bmc_unregister(intf);
2529                /* Fill in the temporary BMC for good measure. */
2530                intf->bmc->id = id;
2531                intf->bmc->dyn_guid_set = guid_set;
2532                intf->bmc->guid = guid;
2533                if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2534                        need_waiter(intf); /* Retry later on an error. */
2535                else
2536                        __scan_channels(intf, &id);
2537
2538
2539                if (!intf_set) {
2540                        /*
2541                         * We weren't given the interface on the
2542                         * command line, so restart the operation on
2543                         * the next interface for the BMC.
2544                         */
2545                        mutex_unlock(&intf->bmc_reg_mutex);
2546                        mutex_lock(&bmc->dyn_mutex);
2547                        goto retry_bmc_lock;
2548                }
2549
2550                /* We have a new BMC, set it up. */
2551                bmc = intf->bmc;
2552                mutex_lock(&bmc->dyn_mutex);
2553                goto out_noprocessing;
2554        } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2555                /* Version info changed, scan the channels again. */
2556                __scan_channels(intf, &bmc->fetch_id);
2557
2558        bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2559
2560out:
2561        if (rv && prev_dyn_id_set) {
2562                rv = 0; /* Ignore failures if we have previous data. */
2563                bmc->dyn_id_set = prev_dyn_id_set;
2564        }
2565        if (!rv) {
2566                bmc->id = bmc->fetch_id;
2567                if (bmc->dyn_guid_set)
2568                        bmc->guid = bmc->fetch_guid;
2569                else if (prev_guid_set)
2570                        /*
2571                         * The guid used to be valid but the fetch failed,
2572                         * so just use the cached value.
2573                         */
2574                        bmc->dyn_guid_set = prev_guid_set;
2575        }
2576out_noprocessing:
2577        if (!rv) {
2578                if (id)
2579                        *id = bmc->id;
2580
2581                if (guid_set)
2582                        *guid_set = bmc->dyn_guid_set;
2583
2584                if (guid && bmc->dyn_guid_set)
2585                        *guid = bmc->guid;
2586        }
2587
2588        mutex_unlock(&bmc->dyn_mutex);
2589        mutex_unlock(&intf->bmc_reg_mutex);
2590
2591        kref_put(&intf->refcount, intf_free);
2592        return rv;
2593}
2594
2595static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2596                             struct ipmi_device_id *id,
2597                             bool *guid_set, guid_t *guid)
2598{
2599        return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2600}
2601
2602static ssize_t device_id_show(struct device *dev,
2603                              struct device_attribute *attr,
2604                              char *buf)
2605{
2606        struct bmc_device *bmc = to_bmc_device(dev);
2607        struct ipmi_device_id id;
2608        int rv;
2609
2610        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2611        if (rv)
2612                return rv;
2613
2614        return snprintf(buf, 10, "%u\n", id.device_id);
2615}
2616static DEVICE_ATTR_RO(device_id);
2617
2618static ssize_t provides_device_sdrs_show(struct device *dev,
2619                                         struct device_attribute *attr,
2620                                         char *buf)
2621{
2622        struct bmc_device *bmc = to_bmc_device(dev);
2623        struct ipmi_device_id id;
2624        int rv;
2625
2626        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2627        if (rv)
2628                return rv;
2629
2630        return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2631}
2632static DEVICE_ATTR_RO(provides_device_sdrs);
2633
2634static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2635                             char *buf)
2636{
2637        struct bmc_device *bmc = to_bmc_device(dev);
2638        struct ipmi_device_id id;
2639        int rv;
2640
2641        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2642        if (rv)
2643                return rv;
2644
2645        return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2646}
2647static DEVICE_ATTR_RO(revision);
2648
2649static ssize_t firmware_revision_show(struct device *dev,
2650                                      struct device_attribute *attr,
2651                                      char *buf)
2652{
2653        struct bmc_device *bmc = to_bmc_device(dev);
2654        struct ipmi_device_id id;
2655        int rv;
2656
2657        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2658        if (rv)
2659                return rv;
2660
2661        return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2662                        id.firmware_revision_2);
2663}
2664static DEVICE_ATTR_RO(firmware_revision);
2665
2666static ssize_t ipmi_version_show(struct device *dev,
2667                                 struct device_attribute *attr,
2668                                 char *buf)
2669{
2670        struct bmc_device *bmc = to_bmc_device(dev);
2671        struct ipmi_device_id id;
2672        int rv;
2673
2674        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2675        if (rv)
2676                return rv;
2677
2678        return snprintf(buf, 20, "%u.%u\n",
2679                        ipmi_version_major(&id),
2680                        ipmi_version_minor(&id));
2681}
2682static DEVICE_ATTR_RO(ipmi_version);
2683
2684static ssize_t add_dev_support_show(struct device *dev,
2685                                    struct device_attribute *attr,
2686                                    char *buf)
2687{
2688        struct bmc_device *bmc = to_bmc_device(dev);
2689        struct ipmi_device_id id;
2690        int rv;
2691
2692        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2693        if (rv)
2694                return rv;
2695
2696        return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2697}
2698static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2699                   NULL);
2700
2701static ssize_t manufacturer_id_show(struct device *dev,
2702                                    struct device_attribute *attr,
2703                                    char *buf)
2704{
2705        struct bmc_device *bmc = to_bmc_device(dev);
2706        struct ipmi_device_id id;
2707        int rv;
2708
2709        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2710        if (rv)
2711                return rv;
2712
2713        return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2714}
2715static DEVICE_ATTR_RO(manufacturer_id);
2716
2717static ssize_t product_id_show(struct device *dev,
2718                               struct device_attribute *attr,
2719                               char *buf)
2720{
2721        struct bmc_device *bmc = to_bmc_device(dev);
2722        struct ipmi_device_id id;
2723        int rv;
2724
2725        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2726        if (rv)
2727                return rv;
2728
2729        return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2730}
2731static DEVICE_ATTR_RO(product_id);
2732
2733static ssize_t aux_firmware_rev_show(struct device *dev,
2734                                     struct device_attribute *attr,
2735                                     char *buf)
2736{
2737        struct bmc_device *bmc = to_bmc_device(dev);
2738        struct ipmi_device_id id;
2739        int rv;
2740
2741        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2742        if (rv)
2743                return rv;
2744
2745        return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2746                        id.aux_firmware_revision[3],
2747                        id.aux_firmware_revision[2],
2748                        id.aux_firmware_revision[1],
2749                        id.aux_firmware_revision[0]);
2750}
2751static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2752
2753static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2754                         char *buf)
2755{
2756        struct bmc_device *bmc = to_bmc_device(dev);
2757        bool guid_set;
2758        guid_t guid;
2759        int rv;
2760
2761        rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2762        if (rv)
2763                return rv;
2764        if (!guid_set)
2765                return -ENOENT;
2766
2767        return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2768}
2769static DEVICE_ATTR_RO(guid);
2770
2771static struct attribute *bmc_dev_attrs[] = {
2772        &dev_attr_device_id.attr,
2773        &dev_attr_provides_device_sdrs.attr,
2774        &dev_attr_revision.attr,
2775        &dev_attr_firmware_revision.attr,
2776        &dev_attr_ipmi_version.attr,
2777        &dev_attr_additional_device_support.attr,
2778        &dev_attr_manufacturer_id.attr,
2779        &dev_attr_product_id.attr,
2780        &dev_attr_aux_firmware_revision.attr,
2781        &dev_attr_guid.attr,
2782        NULL
2783};
2784
2785static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2786                                       struct attribute *attr, int idx)
2787{
2788        struct device *dev = kobj_to_dev(kobj);
2789        struct bmc_device *bmc = to_bmc_device(dev);
2790        umode_t mode = attr->mode;
2791        int rv;
2792
2793        if (attr == &dev_attr_aux_firmware_revision.attr) {
2794                struct ipmi_device_id id;
2795
2796                rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2797                return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2798        }
2799        if (attr == &dev_attr_guid.attr) {
2800                bool guid_set;
2801
2802                rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2803                return (!rv && guid_set) ? mode : 0;
2804        }
2805        return mode;
2806}
2807
2808static const struct attribute_group bmc_dev_attr_group = {
2809        .attrs          = bmc_dev_attrs,
2810        .is_visible     = bmc_dev_attr_is_visible,
2811};
2812
2813static const struct attribute_group *bmc_dev_attr_groups[] = {
2814        &bmc_dev_attr_group,
2815        NULL
2816};
2817
2818static const struct device_type bmc_device_type = {
2819        .groups         = bmc_dev_attr_groups,
2820};
2821
2822static int __find_bmc_guid(struct device *dev, const void *data)
2823{
2824        const guid_t *guid = data;
2825        struct bmc_device *bmc;
2826        int rv;
2827
2828        if (dev->type != &bmc_device_type)
2829                return 0;
2830
2831        bmc = to_bmc_device(dev);
2832        rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2833        if (rv)
2834                rv = kref_get_unless_zero(&bmc->usecount);
2835        return rv;
2836}
2837
2838/*
2839 * Returns with the bmc's usecount incremented, if it is non-NULL.
2840 */
2841static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2842                                             guid_t *guid)
2843{
2844        struct device *dev;
2845        struct bmc_device *bmc = NULL;
2846
2847        dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2848        if (dev) {
2849                bmc = to_bmc_device(dev);
2850                put_device(dev);
2851        }
2852        return bmc;
2853}
2854
2855struct prod_dev_id {
2856        unsigned int  product_id;
2857        unsigned char device_id;
2858};
2859
2860static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2861{
2862        const struct prod_dev_id *cid = data;
2863        struct bmc_device *bmc;
2864        int rv;
2865
2866        if (dev->type != &bmc_device_type)
2867                return 0;
2868
2869        bmc = to_bmc_device(dev);
2870        rv = (bmc->id.product_id == cid->product_id
2871              && bmc->id.device_id == cid->device_id);
2872        if (rv)
2873                rv = kref_get_unless_zero(&bmc->usecount);
2874        return rv;
2875}
2876
2877/*
2878 * Returns with the bmc's usecount incremented, if it is non-NULL.
2879 */
2880static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2881        struct device_driver *drv,
2882        unsigned int product_id, unsigned char device_id)
2883{
2884        struct prod_dev_id id = {
2885                .product_id = product_id,
2886                .device_id = device_id,
2887        };
2888        struct device *dev;
2889        struct bmc_device *bmc = NULL;
2890
2891        dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2892        if (dev) {
2893                bmc = to_bmc_device(dev);
2894                put_device(dev);
2895        }
2896        return bmc;
2897}
2898
2899static DEFINE_IDA(ipmi_bmc_ida);
2900
2901static void
2902release_bmc_device(struct device *dev)
2903{
2904        kfree(to_bmc_device(dev));
2905}
2906
2907static void cleanup_bmc_work(struct work_struct *work)
2908{
2909        struct bmc_device *bmc = container_of(work, struct bmc_device,
2910                                              remove_work);
2911        int id = bmc->pdev.id; /* Unregister overwrites id */
2912
2913        platform_device_unregister(&bmc->pdev);
2914        ida_simple_remove(&ipmi_bmc_ida, id);
2915}
2916
2917static void
2918cleanup_bmc_device(struct kref *ref)
2919{
2920        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2921
2922        /*
2923         * Remove the platform device in a work queue to avoid issues
2924         * with removing the device attributes while reading a device
2925         * attribute.
2926         */
2927        schedule_work(&bmc->remove_work);
2928}
2929
2930/*
2931 * Must be called with intf->bmc_reg_mutex held.
2932 */
2933static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2934{
2935        struct bmc_device *bmc = intf->bmc;
2936
2937        if (!intf->bmc_registered)
2938                return;
2939
2940        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2941        sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2942        kfree(intf->my_dev_name);
2943        intf->my_dev_name = NULL;
2944
2945        mutex_lock(&bmc->dyn_mutex);
2946        list_del(&intf->bmc_link);
2947        mutex_unlock(&bmc->dyn_mutex);
2948        intf->bmc = &intf->tmp_bmc;
2949        kref_put(&bmc->usecount, cleanup_bmc_device);
2950        intf->bmc_registered = false;
2951}
2952
2953static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2954{
2955        mutex_lock(&intf->bmc_reg_mutex);
2956        __ipmi_bmc_unregister(intf);
2957        mutex_unlock(&intf->bmc_reg_mutex);
2958}
2959
2960/*
2961 * Must be called with intf->bmc_reg_mutex held.
2962 */
2963static int __ipmi_bmc_register(struct ipmi_smi *intf,
2964                               struct ipmi_device_id *id,
2965                               bool guid_set, guid_t *guid, int intf_num)
2966{
2967        int               rv;
2968        struct bmc_device *bmc;
2969        struct bmc_device *old_bmc;
2970
2971        /*
2972         * platform_device_register() can cause bmc_reg_mutex to
2973         * be claimed because of the is_visible functions of
2974         * the attributes.  Release the lock here to eliminate
2975         * that possible recursion.
2976         */
2977        intf->in_bmc_register = true;
2978        mutex_unlock(&intf->bmc_reg_mutex);
2979
2980        /*
2981         * Try to find an existing bmc_device struct that
2982         * already represents the interfaced BMC.
2983         */
2984        mutex_lock(&ipmidriver_mutex);
2985        if (guid_set)
2986                old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2987        else
2988                old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2989                                                    id->product_id,
2990                                                    id->device_id);
2991
2992        /*
2993         * If there is already a bmc_device, reuse it; otherwise
2994         * allocate and register a new BMC device.
2995         */
2996        if (old_bmc) {
2997                bmc = old_bmc;
2998                /*
2999                 * Note: old_bmc already has usecount incremented by
3000                 * the BMC find functions.
3001                 */
3002                intf->bmc = old_bmc;
3003                mutex_lock(&bmc->dyn_mutex);
3004                list_add_tail(&intf->bmc_link, &bmc->intfs);
3005                mutex_unlock(&bmc->dyn_mutex);
3006
3007                dev_info(intf->si_dev,
3008                         "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3009                         bmc->id.manufacturer_id,
3010                         bmc->id.product_id,
3011                         bmc->id.device_id);
3012        } else {
3013                bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3014                if (!bmc) {
3015                        rv = -ENOMEM;
3016                        goto out;
3017                }
3018                INIT_LIST_HEAD(&bmc->intfs);
3019                mutex_init(&bmc->dyn_mutex);
3020                INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3021
3022                bmc->id = *id;
3023                bmc->dyn_id_set = 1;
3024                bmc->dyn_guid_set = guid_set;
3025                bmc->guid = *guid;
3026                bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3027
3028                bmc->pdev.name = "ipmi_bmc";
3029
3030                rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3031                if (rv < 0)
3032                        goto out;
3033                bmc->pdev.dev.driver = &ipmidriver.driver;
3034                bmc->pdev.id = rv;
3035                bmc->pdev.dev.release = release_bmc_device;
3036                bmc->pdev.dev.type = &bmc_device_type;
3037                kref_init(&bmc->usecount);
3038
3039                intf->bmc = bmc;
3040                mutex_lock(&bmc->dyn_mutex);
3041                list_add_tail(&intf->bmc_link, &bmc->intfs);
3042                mutex_unlock(&bmc->dyn_mutex);
3043
3044                rv = platform_device_register(&bmc->pdev);
3045                if (rv) {
3046                        dev_err(intf->si_dev,
3047                                "Unable to register bmc device: %d\n",
3048                                rv);
3049                        goto out_list_del;
3050                }
3051
3052                dev_info(intf->si_dev,
3053                         "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3054                         bmc->id.manufacturer_id,
3055                         bmc->id.product_id,
3056                         bmc->id.device_id);
3057        }
3058
3059        /*
3060         * Create symlinks from the system interface device to the
3061         * bmc device and back.
3062         */
3063        rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3064        if (rv) {
3065                dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3066                goto out_put_bmc;
3067        }
3068
3069        if (intf_num == -1)
3070                intf_num = intf->intf_num;
3071        intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3072        if (!intf->my_dev_name) {
3073                rv = -ENOMEM;
3074                dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3075                        rv);
3076                goto out_unlink1;
3077        }
3078
3079        rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3080                               intf->my_dev_name);
3081        if (rv) {
3082                kfree(intf->my_dev_name);
3083                intf->my_dev_name = NULL;
3084                dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3085                        rv);
3086                goto out_free_my_dev_name;
3087        }
3088
3089        intf->bmc_registered = true;
3090
3091out:
3092        mutex_unlock(&ipmidriver_mutex);
3093        mutex_lock(&intf->bmc_reg_mutex);
3094        intf->in_bmc_register = false;
3095        return rv;
3096
3097
3098out_free_my_dev_name:
3099        kfree(intf->my_dev_name);
3100        intf->my_dev_name = NULL;
3101
3102out_unlink1:
3103        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3104
3105out_put_bmc:
3106        mutex_lock(&bmc->dyn_mutex);
3107        list_del(&intf->bmc_link);
3108        mutex_unlock(&bmc->dyn_mutex);
3109        intf->bmc = &intf->tmp_bmc;
3110        kref_put(&bmc->usecount, cleanup_bmc_device);
3111        goto out;
3112
3113out_list_del:
3114        mutex_lock(&bmc->dyn_mutex);
3115        list_del(&intf->bmc_link);
3116        mutex_unlock(&bmc->dyn_mutex);
3117        intf->bmc = &intf->tmp_bmc;
3118        put_device(&bmc->pdev.dev);
3119        goto out;
3120}
3121
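    /*
     * Send a Get Device GUID command to the BMC over the system
     * interface.  The response is picked up by guid_handler() through
     * intf->null_user_handler.
     */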
3122static int
3123send_guid_cmd(struct ipmi_smi *intf, int chan)
3124{
3125        struct kernel_ipmi_msg            msg;
3126        struct ipmi_system_interface_addr si;
3127
3128        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3129        si.channel = IPMI_BMC_CHANNEL;
3130        si.lun = 0;
3131
3132        msg.netfn = IPMI_NETFN_APP_REQUEST;
3133        msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3134        msg.data = NULL;
3135        msg.data_len = 0;
3136        return i_ipmi_request(NULL,
3137                              intf,
3138                              (struct ipmi_addr *) &si,
3139                              0,
3140                              &msg,
3141                              intf,
3142                              NULL,
3143                              NULL,
3144                              0,
3145                              intf->addrinfo[0].address,
3146                              intf->addrinfo[0].lun,
3147                              -1, 0);
3148}
3149
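    /*
     * Handle the response to the Get Device GUID command sent by
     * send_guid_cmd().  On success the GUID is copied into
     * bmc->fetch_guid and dyn_guid_set is set to 1; on an error or a
     * short response dyn_guid_set is cleared.  Either way the waiter
     * in __get_guid() is woken up.
     */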
3150static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3151{
3152        struct bmc_device *bmc = intf->bmc;
3153
3154        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3155            || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3156            || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3157                /* Not for me */
3158                return;
3159
3160        if (msg->msg.data[0] != 0) {
3161                /* Error from getting the GUID, the BMC doesn't have one. */
3162                bmc->dyn_guid_set = 0;
3163                goto out;
3164        }
3165
3166        if (msg->msg.data_len < UUID_SIZE + 1) {
3167                bmc->dyn_guid_set = 0;
3168                dev_warn(intf->si_dev,
3169                         "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3170                         msg->msg.data_len, UUID_SIZE + 1);
3171                goto out;
3172        }
3173
3174        guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
3175        /*
3176         * Make sure the guid data is available before setting
3177         * dyn_guid_set.
3178         */
3179        smp_wmb();
3180        bmc->dyn_guid_set = 1;
3181 out:
3182        wake_up(&intf->waitq);
3183}
3184
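    /*
     * Synchronously fetch the BMC's GUID.  dyn_guid_set is set to 2
     * to mark the fetch as in progress, the request is sent, and we
     * sleep until guid_handler() (or a send failure) resolves it to
     * 0 (no GUID available) or 1 (GUID valid).
     */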
3185static void __get_guid(struct ipmi_smi *intf)
3186{
3187        int rv;
3188        struct bmc_device *bmc = intf->bmc;
3189
3190        bmc->dyn_guid_set = 2;
3191        intf->null_user_handler = guid_handler;
3192        rv = send_guid_cmd(intf, 0);
3193        if (rv)
3194                /* Send failed, no GUID available. */
3195                bmc->dyn_guid_set = 0;
3196
3197        wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3198
3199        /* dyn_guid_set makes the guid data available. */
3200        smp_rmb();
3201
3202        intf->null_user_handler = NULL;
3203}
3204
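    /*
     * Send a Get Channel Info command for the given channel to the
     * BMC over the system interface; the response is processed by
     * channel_handler().
     */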
3205static int
3206send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3207{
3208        struct kernel_ipmi_msg            msg;
3209        unsigned char                     data[1];
3210        struct ipmi_system_interface_addr si;
3211
3212        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3213        si.channel = IPMI_BMC_CHANNEL;
3214        si.lun = 0;
3215
3216        msg.netfn = IPMI_NETFN_APP_REQUEST;
3217        msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3218        msg.data = data;
3219        msg.data_len = 1;
3220        data[0] = chan;
3221        return i_ipmi_request(NULL,
3222                              intf,
3223                              (struct ipmi_addr *) &si,
3224                              0,
3225                              &msg,
3226                              intf,
3227                              NULL,
3228                              NULL,
3229                              0,
3230                              intf->addrinfo[0].address,
3231                              intf->addrinfo[0].lun,
3232                              -1, 0);
3233}
3234
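    /*
     * Process Get Channel Info responses during the channel scan
     * started by __scan_channels().  The channel medium comes from
     * the low 7 bits of data[2] and the protocol from the low 5 bits
     * of data[3].  The scan queries one channel per response until
     * all IPMI_MAX_CHANNELS have been covered (or an error stops it),
     * then publishes the working channel set and wakes the waiter.
     */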
3235static void
3236channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3237{
3238        int rv = 0;
3239        int ch;
3240        unsigned int set = intf->curr_working_cset;
3241        struct ipmi_channel *chans;
3242
3243        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3244            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3245            && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3246                /* It's the one we want */
3247                if (msg->msg.data[0] != 0) {
3248                        /* Got an error from the channel, just go on. */
3249
3250                        if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3251                                /*
3252                                 * If the MC does not support this
3253                                 * command, that is legal.  We just
3254                                 * assume it has one IPMB at channel
3255                                 * zero.
3256                                 */
3257                                intf->wchannels[set].c[0].medium
3258                                        = IPMI_CHANNEL_MEDIUM_IPMB;
3259                                intf->wchannels[set].c[0].protocol
3260                                        = IPMI_CHANNEL_PROTOCOL_IPMB;
3261
3262                                intf->channel_list = intf->wchannels + set;
3263                                intf->channels_ready = true;
3264                                wake_up(&intf->waitq);
3265                                goto out;
3266                        }
3267                        goto next_channel;
3268                }
3269                if (msg->msg.data_len < 4) {
3270                        /* Message not big enough, just go on. */
3271                        goto next_channel;
3272                }
3273                ch = intf->curr_channel;
3274                chans = intf->wchannels[set].c;
3275                chans[ch].medium = msg->msg.data[2] & 0x7f;
3276                chans[ch].protocol = msg->msg.data[3] & 0x1f;
3277
3278 next_channel:
3279                intf->curr_channel++;
3280                if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3281                        intf->channel_list = intf->wchannels + set;
3282                        intf->channels_ready = true;
3283                        wake_up(&intf->waitq);
3284                } else {
3285                        intf->channel_list = intf->wchannels + set;
3286                        intf->channels_ready = true;
3287                        rv = send_channel_info_cmd(intf, intf->curr_channel);
3288                }
3289
3290                if (rv) {
3291                        /* Got an error somehow, just give up. */
3292                        dev_warn(intf->si_dev,
3293                                 "Error sending channel information for channel %d: %d\n",
3294                                 intf->curr_channel, rv);
3295
3296                        intf->channel_list = intf->wchannels + set;
3297                        intf->channels_ready = true;
3298                        wake_up(&intf->waitq);
3299                }
3300        }
3301 out:
3302        return;
3303}
3304
3305/*
3306 * Must be holding intf->bmc_reg_mutex to call this.
3307 */
3308static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3309{
3310        int rv;
3311
3312        if (ipmi_version_major(id) > 1
3313                        || (ipmi_version_major(id) == 1
3314                            && ipmi_version_minor(id) >= 5)) {
3315                unsigned int set;
3316
3317                /*
3318                 * Start scanning the channels to see what is
3319                 * available.
3320                 */
3321                set = !intf->curr_working_cset;
3322                intf->curr_working_cset = set;
3323                memset(&intf->wchannels[set], 0,
3324                       sizeof(struct ipmi_channel_set));
3325
3326                intf->null_user_handler = channel_handler;
3327                intf->curr_channel = 0;
3328                rv = send_channel_info_cmd(intf, 0);
3329                if (rv) {
3330                        dev_warn(intf->si_dev,
3331                                 "Error sending channel information for channel 0, %d\n",
3332                                 rv);
3333                        return -EIO;
3334                }
3335
3336                /* Wait for the channel info to be read. */
3337                wait_event(intf->waitq, intf->channels_ready);
3338                intf->null_user_handler = NULL;
3339        } else {
3340                unsigned int set = intf->curr_working_cset;
3341
3342                /* Assume a single IPMB channel at zero. */
3343                intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3344                intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3345                intf->channel_list = intf->wchannels + set;
3346                intf->channels_ready = true;
3347        }
3348
3349        return 0;
3350}
3351
3352static void ipmi_poll(struct ipmi_smi *intf)
3353{
3354        if (intf->handlers->poll)
3355                intf->handlers->poll(intf->send_info);
3356        /* In case something came in */
3357        handle_new_recv_msgs(intf);
3358}
3359
3360void ipmi_poll_interface(struct ipmi_user *user)
3361{
3362        ipmi_poll(user->intf);
3363}
3364EXPORT_SYMBOL(ipmi_poll_interface);
3365
3366static void redo_bmc_reg(struct work_struct *work)
3367{
3368        struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3369                                             bmc_reg_work);
3370
3371        if (!intf->in_shutdown)
3372                bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3373
3374        kref_put(&intf->refcount, intf_free);
3375}
3376
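    /*
     * Register a new low-level (system) interface with the message
     * handler: allocate and initialize the per-interface state, add
     * the interface to the interface list, start the lower layer,
     * fetch the device ID, scan the channels, and finally tell the
     * SMI watchers about the new interface.
     *
     * A rough, illustrative sketch of how a lower-layer driver might
     * call this (the names below are hypothetical; only the callbacks
     * actually used in this file -- start_processing, sender, poll
     * and shutdown -- are shown):
     *
     *	static const struct ipmi_smi_handlers my_handlers = {
     *		.start_processing = my_start_processing,
     *		.sender           = my_sender,
     *		.poll             = my_poll,
     *		.shutdown         = my_shutdown,
     *	};
     *
     *	rv = ipmi_register_smi(&my_handlers, my_info, dev,
     *			       IPMI_BMC_SLAVE_ADDR);
     */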
3377int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3378                      void                     *send_info,
3379                      struct device            *si_dev,
3380                      unsigned char            slave_addr)
3381{
3382        int              i, j;
3383        int              rv;
3384        struct ipmi_smi *intf, *tintf;
3385        struct list_head *link;
3386        struct ipmi_device_id id;
3387
3388        /*
3389         * Make sure the driver is actually initialized; this handles
3390         * problems with initialization order.
3391         */
3392        rv = ipmi_init_msghandler();
3393        if (rv)
3394                return rv;
3395
3396        intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3397        if (!intf)
3398                return -ENOMEM;
3399
3400        rv = init_srcu_struct(&intf->users_srcu);
3401        if (rv) {
3402                kfree(intf);
3403                return rv;
3404        }
3405
3406
3407        intf->bmc = &intf->tmp_bmc;
3408        INIT_LIST_HEAD(&intf->bmc->intfs);
3409        mutex_init(&intf->bmc->dyn_mutex);
3410        INIT_LIST_HEAD(&intf->bmc_link);
3411        mutex_init(&intf->bmc_reg_mutex);
3412        intf->intf_num = -1; /* Mark it invalid for now. */
3413        kref_init(&intf->refcount);
3414        INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3415        intf->si_dev = si_dev;
3416        for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3417                intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3418                intf->addrinfo[j].lun = 2;
3419        }
3420        if (slave_addr != 0)
3421                intf->addrinfo[0].address = slave_addr;
3422        INIT_LIST_HEAD(&intf->users);
3423        intf->handlers = handlers;
3424        intf->send_info = send_info;
3425        spin_lock_init(&intf->seq_lock);
3426        for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3427                intf->seq_table[j].inuse = 0;
3428                intf->seq_table[j].seqid = 0;
3429        }
3430        intf->curr_seq = 0;
3431        spin_lock_init(&intf->waiting_rcv_msgs_lock);
3432        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3433        tasklet_init(&intf->recv_tasklet,
3434                     smi_recv_tasklet,
3435                     (unsigned long) intf);
3436        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3437        spin_lock_init(&intf->xmit_msgs_lock);
3438        INIT_LIST_HEAD(&intf->xmit_msgs);
3439        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3440        spin_lock_init(&intf->events_lock);
3441        spin_lock_init(&intf->watch_lock);
3442        atomic_set(&intf->event_waiters, 0);
3443        intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3444        INIT_LIST_HEAD(&intf->waiting_events);
3445        intf->waiting_events_count = 0;
3446        mutex_init(&intf->cmd_rcvrs_mutex);
3447        spin_lock_init(&intf->maintenance_mode_lock);
3448        INIT_LIST_HEAD(&intf->cmd_rcvrs);
3449        init_waitqueue_head(&intf->waitq);
3450        for (i = 0; i < IPMI_NUM_STATS; i++)
3451                atomic_set(&intf->stats[i], 0);
3452
3453        mutex_lock(&ipmi_interfaces_mutex);
3454        /* Look for a hole in the numbers. */
3455        i = 0;
3456        link = &ipmi_interfaces;
3457        list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3458                if (tintf->intf_num != i) {
3459                        link = &tintf->link;
3460                        break;
3461                }
3462                i++;
3463        }
3464        /* Add the new interface in numeric order. */
3465        if (i == 0)
3466                list_add_rcu(&intf->link, &ipmi_interfaces);
3467        else
3468                list_add_tail_rcu(&intf->link, link);
3469
3470        rv = handlers->start_processing(send_info, intf);
3471        if (rv)
3472                goto out_err;
3473
3474        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3475        if (rv) {
3476                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3477                goto out_err_started;
3478        }
3479
3480        mutex_lock(&intf->bmc_reg_mutex);
3481        rv = __scan_channels(intf, &id);
3482        mutex_unlock(&intf->bmc_reg_mutex);
3483        if (rv)
3484                goto out_err_bmc_reg;
3485
3486        /*
3487         * Keep memory order straight for RCU readers.  Make
3488         * sure everything else is committed to memory before
3489         * setting intf_num to mark the interface valid.
3490         */
3491        smp_wmb();
3492        intf->intf_num = i;
3493        mutex_unlock(&ipmi_interfaces_mutex);
3494
3495        /* After this point the interface is legal to use. */
3496        call_smi_watchers(i, intf->si_dev);
3497
3498        return 0;
3499
3500 out_err_bmc_reg:
3501        ipmi_bmc_unregister(intf);
3502 out_err_started:
3503        if (intf->handlers->shutdown)
3504                intf->handlers->shutdown(intf->send_info);
3505 out_err:
3506        list_del_rcu(&intf->link);
3507        mutex_unlock(&ipmi_interfaces_mutex);
3508        synchronize_srcu(&ipmi_interfaces_srcu);
3509        cleanup_srcu_struct(&intf->users_srcu);
3510        kref_put(&intf->refcount, intf_free);
3511
3512        return rv;
3513}
3514EXPORT_SYMBOL(ipmi_register_smi);
3515
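    /*
     * Turn a queued SMI request into a locally generated error
     * response: set the response bit in the netfn/LUN byte, keep the
     * command byte, append the given completion code, and feed the
     * result through handle_one_recv_msg() as if the BMC had
     * answered.
     */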
3516static void deliver_smi_err_response(struct ipmi_smi *intf,
3517                                     struct ipmi_smi_msg *msg,
3518                                     unsigned char err)
3519{
3520        msg->rsp[0] = msg->data[0] | 4;
3521        msg->rsp[1] = msg->data[1];
3522        msg->rsp[2] = err;
3523        msg->rsp_size = 3;
3524        /* It's an error, so it will never requeue; no need to check the return. */
3525        handle_one_recv_msg(intf, msg);
3526}
3527
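    /*
     * Flush a dying interface's messages: wait for the message
     * currently in the lower layer to clear out, then deliver
     * IPMI_ERR_UNSPECIFIED error responses for everything still in
     * the transmit queues and for every outstanding entry in the
     * sequence table, so no user is left waiting forever.
     */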
3528static void cleanup_smi_msgs(struct ipmi_smi *intf)
3529{
3530        int              i;
3531        struct seq_table *ent;
3532        struct ipmi_smi_msg *msg;
3533        struct list_head *entry;
3534        struct list_head tmplist;
3535
3536        /* Clear out our transmit queues and hold the messages. */
3537        INIT_LIST_HEAD(&tmplist);
3538        list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3539        list_splice_tail(&intf->xmit_msgs, &tmplist);
3540
3541        /* Current message first, to preserve order */
3542        while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3543                /* Wait for the message to clear out. */
3544                schedule_timeout(1);
3545        }
3546
3547        /* No need for locks, the interface is down. */
3548
3549        /*
3550         * Return errors for all pending messages in queue and in the
3551         * tables waiting for remote responses.
3552         */
3553        while (!list_empty(&tmplist)) {
3554                entry = tmplist.next;
3555                list_del(entry);
3556                msg = list_entry(entry, struct ipmi_smi_msg, link);
3557                deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3558        }
3559
3560        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3561                ent = &intf->seq_table[i];
3562                if (!ent->inuse)
3563                        continue;
3564                deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3565        }
3566}
3567
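    /*
     * Tear down a registered interface: mark it invalid and shutting
     * down, unlink it from the interface list, notify the SMI
     * watchers, destroy any remaining users, shut down the lower
     * layer, flush the remaining messages, and drop the BMC device
     * registration.
     */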
3568void ipmi_unregister_smi(struct ipmi_smi *intf)
3569{
3570        struct ipmi_smi_watcher *w;
3571        int intf_num = intf->intf_num, index;
3572
3573        mutex_lock(&ipmi_interfaces_mutex);
3574        intf->intf_num = -1;
3575        intf->in_shutdown = true;
3576        list_del_rcu(&intf->link);
3577        mutex_unlock(&ipmi_interfaces_mutex);
3578        synchronize_srcu(&ipmi_interfaces_srcu);
3579
3580        /* At this point no users can be added to the interface. */
3581
3582        /*
3583         * Call all the watcher interfaces to tell them that
3584         * an interface is going away.
3585         */
3586        mutex_lock(&smi_watchers_mutex);
3587        list_for_each_entry(w, &smi_watchers, link)
3588                w->smi_gone(intf_num);
3589        mutex_unlock(&smi_watchers_mutex);
3590
3591        index = srcu_read_lock(&intf->users_srcu);
3592        while (!list_empty(&intf->users)) {
3593                struct ipmi_user *user =
3594                        container_of(list_next_rcu(&intf->users),
3595                                     struct ipmi_user, link);
3596
3597                _ipmi_destroy_user(user);
3598        }
3599        srcu_read_unlock(&intf->users_srcu, index);
3600
3601        if (intf->handlers->shutdown)
3602                intf->handlers->shutdown(intf->send_info);
3603
3604        cleanup_smi_msgs(intf);
3605
3606        ipmi_bmc_unregister(intf);
3607
3608        cleanup_srcu_struct(&intf->users_srcu);
3609        kref_put(&intf->refcount, intf_free);
3610}
3611EXPORT_SYMBOL(ipmi_unregister_smi);
3612
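    /*
     * Handle a Get Message response carrying an IPMB response from a
     * remote device.  The sequence number is matched against the
     * pending-request table; if found, the response data is copied
     * into the saved receive message and delivered to the user that
     * issued the request.
     */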
3613static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3614                                   struct ipmi_smi_msg *msg)
3615{
3616        struct ipmi_ipmb_addr ipmb_addr;
3617        struct ipmi_recv_msg  *recv_msg;
3618
3619        /*
3620         * This is 11, not 10, because the response must contain a
3621         * completion code.
3622         */
3623        if (msg->rsp_size < 11) {
3624                /* Message not big enough, just ignore it. */
3625                ipmi_inc_stat(intf, invalid_ipmb_responses);
3626                return 0;
3627        }
3628
3629        if (msg->rsp[2] != 0) {
3630                /* An error getting the response, just ignore it. */
3631                return 0;
3632        }
3633
3634        ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3635        ipmb_addr.slave_addr = msg->rsp[6];
3636        ipmb_addr.channel = msg->rsp[3] & 0x0f;
3637        ipmb_addr.lun = msg->rsp[7] & 3;
3638
3639        /*
3640         * It's a response from a remote entity.  Look up the sequence
3641         * number and handle the response.
3642         */
3643        if (intf_find_seq(intf,
3644                          msg->rsp[7] >> 2,
3645                          msg->rsp[3] & 0x0f,
3646                          msg->rsp[8],
3647                          (msg->rsp[4] >> 2) & (~1),
3648                          (struct ipmi_addr *) &ipmb_addr,
3649                          &recv_msg)) {
3650                /*
3651                 * We were unable to find the sequence number,
3652                 * so just nuke the message.
3653                 */
3654                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3655                return 0;
3656        }
3657
3658        memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3659        /*
3660         * The other fields matched, so no need to set them, except
3661         * for netfn, which needs to be the response that was
3662         * returned, not the request value.
3663         */
3664        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3665        recv_msg->msg.data = recv_msg->msg_data;
3666        recv_msg->msg.data_len = msg->rsp_size - 10;
3667        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3668        if (deliver_response(intf, recv_msg))
3669                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3670        else
3671                ipmi_inc_stat(intf, handled_ipmb_responses);
3672
3673        return 0;
3674}
3675
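    /*
     * Handle a Get Message response carrying an IPMB command sent to
     * us by another entity.  If a user has registered for the
     * netfn/cmd/channel, the command is delivered to it; otherwise an
     * "invalid command" completion code is sent back to the
     * originator.  Returns 1 to requeue the message if the receive
     * message could not be allocated.
     */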
3676static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3677                                   struct ipmi_smi_msg *msg)
3678{
3679        struct cmd_rcvr          *rcvr;
3680        int                      rv = 0;
3681        unsigned char            netfn;
3682        unsigned char            cmd;
3683        unsigned char            chan;
3684        struct ipmi_user         *user = NULL;
3685        struct ipmi_ipmb_addr    *ipmb_addr;
3686        struct ipmi_recv_msg     *recv_msg;
3687
3688        if (msg->rsp_size < 10) {
3689                /* Message not big enough, just ignore it. */
3690                ipmi_inc_stat(intf, invalid_commands);
3691                return 0;
3692        }
3693
3694        if (msg->rsp[2] != 0) {
3695                /* An error getting the response, just ignore it. */
3696                return 0;
3697        }
3698
3699        netfn = msg->rsp[4] >> 2;
3700        cmd = msg->rsp[8];
3701        chan = msg->rsp[3] & 0xf;
3702
3703        rcu_read_lock();
3704        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3705        if (rcvr) {
3706                user = rcvr->user;
3707                kref_get(&user->refcount);
3708        } else
3709                user = NULL;
3710        rcu_read_unlock();
3711
3712        if (user == NULL) {
3713                /* We didn't find a user, deliver an error response. */
3714                ipmi_inc_stat(intf, unhandled_commands);
3715
3716                msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3717                msg->data[1] = IPMI_SEND_MSG_CMD;
3718                msg->data[2] = msg->rsp[3];
3719                msg->data[3] = msg->rsp[6];
3720                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3721                msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3722                msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3723                /* rqseq/lun */
3724                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3725                msg->data[8] = msg->rsp[8]; /* cmd */
3726                msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3727                msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3728                msg->data_size = 11;
3729
3730                ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3731
3732                rcu_read_lock();
3733                if (!intf->in_shutdown) {
3734                        smi_send(intf, intf->handlers, msg, 0);
3735                        /*
3736                         * We used the message, so return the value
3737                         * that causes it to not be freed or
3738                         * queued.
3739                         */
3740                        rv = -1;
3741                }
3742                rcu_read_unlock();
3743        } else {
3744                recv_msg = ipmi_alloc_recv_msg();
3745                if (!recv_msg) {
3746                        /*
3747                         * We couldn't allocate memory for the
3748                         * message, so requeue it for handling
3749                         * later.
3750                         */
3751                        rv = 1;
3752                        kref_put(&user->refcount, free_user);
3753                } else {
3754                        /* Extract the source address from the data. */
3755                        ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3756                        ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3757                        ipmb_addr->slave_addr = msg->rsp[6];
3758                        ipmb_addr->lun = msg->rsp[7] & 3;
3759                        ipmb_addr->channel = msg->rsp[3] & 0xf;
3760
3761                        /*
3762                         * Extract the rest of the message information
3763                         * from the IPMB header.
3764                         */
3765                        recv_msg->user = user;
3766                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3767                        recv_msg->msgid = msg->rsp[7] >> 2;
3768                        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3769                        recv_msg->msg.cmd = msg->rsp[8];
3770                        recv_msg->msg.data = recv_msg->msg_data;
3771
3772                        /*
3773                         * We chop off 10, not 9 bytes because the checksum
3774                         * at the end also needs to be removed.
3775                         */
3776                        recv_msg->msg.data_len = msg->rsp_size - 10;
3777                        memcpy(recv_msg->msg_data, &msg->rsp[9],
3778                               msg->rsp_size - 10);
3779                        if (deliver_response(intf, recv_msg))
3780                                ipmi_inc_stat(intf, unhandled_commands);
3781                        else
3782                                ipmi_inc_stat(intf, handled_commands);
3783                }
3784        }
3785
3786        return rv;
3787}
3788
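    /*
     * Like handle_ipmb_get_msg_rsp(), but for responses that arrive
     * over a LAN channel: look up the pending request by sequence
     * number and deliver the response to its originator.
     */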
3789static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3790                                  struct ipmi_smi_msg *msg)
3791{
3792        struct ipmi_lan_addr  lan_addr;
3793        struct ipmi_recv_msg  *recv_msg;
3794
3795
3796        /*
3797         * This is 13, not 12, because the response must contain a
3798         * completion code.
3799         */
3800        if (msg->rsp_size < 13) {
3801                /* Message not big enough, just ignore it. */
3802                ipmi_inc_stat(intf, invalid_lan_responses);
3803                return 0;
3804        }
3805
3806        if (msg->rsp[2] != 0) {
3807                /* An error getting the response, just ignore it. */
3808                return 0;
3809        }
3810
3811        lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3812        lan_addr.session_handle = msg->rsp[4];
3813        lan_addr.remote_SWID = msg->rsp[8];
3814        lan_addr.local_SWID = msg->rsp[5];
3815        lan_addr.channel = msg->rsp[3] & 0x0f;
3816        lan_addr.privilege = msg->rsp[3] >> 4;
3817        lan_addr.lun = msg->rsp[9] & 3;
3818
3819        /*
3820         * It's a response from a remote entity.  Look up the sequence
3821         * number and handle the response.
3822         */
3823        if (intf_find_seq(intf,
3824                          msg->rsp[9] >> 2,
3825                          msg->rsp[3] & 0x0f,
3826                          msg->rsp[10],
3827                          (msg->rsp[6] >> 2) & (~1),
3828                          (struct ipmi_addr *) &lan_addr,
3829                          &recv_msg)) {
3830                /*
3831                 * We were unable to find the sequence number,
3832                 * so just nuke the message.
3833                 */
3834                ipmi_inc_stat(intf, unhandled_lan_responses);
3835                return 0;
3836        }
3837
3838        memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3839        /*
3840         * The other fields matched, so no need to set them, except
3841         * for netfn, which needs to be the response that was
3842         * returned, not the request value.
3843         */
3844        recv_msg->msg.netfn = msg->rsp[6] >> 2;
3845        recv_msg->msg.data = recv_msg->msg_data;
3846        recv_msg->msg.data_len = msg->rsp_size - 12;
3847        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3848        if (deliver_response(intf, recv_msg))
3849                ipmi_inc_stat(intf, unhandled_lan_responses);
3850        else
3851                ipmi_inc_stat(intf, handled_lan_responses);
3852
3853        return 0;
3854}
3855
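    /*
     * Handle a command received over a LAN channel.  If a user has
     * registered for the netfn/cmd/channel it is delivered there;
     * unlike the IPMB case, unhandled LAN commands are simply
     * dropped.  Returns 1 to requeue the message on allocation
     * failure.
     */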
3856static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3857                                  struct ipmi_smi_msg *msg)
3858{
3859        struct cmd_rcvr          *rcvr;
3860        int                      rv = 0;
3861        unsigned char            netfn;
3862        unsigned char            cmd;
3863        unsigned char            chan;
3864        struct ipmi_user         *user = NULL;
3865        struct ipmi_lan_addr     *lan_addr;
3866        struct ipmi_recv_msg     *recv_msg;
3867
3868        if (msg->rsp_size < 12) {
3869                /* Message not big enough, just ignore it. */
3870                ipmi_inc_stat(intf, invalid_commands);
3871                return 0;
3872        }
3873
3874        if (msg->rsp[2] != 0) {
3875                /* An error getting the response, just ignore it. */
3876                return 0;
3877        }
3878
3879        netfn = msg->rsp[6] >> 2;
3880        cmd = msg->rsp[10];
3881        chan = msg->rsp[3] & 0xf;
3882
3883        rcu_read_lock();
3884        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3885        if (rcvr) {
3886                user = rcvr->user;
3887                kref_get(&user->refcount);
3888        } else
3889                user = NULL;
3890        rcu_read_unlock();
3891
3892        if (user == NULL) {
3893                /* We didn't find a user, just give up. */
3894                ipmi_inc_stat(intf, unhandled_commands);
3895
3896                /*
3897                 * Don't do anything with these messages, just allow
3898                 * them to be freed.
3899                 */
3900                rv = 0;
3901        } else {
3902                recv_msg = ipmi_alloc_recv_msg();
3903                if (!recv_msg) {
3904                        /*
3905                         * We couldn't allocate memory for the
3906                         * message, so requeue it for handling later.
3907                         */
3908                        rv = 1;
3909                        kref_put(&user->refcount, free_user);
3910                } else {
3911                        /* Extract the source address from the data. */
3912                        lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3913                        lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3914                        lan_addr->session_handle = msg->rsp[4];
3915                        lan_addr->remote_SWID = msg->rsp[8];
3916                        lan_addr->local_SWID = msg->rsp[5];
3917                        lan_addr->lun = msg->rsp[9] & 3;
3918                        lan_addr->channel = msg->rsp[3] & 0xf;
3919                        lan_addr->privilege = msg->rsp[3] >> 4;
3920
3921                        /*
3922                         * Extract the rest of the message information
3923                         * from the IPMB header.
3924                         */
3925                        recv_msg->user = user;
3926                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3927                        recv_msg->msgid = msg->rsp[9] >> 2;
3928                        recv_msg->msg.netfn = msg->rsp[6] >> 2;
3929                        recv_msg->msg.cmd = msg->rsp[10];
3930                        recv_msg->msg.data = recv_msg->msg_data;
3931
3932                        /*
3933                         * We chop off 12, not 11 bytes because the checksum
3934                         * at the end also needs to be removed.
3935                         */
3936                        recv_msg->msg.data_len = msg->rsp_size - 12;
3937                        memcpy(recv_msg->msg_data, &msg->rsp[11],
3938                               msg->rsp_size - 12);
3939                        if (deliver_response(intf, recv_msg))
3940                                ipmi_inc_stat(intf, unhandled_commands);
3941                        else
3942                                ipmi_inc_stat(intf, handled_commands);
3943                }
3944        }
3945
3946        return rv;
3947}
3948
3949/*
3950 * This routine will handle "Get Message" command responses with
3951 * channels that use an OEM Medium. The message format belongs to
3952 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3953 * Chapter 22, sections 22.6 and 22.24 for more details.
3954 */
3955static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3956                                  struct ipmi_smi_msg *msg)
3957{
3958        struct cmd_rcvr       *rcvr;
3959        int                   rv = 0;
3960        unsigned char         netfn;
3961        unsigned char         cmd;
3962        unsigned char         chan;
3963        struct ipmi_user *user = NULL;
3964        struct ipmi_system_interface_addr *smi_addr;
3965        struct ipmi_recv_msg  *recv_msg;
3966
3967        /*
3968         * We expect the OEM SW to perform error checking, so we
3969         * just do some basic sanity checks here.
3970         */
3971        if (msg->rsp_size < 4) {
3972                /* Message not big enough, just ignore it. */
3973                ipmi_inc_stat(intf, invalid_commands);
3974                return 0;
3975        }
3976
3977        if (msg->rsp[2] != 0) {
3978                /* An error getting the response, just ignore it. */
3979                return 0;
3980        }
3981
3982        /*
3983         * This is an OEM message, so the OEM needs to know how to
3984         * handle the message.  We do no interpretation.
3985         */
3986        netfn = msg->rsp[0] >> 2;
3987        cmd = msg->rsp[1];
3988        chan = msg->rsp[3] & 0xf;
3989
3990        rcu_read_lock();
3991        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3992        if (rcvr) {
3993                user = rcvr->user;
3994                kref_get(&user->refcount);
3995        } else
3996                user = NULL;
3997        rcu_read_unlock();
3998
3999        if (user == NULL) {
4000                /* We didn't find a user, just give up. */
4001                ipmi_inc_stat(intf, unhandled_commands);
4002
4003                /*
4004                 * Don't do anything with these messages, just allow
4005                 * them to be freed.
4006                 */
4007
4008                rv = 0;
4009        } else {
4010                recv_msg = ipmi_alloc_recv_msg();
4011                if (!recv_msg) {
4012                        /*
4013                         * We couldn't allocate memory for the
4014                         * message, so requeue it for handling
4015                         * later.
4016                         */
4017                        rv = 1;
4018                        kref_put(&user->refcount, free_user);
4019                } else {
4020                        /*
4021                         * OEM Messages are expected to be delivered via
4022                         * the system interface to SMS software.  We might
4023                         * need to revisit this depending on OEM
4024                         * requirements.
4025                         */
4026                        smi_addr = ((struct ipmi_system_interface_addr *)
4027                                    &recv_msg->addr);
4028                        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4029                        smi_addr->channel = IPMI_BMC_CHANNEL;
4030                        smi_addr->lun = msg->rsp[0] & 3;
4031
4032                        recv_msg->user = user;
4033                        recv_msg->user_msg_data = NULL;
4034                        recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4035                        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4036                        recv_msg->msg.cmd = msg->rsp[1];
4037                        recv_msg->msg.data = recv_msg->msg_data;
4038
4039                        /*
4040                         * The message starts at byte 4, which follows the
4041                         * Channel Byte in the "GET MESSAGE" command.
4042                         */
4043                        recv_msg->msg.data_len = msg->rsp_size - 4;
4044                        memcpy(recv_msg->msg_data, &msg->rsp[4],
4045                               msg->rsp_size - 4);
4046                        if (deliver_response(intf, recv_msg))
4047                                ipmi_inc_stat(intf, unhandled_commands);
4048                        else
4049                                ipmi_inc_stat(intf, handled_commands);
4050                }
4051        }
4052
4053        return rv;
4054}
4055
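    /*
     * Fill in a receive message as an asynchronous event from the
     * BMC: system interface address, async event receive type, and
     * the event data starting at byte 3 of the response.
     */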
4056static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4057                                     struct ipmi_smi_msg  *msg)
4058{
4059        struct ipmi_system_interface_addr *smi_addr;
4060
4061        recv_msg->msgid = 0;
4062        smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4063        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4064        smi_addr->channel = IPMI_BMC_CHANNEL;
4065        smi_addr->lun = msg->rsp[0] & 3;
4066        recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4067        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4068        recv_msg->msg.cmd = msg->rsp[1];
4069        memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4070        recv_msg->msg.data = recv_msg->msg_data;
4071        recv_msg->msg.data_len = msg->rsp_size - 3;
4072}
4073
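    /*
     * Handle the response to a Read Event Message Buffer command.
     * The event is copied to every user that has asked for events;
     * if no user wants it, it is stashed on the interface's event
     * queue (up to MAX_EVENTS_IN_QUEUE).  Returns 1 to requeue the
     * SMI message if allocation fails.
     */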
4074static int handle_read_event_rsp(struct ipmi_smi *intf,
4075                                 struct ipmi_smi_msg *msg)
4076{
4077        struct ipmi_recv_msg *recv_msg, *recv_msg2;
4078        struct list_head     msgs;
4079        struct ipmi_user     *user;
4080        int rv = 0, deliver_count = 0, index;
4081        unsigned long        flags;
4082
4083        if (msg->rsp_size < 19) {
4084                /* Message is too small to be an IPMB event. */
4085                ipmi_inc_stat(intf, invalid_events);
4086                return 0;
4087        }
4088
4089        if (msg->rsp[2] != 0) {
4090                /* An error getting the event, just ignore it. */
4091                return 0;
4092        }
4093
4094        INIT_LIST_HEAD(&msgs);
4095
4096        spin_lock_irqsave(&intf->events_lock, flags);
4097
4098        ipmi_inc_stat(intf, events);
4099
4100        /*
4101         * Allocate and fill in one message for every user that is
4102         * getting events.
4103         */
4104        index = srcu_read_lock(&intf->users_srcu);
4105        list_for_each_entry_rcu(user, &intf->users, link) {
4106                if (!user->gets_events)
4107                        continue;
4108
4109                recv_msg = ipmi_alloc_recv_msg();
4110                if (!recv_msg) {
4111                        rcu_read_unlock();
4112                        list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4113                                                 link) {
4114                                list_del(&recv_msg->link);
4115                                ipmi_free_recv_msg(recv_msg);
4116                        }
4117                        /*
4118                         * We couldn't allocate memory for the
4119                         * message, so requeue it for handling
4120                         * later.
4121                         */
4122                        rv = 1;
4123                        goto out;
4124                }
4125
4126                deliver_count++;
4127
4128                copy_event_into_recv_msg(recv_msg, msg);
4129                recv_msg->user = user;
4130                kref_get(&user->refcount);
4131                list_add_tail(&recv_msg->link, &msgs);
4132        }
4133        srcu_read_unlock(&intf->users_srcu, index);
4134
4135        if (deliver_count) {
4136                /* Now deliver all the messages. */
4137                list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4138                        list_del(&recv_msg->link);
4139                        deliver_local_response(intf, recv_msg);
4140                }
4141        } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4142                /*
4143                 * No one to receive the message; put it in the queue if
4144                 * there aren't already too many things in it.
4145                 */
4146                recv_msg = ipmi_alloc_recv_msg();
4147                if (!recv_msg) {
4148                        /*
4149                         * We couldn't allocate memory for the
4150                         * message, so requeue it for handling
4151                         * later.
4152                         */
4153                        rv = 1;
4154                        goto out;
4155                }
4156
4157                copy_event_into_recv_msg(recv_msg, msg);
4158                list_add_tail(&recv_msg->link, &intf->waiting_events);
4159                intf->waiting_events_count++;
4160        } else if (!intf->event_msg_printed) {
4161                /*
4162                 * There are too many things in the queue, so discard this
4163                 * message.
4164                 */
4165                dev_warn(intf->si_dev,
4166                         "Event queue full, discarding incoming events\n");
4167                intf->event_msg_printed = 1;
4168        }
4169
4170 out:
4171        spin_unlock_irqrestore(&intf->events_lock, flags);
4172
4173        return rv;
4174}
4175
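    /*
     * Handle a response from the local BMC to a request we sent on
     * the system interface.  The originating receive message is
     * attached via msg->user_data; fill it in and deliver it locally.
     */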
4176static int handle_bmc_rsp(struct ipmi_smi *intf,
4177                          struct ipmi_smi_msg *msg)
4178{
4179        struct ipmi_recv_msg *recv_msg;
4180        struct ipmi_system_interface_addr *smi_addr;
4181
4182        recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4183        if (recv_msg == NULL) {
4184                dev_warn(intf->si_dev,
4185                         "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4186                return 0;
4187        }
4188
4189        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4190        recv_msg->msgid = msg->msgid;
4191        smi_addr = ((struct ipmi_system_interface_addr *)
4192                    &recv_msg->addr);
4193        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4194        smi_addr->channel = IPMI_BMC_CHANNEL;
4195        smi_addr->lun = msg->rsp[0] & 3;
4196        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4197        recv_msg->msg.cmd = msg->rsp[1];
4198        memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4199        recv_msg->msg.data = recv_msg->msg_data;
4200        recv_msg->msg.data_len = msg->rsp_size - 2;
4201        deliver_local_response(intf, recv_msg);
4202
4203        return 0;
4204}
4205
4206/*
4207 * Handle a received message.  Return 1 if the message should be requeued,
4208 * 0 if the message should be freed, or -1 if the message should not
4209 * be freed or requeued.
4210 */
4211static int handle_one_recv_msg(struct ipmi_smi *intf,
4212                               struct ipmi_smi_msg *msg)
4213{
4214        int requeue;
4215        int chan;
4216
4217        ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4218        if (msg->rsp_size < 2) {
4219                /* Message is too small to be correct. */
4220                dev_warn(intf->si_dev,
4221                         "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4222                         (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4223
4224                /* Generate an error response for the message. */
4225                msg->rsp[0] = msg->data[0] | (1 << 2);
4226                msg->rsp[1] = msg->data[1];
4227                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4228                msg->rsp_size = 3;
4229        } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4230                   || (msg->rsp[1] != msg->data[1])) {
4231                /*
4232                 * The NetFN and Command in the response are not even
4233                 * marginally correct.
4234                 */
4235                dev_warn(intf->si_dev,
4236                         "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4237                         (msg->data[0] >> 2) | 1, msg->data[1],
4238                         msg->rsp[0] >> 2, msg->rsp[1]);
4239
4240                /* Generate an error response for the message. */
4241                msg->rsp[0] = msg->data[0] | (1 << 2);
4242                msg->rsp[1] = msg->data[1];
4243                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4244                msg->rsp_size = 3;
4245        }
4246
4247        if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4248            && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4249            && (msg->user_data != NULL)) {
4250                /*
4251                 * It's a response to a response we sent.  For this we
4252                 * deliver a send message response to the user.
4253                 */
4254                struct ipmi_recv_msg *recv_msg = msg->user_data;
4255
4256                requeue = 0;
4257                if (msg->rsp_size < 2)
4258                        /* Message is too small to be correct. */
4259                        goto out;
4260
4261                chan = msg->data[2] & 0x0f;
4262                if (chan >= IPMI_MAX_CHANNELS)
4263                        /* Invalid channel number */
4264                        goto out;
4265
4266                if (!recv_msg)
4267                        goto out;
4268
4269                recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4270                recv_msg->msg.data = recv_msg->msg_data;
4271                recv_msg->msg.data_len = 1;
4272                recv_msg->msg_data[0] = msg->rsp[2];
4273                deliver_local_response(intf, recv_msg);
4274        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4275                   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4276                struct ipmi_channel   *chans;
4277
4278                /* It's from the receive queue. */
4279                chan = msg->rsp[3] & 0xf;
4280                if (chan >= IPMI_MAX_CHANNELS) {
4281                        /* Invalid channel number */
4282                        requeue = 0;
4283                        goto out;
4284                }
4285
4286                /*
4287                 * We need to make sure the channels have been initialized.
4288                 * The channel_handler routine sets channels_ready when
4289                 * all the channels for this interface have been
4290                 * initialized.
4291                 */
4292                if (!intf->channels_ready) {
4293                        requeue = 0; /* Throw the message away */
4294                        goto out;
4295                }
4296
4297                chans = READ_ONCE(intf->channel_list)->c;
4298
4299                switch (chans[chan].medium) {
4300                case IPMI_CHANNEL_MEDIUM_IPMB:
4301                        if (msg->rsp[4] & 0x04) {
4302                                /*
4303                                 * It's a response, so find the
4304                                 * requesting message and send it up.
4305                                 */
4306                                requeue = handle_ipmb_get_msg_rsp(intf, msg);
4307                        } else {
4308                                /*
4309                                 * It's a command to the SMS from some other
4310                                 * entity.  Handle that.
4311                                 */
4312                                requeue = handle_ipmb_get_msg_cmd(intf, msg);
4313                        }
4314                        break;
4315
4316                case IPMI_CHANNEL_MEDIUM_8023LAN:
4317                case IPMI_CHANNEL_MEDIUM_ASYNC:
4318                        if (msg->rsp[6] & 0x04) {
4319                                /*
4320                                 * It's a response, so find the
4321                                 * requesting message and send it up.
4322                                 */
4323                                requeue = handle_lan_get_msg_rsp(intf, msg);
4324                        } else {
4325                                /*
4326                                 * It's a command to the SMS from some other
4327                                 * entity.  Handle that.
4328                                 */
4329                                requeue = handle_lan_get_msg_cmd(intf, msg);
4330                        }
4331                        break;
4332
4333                default:
4334                        /* Check for OEM Channels.  Clients had better
4335                           register for these commands. */
4336                        if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4337                            && (chans[chan].medium
4338                                <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4339                                requeue = handle_oem_get_msg_cmd(intf, msg);
4340                        } else {
4341                                /*
4342                                 * We don't handle the channel type, so just
4343                                 * free the message.
4344                                 */
4345                                requeue = 0;
4346                        }
4347                }
4348
4349        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4350                   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4351                /* It's an asynchronous event. */
4352                requeue = handle_read_event_rsp(intf, msg);
4353        } else {
4354                /* It's a response from the local BMC. */
4355                requeue = handle_bmc_rsp(intf, msg);
4356        }
4357
4358 out:
4359        return requeue;
4360}
4361
4362/*
4363 * If there are messages in the queue or pretimeouts, handle them.
4364 */
4365static void handle_new_recv_msgs(struct ipmi_smi *intf)
4366{
4367        struct ipmi_smi_msg  *smi_msg;
4368        unsigned long        flags = 0;
4369        int                  rv;
4370        int                  run_to_completion = intf->run_to_completion;
4371
4372        /* See if any waiting messages need to be processed. */
4373        if (!run_to_completion)
4374                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4375        while (!list_empty(&intf->waiting_rcv_msgs)) {
4376                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4377                                     struct ipmi_smi_msg, link);
4378                list_del(&smi_msg->link);
4379                if (!run_to_completion)
4380                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4381                                               flags);
4382                rv = handle_one_recv_msg(intf, smi_msg);
4383                if (!run_to_completion)
4384                        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4385                if (rv > 0) {
4386                        /*
4387                         * To preserve message order, quit if we
4388                         * can't handle a message.  Add the message
4389                         * back at the head; this is safe because this
4390                         * tasklet is the only thing that pulls the
4391                         * messages.
4392                         */
4393                        list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4394                        break;
4395                } else {
4396                        if (rv == 0)
4397                                /* Message handled */
4398                                ipmi_free_smi_msg(smi_msg);
4399                        /* If rv < 0, fatal error, del but don't free. */
4400                }
4401        }
4402        if (!run_to_completion)
4403                spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4404
4405        /*
4406         * If the pretimeout count is non-zero, decrement it and
4407         * deliver pretimeouts to all the users.
4408         */
4409        if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4410                struct ipmi_user *user;
4411                int index;
4412
4413                index = srcu_read_lock(&intf->users_srcu);
4414                list_for_each_entry_rcu(user, &intf->users, link) {
4415                        if (user->handler->ipmi_watchdog_pretimeout)
4416                                user->handler->ipmi_watchdog_pretimeout(
4417                                        user->handler_data);
4418                }
4419                srcu_read_unlock(&intf->users_srcu, index);
4420        }
4421}
4422
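    /*
     * Tasklet body for received-message processing.  It also starts
     * the next queued transmit from here rather than from the receive
     * path, since the lower layer may hold its locks while delivering
     * messages, and then processes newly received messages and any
     * pending watchdog pretimeouts.
     */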
4423static void smi_recv_tasklet(unsigned long val)
4424{
4425        unsigned long flags = 0; /* keep us warning-free. */
4426        struct ipmi_smi *intf = (struct ipmi_smi *) val;
4427        int run_to_completion = intf->run_to_completion;
4428        struct ipmi_smi_msg *newmsg = NULL;
4429
4430        /*
4431         * Start the next message if available.
4432         *
4433         * Do this here, not in the actual receiver, because we could
4434         * deadlock: the lower layer is allowed to hold locks while
4435         * calling message delivery.
4436         */
4437
4438        rcu_read_lock();
4439
4440        if (!run_to_completion)
4441                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4442        if (intf->curr_msg == NULL && !intf->in_shutdown) {
4443                struct list_head *entry = NULL;
4444
4445                /* Pick the high priority queue first. */
4446                if (!list_empty(&intf->hp_xmit_msgs))
4447                        entry = intf->hp_xmit_msgs.next;
4448                else if (!list_empty(&intf->xmit_msgs))
4449                        entry = intf->xmit_msgs.next;
4450
4451                if (entry) {
4452                        list_del(entry);
4453                        newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4454                        intf->curr_msg = newmsg;
4455                }
4456        }
4457
4458        if (!run_to_completion)
4459                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4460        if (newmsg)
4461                intf->handlers->sender(intf->send_info, newmsg);
4462
4463        rcu_read_unlock();
4464
4465        handle_new_recv_msgs(intf);
4466}
4467
4468/* Handle a new message from the lower layer. */
4469void ipmi_smi_msg_received(struct ipmi_smi *intf,
4470                           struct ipmi_smi_msg *msg)
4471{
4472        unsigned long flags = 0; /* keep us warning-free. */
4473        int run_to_completion = intf->run_to_completion;
4474
4475        if ((msg->data_size >= 2)
4476            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4477            && (msg->data[1] == IPMI_SEND_MSG_CMD)
4478            && (msg->user_data == NULL)) {
4479
4480                if (intf->in_shutdown)
4481                        goto free_msg;
4482
4483                /*
4484                 * This is the local response to a command send; start
4485                 * the timer for these.  The user_data will not be
4486                 * NULL if this is a response send, and we will let
4487                 * response sends just go through.
4488                 */
4489
4490                /*
4491                 * Check for errors.  If we get certain errors (ones
4492                 * that basically mean we can try again later), we
4493                 * ignore them and start the timer.  Otherwise we
4494                 * report the error immediately.
4495                 */
4496                if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4497                    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4498                    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4499                    && (msg->rsp[2] != IPMI_BUS_ERR)
4500                    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4501                        int ch = msg->rsp[3] & 0xf;
4502                        struct ipmi_channel *chans;
4503
4504                        /* Got an error sending the message, handle it. */
4505
4506                        chans = READ_ONCE(intf->channel_list)->c;
4507                        if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4508                            || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4509                                ipmi_inc_stat(intf, sent_lan_command_errs);
4510                        else
4511                                ipmi_inc_stat(intf, sent_ipmb_command_errs);
4512                        intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4513                } else
4514                        /* The message was sent; start the timer. */
4515                        intf_start_seq_timer(intf, msg->msgid);
4516
4517free_msg:
4518                ipmi_free_smi_msg(msg);
4519        } else {
4520                /*
4521                 * To preserve message order, we keep a queue and deliver from
4522                 * a tasklet.
4523                 */
4524                if (!run_to_completion)
4525                        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4526                list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4527                if (!run_to_completion)
4528                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4529                                               flags);
4530        }
4531
4532        if (!run_to_completion)
4533                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4534        /*
4535         * We can get an asynchronous event or receive message in addition
4536         * to commands we send.
4537         */
4538        if (msg == intf->curr_msg)
4539                intf->curr_msg = NULL;
4540        if (!run_to_completion)
4541                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4542
4543        if (run_to_completion)
4544                smi_recv_tasklet((unsigned long) intf);
4545        else
4546                tasklet_schedule(&intf->recv_tasklet);
4547}
4548EXPORT_SYMBOL(ipmi_smi_msg_received);
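
/*
 * Illustrative only (hypothetical driver and field names, not part of
 * this file): a lower-layer (SMI) driver typically hands a completed
 * message up from its receive path roughly like this:
 *
 *	static void example_smi_handle_rsp(struct example_smi *smi)
 *	{
 *		struct ipmi_smi_msg *msg = smi->curr_msg;
 *
 *		smi->curr_msg = NULL;
 *		... fill msg->rsp and msg->rsp_size from the hardware ...
 *		ipmi_smi_msg_received(smi->intf, msg);
 *	}
 *
 * Ownership of the message passes to the message handler in that call
 * (it may free the message), so the lower layer must not touch the
 * message afterwards.
 */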
4549
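/*
 * Called by the lower layer when the BMC reports a watchdog
 * pretimeout; the actual delivery to the users happens later, from the
 * receive tasklet.
 */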
4550void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4551{
4552        if (intf->in_shutdown)
4553                return;
4554
4555        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4556        tasklet_schedule(&intf->recv_tasklet);
4557}
4558EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4559
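/*
 * Build a fresh SMI message for retransmission from the data saved in
 * a pending receive message, re-encoding the sequence number and
 * sequence id into the msgid.  Returns NULL if allocation fails.
 */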
4560static struct ipmi_smi_msg *
4561smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4562                  unsigned char seq, long seqid)
4563{
4564        struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4565        if (!smi_msg)
4566                /*
4567                 * If we can't allocate the message, just return; we
4568                 * get 4 retries, so this should be ok.
4569                 */
4570                return NULL;
4571
4572        memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4573        smi_msg->data_size = recv_msg->msg.data_len;
4574        smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4575
4576        ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4577
4578        return smi_msg;
4579}
4580
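/*
 * Check one sequence-table entry for a timeout.  Called with the
 * interface's seq_lock held (the saved irq state is passed in via
 * *flags); the lock is dropped and reacquired around the actual
 * retransmission.  Entries that have not expired just have their
 * remaining timeout decremented; entries that are out of retries are
 * moved to the caller's timeouts list for error delivery; anything
 * else is retransmitted.  *need_timer is set whenever the periodic
 * timer still has work to do.
 */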
4581static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4582                              struct list_head *timeouts,
4583                              unsigned long timeout_period,
4584                              int slot, unsigned long *flags,
4585                              bool *need_timer)
4586{
4587        struct ipmi_recv_msg *msg;
4588
4589        if (intf->in_shutdown)
4590                return;
4591
4592        if (!ent->inuse)
4593                return;
4594
4595        if (timeout_period < ent->timeout) {
4596                ent->timeout -= timeout_period;
4597                *need_timer = true;
4598                return;
4599        }
4600
4601        if (ent->retries_left == 0) {
4602                /* The message has used all its retries. */
4603                ent->inuse = 0;
4604                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4605                msg = ent->recv_msg;
4606                list_add_tail(&msg->link, timeouts);
4607                if (ent->broadcast)
4608                        ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4609                else if (is_lan_addr(&ent->recv_msg->addr))
4610                        ipmi_inc_stat(intf, timed_out_lan_commands);
4611                else
4612                        ipmi_inc_stat(intf, timed_out_ipmb_commands);
4613        } else {
4614                struct ipmi_smi_msg *smi_msg;
4615                /* More retries, send again. */
4616
4617                *need_timer = true;
4618
4619                /*
4620                 * Start with the max timeout; it is set back to the
4621                 * normal timeout after the message is sent.
4622                 */
4623                ent->timeout = MAX_MSG_TIMEOUT;
4624                ent->retries_left--;
4625                smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4626                                            ent->seqid);
4627                if (!smi_msg) {
4628                        if (is_lan_addr(&ent->recv_msg->addr))
4629                                ipmi_inc_stat(intf,
4630                                              dropped_rexmit_lan_commands);
4631                        else
4632                                ipmi_inc_stat(intf,
4633                                              dropped_rexmit_ipmb_commands);
4634                        return;
4635                }
4636
4637                spin_unlock_irqrestore(&intf->seq_lock, *flags);
4638
4639                /*
4640                 * Send the new message.  We send with a zero
4641                 * priority.  It already timed out, so timing is unlikely
4642                 * to be critical now, and high priority messages are really
4643                 * only for messages to the local MC, which don't get
4644                 * resent.
4645                 */
4646                if (intf->handlers) {
4647                        if (is_lan_addr(&ent->recv_msg->addr))
4648                                ipmi_inc_stat(intf,
4649                                              retransmitted_lan_commands);
4650                        else
4651                                ipmi_inc_stat(intf,
4652                                              retransmitted_ipmb_commands);
4653
4654                        smi_send(intf, intf->handlers, smi_msg, 0);
4655                } else
4656                        ipmi_free_smi_msg(smi_msg);
4657
4658                spin_lock_irqsave(&intf->seq_lock, *flags);
4659        }
4660}
4661
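/*
 * Per-interface work for one tick of the periodic timer: schedule BMC
 * registration if it has not happened yet, scan the sequence table and
 * deliver error responses for messages that have used up their
 * retries, age the IPMB maintenance mode timeout, leave automatic
 * maintenance mode once its timeout expires, and kick the receive
 * tasklet.  Returns true if the periodic timer is still needed.
 */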
4662static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4663                                 unsigned long timeout_period)
4664{
4665        struct list_head     timeouts;
4666        struct ipmi_recv_msg *msg, *msg2;
4667        unsigned long        flags;
4668        int                  i;
4669        bool                 need_timer = false;
4670
4671        if (!intf->bmc_registered) {
4672                kref_get(&intf->refcount);
4673                if (!schedule_work(&intf->bmc_reg_work)) {
4674                        kref_put(&intf->refcount, intf_free);
4675                        need_timer = true;
4676                }
4677        }
4678
4679        /*
4680         * Go through the seq table and find any messages that
4681         * have timed out, putting them in the timeouts
4682         * list.
4683         */
4684        INIT_LIST_HEAD(&timeouts);
4685        spin_lock_irqsave(&intf->seq_lock, flags);
4686        if (intf->ipmb_maintenance_mode_timeout) {
4687                if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4688                        intf->ipmb_maintenance_mode_timeout = 0;
4689                else
4690                        intf->ipmb_maintenance_mode_timeout -= timeout_period;
4691        }
4692        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4693                check_msg_timeout(intf, &intf->seq_table[i],
4694                                  &timeouts, timeout_period, i,
4695                                  &flags, &need_timer);
4696        spin_unlock_irqrestore(&intf->seq_lock, flags);
4697
4698        list_for_each_entry_safe(msg, msg2, &timeouts, link)
4699                deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4700
4701        /*
4702         * Maintenance mode handling.  Check the timeout
4703         * optimistically before we claim the lock.  It may
4704         * mean a timeout gets missed occasionally, but that
4705         * only means the timeout gets extended by one period
4706         * in that case.  No big deal, and it avoids the lock
4707         * most of the time.
4708         */
4709        if (intf->auto_maintenance_timeout > 0) {
4710                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4711                if (intf->auto_maintenance_timeout > 0) {
4712                        intf->auto_maintenance_timeout
4713                                -= timeout_period;
4714                        if (!intf->maintenance_mode
4715                            && (intf->auto_maintenance_timeout <= 0)) {
4716                                intf->maintenance_mode_enable = false;
4717                                maintenance_mode_update(intf);
4718                        }
4719                }
4720                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4721                                       flags);
4722        }
4723
4724        tasklet_schedule(&intf->recv_tasklet);
4725
4726        return need_timer;
4727}
4728
4729static void ipmi_request_event(struct ipmi_smi *intf)
4730{
4731        /* No event requests when in maintenance mode. */
4732        if (intf->maintenance_mode_enable)
4733                return;
4734
4735        if (!intf->in_shutdown)
4736                intf->handlers->request_events(intf->send_info);
4737}
4738
4739static struct timer_list ipmi_timer;
4740
4741static atomic_t stop_operation;
4742
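/*
 * The periodic timer function.  Unless module cleanup has set
 * stop_operation, it walks every registered interface, requests events
 * when users are waiting for them, and runs the per-interface timeout
 * handler; the timer is only rearmed while at least one interface
 * still needs it.
 */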
4743static void ipmi_timeout(struct timer_list *unused)
4744{
4745        struct ipmi_smi *intf;
4746        bool need_timer = false;
4747        int index;
4748
4749        if (atomic_read(&stop_operation))
4750                return;
4751
4752        index = srcu_read_lock(&ipmi_interfaces_srcu);
4753        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4754                if (atomic_read(&intf->event_waiters)) {
4755                        intf->ticks_to_req_ev--;
4756                        if (intf->ticks_to_req_ev == 0) {
4757                                ipmi_request_event(intf);
4758                                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4759                        }
4760                        need_timer = true;
4761                }
4762
4763                need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4764        }
4765        srcu_read_unlock(&ipmi_interfaces_srcu, index);
4766
4767        if (need_timer)
4768                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4769}
4770
4771static void need_waiter(struct ipmi_smi *intf)
4772{
4773        /* Racy, but worst case we start the timer twice. */
4774        if (!timer_pending(&ipmi_timer))
4775                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4776}
4777
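/*
 * Counts of currently allocated message structures, used only to warn
 * about leaks when the module is unloaded (see cleanup_ipmi()).
 */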
4778static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4779static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4780
4781static void free_smi_msg(struct ipmi_smi_msg *msg)
4782{
4783        atomic_dec(&smi_msg_inuse_count);
4784        kfree(msg);
4785}
4786
4787struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4788{
4789        struct ipmi_smi_msg *rv;
4790        rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4791        if (rv) {
4792                rv->done = free_smi_msg;
4793                rv->user_data = NULL;
4794                atomic_inc(&smi_msg_inuse_count);
4795        }
4796        return rv;
4797}
4798EXPORT_SYMBOL(ipmi_alloc_smi_msg);
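
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): SMI messages are allocated with GFP_ATOMIC and released
 * through their ->done handler (free_smi_msg() above):
 *
 *	struct ipmi_smi_msg *msg = ipmi_alloc_smi_msg();
 *
 *	if (!msg)
 *		return;			... allocation can fail ...
 *	... fill msg->data and set msg->data_size ...
 *	ipmi_free_smi_msg(msg);		... ends up in msg->done() ...
 */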
4799
4800static void free_recv_msg(struct ipmi_recv_msg *msg)
4801{
4802        atomic_dec(&recv_msg_inuse_count);
4803        kfree(msg);
4804}
4805
4806static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4807{
4808        struct ipmi_recv_msg *rv;
4809
4810        rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4811        if (rv) {
4812                rv->user = NULL;
4813                rv->done = free_recv_msg;
4814                atomic_inc(&recv_msg_inuse_count);
4815        }
4816        return rv;
4817}
4818
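/*
 * Release a receive message: drop the reference it holds on its user
 * (if any) and hand the message to its ->done handler for freeing.
 */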
4819void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4820{
4821        if (msg->user)
4822                kref_put(&msg->user->refcount, free_user);
4823        msg->done(msg);
4824}
4825EXPORT_SYMBOL(ipmi_free_recv_msg);
4826
4827static atomic_t panic_done_count = ATOMIC_INIT(0);
4828
4829static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4830{
4831        atomic_dec(&panic_done_count);
4832}
4833
4834static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4835{
4836        atomic_dec(&panic_done_count);
4837}
4838
4839/*
4840 * Inside a panic, send a message and wait for a response.  The
 * response is busy-polled for; interrupts and the scheduler cannot be
 * relied upon at panic time.
4841 */
4842static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4843                                        struct ipmi_addr *addr,
4844                                        struct kernel_ipmi_msg *msg)
4845{
4846        struct ipmi_smi_msg  smi_msg;
4847        struct ipmi_recv_msg recv_msg;
4848        int rv;
4849
4850        smi_msg.done = dummy_smi_done_handler;
4851        recv_msg.done = dummy_recv_done_handler;
4852        atomic_add(2, &panic_done_count);
4853        rv = i_ipmi_request(NULL,
4854                            intf,
4855                            addr,
4856                            0,
4857                            msg,
4858                            intf,
4859                            &smi_msg,
4860                            &recv_msg,
4861                            0,
4862                            intf->addrinfo[0].address,
4863                            intf->addrinfo[0].lun,
4864                            0, 1); /* Don't retry, and don't wait. */
4865        if (rv)
4866                atomic_sub(2, &panic_done_count);
4867        else if (intf->handlers->flush_messages)
4868                intf->handlers->flush_messages(intf->send_info);
4869
4870        while (atomic_read(&panic_done_count) != 0)
4871                ipmi_poll(intf);
4872}
4873
4874static void event_receiver_fetcher(struct ipmi_smi *intf,
4875                                   struct ipmi_recv_msg *msg)
4876{
4877        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4878            && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4879            && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4880            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4881                /* A get event receiver command, save it. */
4882                intf->event_receiver = msg->msg.data[1];
4883                intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4884        }
4885}
4886
4887static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4888{
4889        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4890            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4891            && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4892            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4893                /*
4894                 * A get device id command, save if we are an event
4895                 * receiver or generator.
4896                 */
4897                intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4898                intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4899        }
4900}
4901
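/*
 * Send panic information out on one interface.  A standard "OS
 * Critical Stop" platform event is sent first; then, if configured to
 * send the panic string as well, the string is written out as a series
 * of OEM SEL records (11 bytes of string each), either through the
 * event receiver over IPMB or directly into the local SEL device.
 */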
4902static void send_panic_events(struct ipmi_smi *intf, char *str)
4903{
4904        struct kernel_ipmi_msg msg;
4905        unsigned char data[16];
4906        struct ipmi_system_interface_addr *si;
4907        struct ipmi_addr addr;
4908        char *p = str;
4909        struct ipmi_ipmb_addr *ipmb;
4910        int j;
4911
4912        if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4913                return;
4914
4915        si = (struct ipmi_system_interface_addr *) &addr;
4916        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4917        si->channel = IPMI_BMC_CHANNEL;
4918        si->lun = 0;
4919
4920        /* Fill in an event telling that we have failed. */
4921        msg.netfn = 0x04; /* Sensor or Event. */
4922        msg.cmd = 2; /* Platform event command. */
4923        msg.data = data;
4924        msg.data_len = 8;
4925        data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4926        data[1] = 0x03; /* This is for IPMI 1.0. */
4927        data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4928        data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4929        data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4930
4931        /*
4932         * Put a few breadcrumbs in.  Hopefully later we can add more things
4933         * to make the panic events more useful.
4934         */
4935        if (str) {
4936                data[3] = str[0];
4937                data[6] = str[1];
4938                data[7] = str[2];
4939        }
4940
4941        /* Send the event announcing the panic. */
4942        ipmi_panic_request_and_wait(intf, &addr, &msg);
4943
4944        /*
4945         * Next, dump a series of OEM events holding the panic
4946         * string.
4947         */
4948        if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4949                return;
4950
4951        /*
4952         * intf_num is used as a marker to tell if the
4953         * interface is valid.  Thus we need a read barrier to
4954         * make sure data fetched before checking intf_num
4955         * won't be used.
4956         */
4957        smp_rmb();
4958
4959        /*
4960         * The first job here is to figure out where to send the
4961         * OEM events.  There's no way in IPMI to send OEM
4962         * events using an event send command, so we have to
4963         * find the SEL to put them in and stick them in
4964         * there.
4965         */
4966
4967        /* Get capabilities from the get device id. */
4968        intf->local_sel_device = 0;
4969        intf->local_event_generator = 0;
4970        intf->event_receiver = 0;
4971
4972        /* Request the device info from the local MC. */
4973        msg.netfn = IPMI_NETFN_APP_REQUEST;
4974        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4975        msg.data = NULL;
4976        msg.data_len = 0;
4977        intf->null_user_handler = device_id_fetcher;
4978        ipmi_panic_request_and_wait(intf, &addr, &msg);
4979
4980        if (intf->local_event_generator) {
4981                /* Request the event receiver from the local MC. */
4982                msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4983                msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4984                msg.data = NULL;
4985                msg.data_len = 0;
4986                intf->null_user_handler = event_receiver_fetcher;
4987                ipmi_panic_request_and_wait(intf, &addr, &msg);
4988        }
4989        intf->null_user_handler = NULL;
4990
4991        /*
4992         * Validate the event receiver.  The low bit must not
4993         * be 1 (it must be a valid IPMB address), it cannot
4994         * be zero, and it must not be my address.
4995         */
4996        if (((intf->event_receiver & 1) == 0)
4997            && (intf->event_receiver != 0)
4998            && (intf->event_receiver != intf->addrinfo[0].address)) {
4999                /*
5000                 * The event receiver is valid; send an IPMB
5001                 * message.
5002                 */
5003                ipmb = (struct ipmi_ipmb_addr *) &addr;
5004                ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5005                ipmb->channel = 0; /* FIXME - is this right? */
5006                ipmb->lun = intf->event_receiver_lun;
5007                ipmb->slave_addr = intf->event_receiver;
5008        } else if (intf->local_sel_device) {
5009                /*
5010                 * The event receiver was not valid (or was
5011                 * me), but I am an SEL device; just dump it
5012                 * in my SEL.
5013                 */
5014                si = (struct ipmi_system_interface_addr *) &addr;
5015                si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5016                si->channel = IPMI_BMC_CHANNEL;
5017                si->lun = 0;
5018        } else
5019                return; /* Nowhere to send the event. */
5020
5021        msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5022        msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5023        msg.data = data;
5024        msg.data_len = 16;
5025
5026        j = 0;
5027        while (*p) {
5028                int size = strlen(p);
5029
5030                if (size > 11)
5031                        size = 11;
5032                data[0] = 0;
5033                data[1] = 0;
5034                data[2] = 0xf0; /* OEM event without timestamp. */
5035                data[3] = intf->addrinfo[0].address;
5036                data[4] = j++; /* sequence # */
5037                /*
5038                 * Always give 11 bytes, so strncpy will pad the
5039                 * remainder with zeroes.
5040                 */
5041                strncpy(data+5, p, 11);
5042                p += size;
5043
5044                ipmi_panic_request_and_wait(intf, &addr, &msg);
5045        }
5046}
5047
5048static int has_panicked;
5049
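/*
 * Panic notifier.  For every ready interface this forces
 * run-to-completion mode, reinitializes the transmit and receive
 * queues if their locks cannot be taken (they may have been held when
 * the panic hit), calls any user panic handlers, and then sends the
 * panic events.
 */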
5050static int panic_event(struct notifier_block *this,
5051                       unsigned long         event,
5052                       void                  *ptr)
5053{
5054        struct ipmi_smi *intf;
5055        struct ipmi_user *user;
5056
5057        if (has_panicked)
5058                return NOTIFY_DONE;
5059        has_panicked = 1;
5060
5061        /* For every registered interface, set it to run to completion. */
5062        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5063                if (!intf->handlers || intf->intf_num == -1)
5064                        /* Interface is not ready. */
5065                        continue;
5066
5067                if (!intf->handlers->poll)
5068                        continue;
5069
5070                /*
5071                 * If we were interrupted while locking xmit_msgs_lock or
5072                 * waiting_rcv_msgs_lock, the corresponding list may be
5073                 * corrupted.  In this case, drop the items on the list
5074                 * for safety.
5075                 */
5076                if (!spin_trylock(&intf->xmit_msgs_lock)) {
5077                        INIT_LIST_HEAD(&intf->xmit_msgs);
5078                        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5079                } else
5080                        spin_unlock(&intf->xmit_msgs_lock);
5081
5082                if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5083                        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5084                else
5085                        spin_unlock(&intf->waiting_rcv_msgs_lock);
5086
5087                intf->run_to_completion = 1;
5088                if (intf->handlers->set_run_to_completion)
5089                        intf->handlers->set_run_to_completion(intf->send_info,
5090                                                              1);
5091
5092                list_for_each_entry_rcu(user, &intf->users, link) {
5093                        if (user->handler->ipmi_panic_handler)
5094                                user->handler->ipmi_panic_handler(
5095                                        user->handler_data);
5096                }
5097
5098                send_panic_events(intf, ptr);
5099        }
5100
5101        return NOTIFY_DONE;
5102}
5103
5104/* Must be called with ipmi_interfaces_mutex held. */
5105static int ipmi_register_driver(void)
5106{
5107        int rv;
5108
5109        if (drvregistered)
5110                return 0;
5111
5112        rv = driver_register(&ipmidriver.driver);
5113        if (rv)
5114                pr_err("Could not register IPMI driver\n");
5115        else
5116                drvregistered = true;
5117        return rv;
5118}
5119
5120static struct notifier_block panic_block = {
5121        .notifier_call  = panic_event,
5122        .next           = NULL,
5123        .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5124};
5125
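/*
 * One-time initialization of the message handler: register the IPMI
 * driver, set up the interface SRCU state, start the periodic timer,
 * and register the panic notifier.  Safe to call more than once; the
 * work is only done the first time.
 */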
5126static int ipmi_init_msghandler(void)
5127{
5128        int rv;
5129
5130        mutex_lock(&ipmi_interfaces_mutex);
5131        rv = ipmi_register_driver();
5132        if (rv)
5133                goto out;
5134        if (initialized)
5135                goto out;
5136
5137        init_srcu_struct(&ipmi_interfaces_srcu);
5138
5139        timer_setup(&ipmi_timer, ipmi_timeout, 0);
5140        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5141
5142        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5143
5144        initialized = true;
5145
5146out:
5147        mutex_unlock(&ipmi_interfaces_mutex);
5148        return rv;
5149}
5150
5151static int __init ipmi_init_msghandler_mod(void)
5152{
5153        int rv;
5154
5155        pr_info("version " IPMI_DRIVER_VERSION "\n");
5156
5157        mutex_lock(&ipmi_interfaces_mutex);
5158        rv = ipmi_register_driver();
5159        mutex_unlock(&ipmi_interfaces_mutex);
5160
5161        return rv;
5162}
5163
5164static void __exit cleanup_ipmi(void)
5165{
5166        int count;
5167
5168        if (initialized) {
5169                atomic_notifier_chain_unregister(&panic_notifier_list,
5170                                                 &panic_block);
5171
5172                /*
5173                 * This can't be called if any interfaces exist, so there is
5174                 * no need to worry about shutting down the interfaces.
5175                 */
5176
5177                /*
5178                 * Tell the timer to stop, then wait for it to stop.  This
5179                 * avoids problems with race conditions removing the timer
5180                 * here.
5181                 */
5182                atomic_set(&stop_operation, 1);
5183                del_timer_sync(&ipmi_timer);
5184
5185                initialized = false;
5186
5187                /* Check for buffer leaks. */
5188                count = atomic_read(&smi_msg_inuse_count);
5189                if (count != 0)
5190                        pr_warn("SMI message count %d at exit\n", count);
5191                count = atomic_read(&recv_msg_inuse_count);
5192                if (count != 0)
5193                        pr_warn("recv message count %d at exit\n", count);
5194
5195                cleanup_srcu_struct(&ipmi_interfaces_srcu);
5196        }
5197        if (drvregistered)
5198                driver_unregister(&ipmidriver.driver);
5199}
5200module_exit(cleanup_ipmi);
5201
5202module_init(ipmi_init_msghandler_mod);
5203MODULE_LICENSE("GPL");
5204MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5205MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5206                   " interface.");
5207MODULE_VERSION(IPMI_DRIVER_VERSION);
5208MODULE_SOFTDEP("post: ipmi_devintf");
5209