linux/drivers/char/ipmi/ipmi_msghandler.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * ipmi_msghandler.c
   4 *
   5 * Incoming and outgoing message routing for an IPMI interface.
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 */
  13
  14#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
  15#define dev_fmt pr_fmt
  16
  17#include <linux/module.h>
  18#include <linux/errno.h>
  19#include <linux/panic_notifier.h>
  20#include <linux/poll.h>
  21#include <linux/sched.h>
  22#include <linux/seq_file.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/ipmi.h>
  27#include <linux/ipmi_smi.h>
  28#include <linux/notifier.h>
  29#include <linux/init.h>
  30#include <linux/proc_fs.h>
  31#include <linux/rcupdate.h>
  32#include <linux/interrupt.h>
  33#include <linux/moduleparam.h>
  34#include <linux/workqueue.h>
  35#include <linux/uuid.h>
  36#include <linux/nospec.h>
  37#include <linux/vmalloc.h>
  38#include <linux/delay.h>
  39
  40#define IPMI_DRIVER_VERSION "39.2"
  41
  42static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
  43static int ipmi_init_msghandler(void);
  44static void smi_recv_tasklet(struct tasklet_struct *t);
  45static void handle_new_recv_msgs(struct ipmi_smi *intf);
  46static void need_waiter(struct ipmi_smi *intf);
  47static int handle_one_recv_msg(struct ipmi_smi *intf,
  48                               struct ipmi_smi_msg *msg);
  49
  50static bool initialized;
  51static bool drvregistered;
  52
  53/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
  54enum ipmi_panic_event_op {
  55        IPMI_SEND_PANIC_EVENT_NONE,
  56        IPMI_SEND_PANIC_EVENT,
  57        IPMI_SEND_PANIC_EVENT_STRING,
  58        IPMI_SEND_PANIC_EVENT_MAX
  59};
  60
  61/* Indices in this array should be mapped to enum ipmi_panic_event_op */
  62static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
  63
  64#ifdef CONFIG_IPMI_PANIC_STRING
  65#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
  66#elif defined(CONFIG_IPMI_PANIC_EVENT)
  67#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
  68#else
  69#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
  70#endif
  71
  72static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
  73
  74static int panic_op_write_handler(const char *val,
  75                                  const struct kernel_param *kp)
  76{
  77        char valcp[16];
  78        int e;
  79
  80        strscpy(valcp, val, sizeof(valcp));
  81        e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
  82        if (e < 0)
  83                return e;
  84
  85        ipmi_send_panic_event = e;
  86        return 0;
  87}
  88
  89static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
  90{
  91        const char *event_str;
  92
  93        if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
  94                event_str = "???";
  95        else
  96                event_str = ipmi_panic_event_str[ipmi_send_panic_event];
  97
  98        return sprintf(buffer, "%s\n", event_str);
  99}
 100
 101static const struct kernel_param_ops panic_op_ops = {
 102        .set = panic_op_write_handler,
 103        .get = panic_op_read_handler
 104};
 105module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
 106MODULE_PARM_DESC(panic_op, "Sets whether the IPMI driver will attempt to store panic information in the event log when a panic occurs.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
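/*
 * Usage sketch (illustrative, not part of the source): because panic_op is
 * registered with module_param_cb() and mode 0600, it can be set at load
 * time or changed later through sysfs.  This assumes the handler is built
 * as the ipmi_msghandler module:
 *
 *   modprobe ipmi_msghandler panic_op=string
 *   echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */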
 107
 108
 109#define MAX_EVENTS_IN_QUEUE     25
 110
 111/* Remain in auto-maintenance mode for this amount of time (in ms). */
 112static unsigned long maintenance_mode_timeout_ms = 30000;
 113module_param(maintenance_mode_timeout_ms, ulong, 0644);
 114MODULE_PARM_DESC(maintenance_mode_timeout_ms,
 115                 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
 116
 117/*
 118 * Don't let a message sit in a queue forever, always time it with at least
 119 * the max message timer.  This is in milliseconds.
 120 */
 121#define MAX_MSG_TIMEOUT         60000
 122
 123/*
 124 * Timeout times below are in milliseconds, and are done off a 1
 125 * second timer.  So setting the value to 1000 would mean anything
 126 * between 0 and 1000ms.  So really the only reasonable minimum
 127 * setting is 2000ms, which is between 1 and 2 seconds.
 128 */
 129
 130/* The default timeout for message retries. */
 131static unsigned long default_retry_ms = 2000;
 132module_param(default_retry_ms, ulong, 0644);
 133MODULE_PARM_DESC(default_retry_ms,
 134                 "The time (milliseconds) between retry sends");
 135
 136/* The default timeout for maintenance mode message retries. */
 137static unsigned long default_maintenance_retry_ms = 3000;
 138module_param(default_maintenance_retry_ms, ulong, 0644);
 139MODULE_PARM_DESC(default_maintenance_retry_ms,
 140                 "The time (milliseconds) between retry sends in maintenance mode");
 141
 142/* The default maximum number of retries */
 143static unsigned int default_max_retries = 4;
 144module_param(default_max_retries, uint, 0644);
 145MODULE_PARM_DESC(default_max_retries,
 146                 "The maximum number of retries before a message send is abandoned");
 147
 148/* Call every ~1000 ms. */
 149#define IPMI_TIMEOUT_TIME       1000
 150
 151/* How many jiffies does it take to get to the timeout time. */
 152#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
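/*
 * Worked example (illustrative): with HZ = 250, IPMI_TIMEOUT_JIFFIES is
 * (1000 * 250) / 1000 = 250 jiffies, i.e. roughly one second between runs
 * of the periodic timeout handling.
 */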
 153
 154/*
 155 * Request events from the queue every second (this is the number of
 156 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 157 * future, IPMI will add a way to know immediately if an event is in
 158 * the queue and this silliness can go away.
 159 */
 160#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
 161
 162/* How long should we cache dynamic device IDs? */
 163#define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
 164
 165/*
 166 * The main "user" data structure.
 167 */
 168struct ipmi_user {
 169        struct list_head link;
 170
 171        /*
 172         * Set to NULL when the user is destroyed; a pointer to itself
 173         * so srcu_dereference() can be used on it.
 174         */
 175        struct ipmi_user *self;
 176        struct srcu_struct release_barrier;
 177
 178        struct kref refcount;
 179
 180        /* The upper layer that handles receive messages. */
 181        const struct ipmi_user_hndl *handler;
 182        void             *handler_data;
 183
 184        /* The interface this user is bound to. */
 185        struct ipmi_smi *intf;
 186
 187        /* Does this user receive IPMI events? */
 188        bool gets_events;
 189
 190        /* Free must run in process context for RCU cleanup. */
 191        struct work_struct remove_work;
 192};
 193
 194static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
 195        __acquires(user->release_barrier)
 196{
 197        struct ipmi_user *ruser;
 198
 199        *index = srcu_read_lock(&user->release_barrier);
 200        ruser = srcu_dereference(user->self, &user->release_barrier);
 201        if (!ruser)
 202                srcu_read_unlock(&user->release_barrier, *index);
 203        return ruser;
 204}
 205
 206static void release_ipmi_user(struct ipmi_user *user, int index)
 207{
 208        srcu_read_unlock(&user->release_barrier, index);
 209}
 210
 211struct cmd_rcvr {
 212        struct list_head link;
 213
 214        struct ipmi_user *user;
 215        unsigned char netfn;
 216        unsigned char cmd;
 217        unsigned int  chans;
 218
 219        /*
 220         * This is used to form a linked list during mass deletion.
 221         * Since this is in an RCU list, we cannot use the link above
 222         * or change any data until the RCU period completes.  So we
 223         * use this next variable during mass deletion so we can have
 224         * a list and don't have to wait and restart the search on
 225         * every individual deletion of a command.
 226         */
 227        struct cmd_rcvr *next;
 228};
 229
 230struct seq_table {
 231        unsigned int         inuse : 1;
 232        unsigned int         broadcast : 1;
 233
 234        unsigned long        timeout;
 235        unsigned long        orig_timeout;
 236        unsigned int         retries_left;
 237
 238        /*
 239         * To verify that an incoming send message response matches the
 240         * message it is a response to, we keep a sequence id
 241         * and increment it every time we send a message.
 242         */
 243        long                 seqid;
 244
 245        /*
 246         * This is held so we can properly respond to the message on a
 247         * timeout, and it is used to hold the temporary data for
 248         * retransmission, too.
 249         */
 250        struct ipmi_recv_msg *recv_msg;
 251};
 252
 253/*
 254 * Store the information in a msgid (long) to allow us to find a
 255 * sequence table entry from the msgid.
 256 */
 257#define STORE_SEQ_IN_MSGID(seq, seqid) \
 258        ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
 259
 260#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
 261        do {                                                            \
 262                seq = (((msgid) >> 26) & 0x3f);                         \
 263                seqid = ((msgid) & 0x3ffffff);                          \
 264        } while (0)
 265
 266#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
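/*
 * Worked example (illustrative): for seq = 5 and seqid = 0x123,
 * STORE_SEQ_IN_MSGID() yields (5 << 26) | 0x123 = 0x14000123, and
 * GET_SEQ_FROM_MSGID(0x14000123, seq, seqid) recovers seq = 5 and
 * seqid = 0x123.  NEXT_SEQID() just wraps the seqid within its 26 bits.
 */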
 267
 268#define IPMI_MAX_CHANNELS       16
 269struct ipmi_channel {
 270        unsigned char medium;
 271        unsigned char protocol;
 272};
 273
 274struct ipmi_channel_set {
 275        struct ipmi_channel c[IPMI_MAX_CHANNELS];
 276};
 277
 278struct ipmi_my_addrinfo {
 279        /*
 280         * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
 281         * but may be changed by the user.
 282         */
 283        unsigned char address;
 284
 285        /*
 286         * My LUN.  This should generally stay the SMS LUN, but just in
 287         * case...
 288         */
 289        unsigned char lun;
 290};
 291
 292/*
 293 * Note that the product id, manufacturer id, guid, and device id are
 294 * immutable in this structure, so dyn_mutex is not required for
 295 * accessing those.  If those change on a BMC, a new BMC is allocated.
 296 */
 297struct bmc_device {
 298        struct platform_device pdev;
 299        struct list_head       intfs; /* Interfaces on this BMC. */
 300        struct ipmi_device_id  id;
 301        struct ipmi_device_id  fetch_id;
 302        int                    dyn_id_set;
 303        unsigned long          dyn_id_expiry;
 304        struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
 305        guid_t                 guid;
 306        guid_t                 fetch_guid;
 307        int                    dyn_guid_set;
 308        struct kref            usecount;
 309        struct work_struct     remove_work;
 310        unsigned char          cc; /* completion code */
 311};
 312#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 313
 314static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
 315                             struct ipmi_device_id *id,
 316                             bool *guid_set, guid_t *guid);
 317
 318/*
 319 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 320 * structure.
 321 */
 322enum ipmi_stat_indexes {
 323        /* Commands we got from the user that were invalid. */
 324        IPMI_STAT_sent_invalid_commands = 0,
 325
 326        /* Commands we sent to the MC. */
 327        IPMI_STAT_sent_local_commands,
 328
 329        /* Responses from the MC that were delivered to a user. */
 330        IPMI_STAT_handled_local_responses,
 331
 332        /* Responses from the MC that were not delivered to a user. */
 333        IPMI_STAT_unhandled_local_responses,
 334
 335        /* Commands we sent out to the IPMB bus. */
 336        IPMI_STAT_sent_ipmb_commands,
 337
 338        /* Commands sent on the IPMB that had errors on the SEND CMD */
 339        IPMI_STAT_sent_ipmb_command_errs,
 340
 341        /* Each retransmit increments this count. */
 342        IPMI_STAT_retransmitted_ipmb_commands,
 343
 344        /*
 345         * When a message times out (runs out of retransmits) this is
 346         * incremented.
 347         */
 348        IPMI_STAT_timed_out_ipmb_commands,
 349
 350        /*
 351         * This is like above, but for broadcasts.  Broadcasts are
 352         * *not* included in the above count (they are expected to
 353         * time out).
 354         */
 355        IPMI_STAT_timed_out_ipmb_broadcasts,
 356
 357        /* Responses I have sent to the IPMB bus. */
 358        IPMI_STAT_sent_ipmb_responses,
 359
 360        /* The response was delivered to the user. */
 361        IPMI_STAT_handled_ipmb_responses,
 362
 363        /* The response had invalid data in it. */
 364        IPMI_STAT_invalid_ipmb_responses,
 365
 366        /* The response didn't have anyone waiting for it. */
 367        IPMI_STAT_unhandled_ipmb_responses,
 368
 369        /* Commands we sent out over the LAN. */
 370        IPMI_STAT_sent_lan_commands,
 371
 372        /* Commands sent over the LAN that had errors on the SEND CMD */
 373        IPMI_STAT_sent_lan_command_errs,
 374
 375        /* Each retransmit increments this count. */
 376        IPMI_STAT_retransmitted_lan_commands,
 377
 378        /*
 379         * When a message times out (runs out of retransmits) this is
 380         * incremented.
 381         */
 382        IPMI_STAT_timed_out_lan_commands,
 383
 384        /* Responses I have sent over the LAN. */
 385        IPMI_STAT_sent_lan_responses,
 386
 387        /* The response was delivered to the user. */
 388        IPMI_STAT_handled_lan_responses,
 389
 390        /* The response had invalid data in it. */
 391        IPMI_STAT_invalid_lan_responses,
 392
 393        /* The response didn't have anyone waiting for it. */
 394        IPMI_STAT_unhandled_lan_responses,
 395
 396        /* The command was delivered to the user. */
 397        IPMI_STAT_handled_commands,
 398
 399        /* The command had invalid data in it. */
 400        IPMI_STAT_invalid_commands,
 401
 402        /* The command didn't have anyone waiting for it. */
 403        IPMI_STAT_unhandled_commands,
 404
 405        /* Invalid data in an event. */
 406        IPMI_STAT_invalid_events,
 407
 408        /* Events that were received with the proper format. */
 409        IPMI_STAT_events,
 410
 411        /* Retransmissions on IPMB that failed. */
 412        IPMI_STAT_dropped_rexmit_ipmb_commands,
 413
 414        /* Retransmissions on LAN that failed. */
 415        IPMI_STAT_dropped_rexmit_lan_commands,
 416
 417        /* This *must* remain last, add new values above this. */
 418        IPMI_NUM_STATS
 419};
 420
 421
 422#define IPMI_IPMB_NUM_SEQ       64
 423struct ipmi_smi {
 424        struct module *owner;
 425
 426        /* What interface number are we? */
 427        int intf_num;
 428
 429        struct kref refcount;
 430
 431        /* Set when the interface is being unregistered. */
 432        bool in_shutdown;
 433
 434        /* Used for a list of interfaces. */
 435        struct list_head link;
 436
 437        /*
 438         * The list of upper layers that are using me.  seq_lock write
 439         * protects this.  Read protection is with srcu.
 440         */
 441        struct list_head users;
 442        struct srcu_struct users_srcu;
 443
 444        /* Used for wake ups at startup. */
 445        wait_queue_head_t waitq;
 446
 447        /*
 448         * Prevents the interface from being unregistered when the
 449         * interface is used by being looked up through the BMC
 450         * structure.
 451         */
 452        struct mutex bmc_reg_mutex;
 453
 454        struct bmc_device tmp_bmc;
 455        struct bmc_device *bmc;
 456        bool bmc_registered;
 457        struct list_head bmc_link;
 458        char *my_dev_name;
 459        bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
 460        struct work_struct bmc_reg_work;
 461
 462        const struct ipmi_smi_handlers *handlers;
 463        void                     *send_info;
 464
 465        /* Driver-model device for the system interface. */
 466        struct device          *si_dev;
 467
 468        /*
 469         * A table of sequence numbers for this interface.  We use the
 470         * sequence numbers for IPMB messages that go out of the
 471         * interface to match them up with their responses.  A routine
 472         * is called periodically to time the items in this list.
 473         */
 474        spinlock_t       seq_lock;
 475        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
 476        int curr_seq;
 477
 478        /*
 479         * Messages queued for delivery.  If delivery fails (out of memory,
 480         * for instance), they will stay in here to be processed later by
 481         * the periodic timer.  The tasklet is for handling received
 482         * messages directly from the handler.
 483         */
 484        spinlock_t       waiting_rcv_msgs_lock;
 485        struct list_head waiting_rcv_msgs;
 486        atomic_t         watchdog_pretimeouts_to_deliver;
 487        struct tasklet_struct recv_tasklet;
 488
 489        spinlock_t             xmit_msgs_lock;
 490        struct list_head       xmit_msgs;
 491        struct ipmi_smi_msg    *curr_msg;
 492        struct list_head       hp_xmit_msgs;
 493
 494        /*
 495         * The list of command receivers that are registered for commands
 496         * on this interface.
 497         */
 498        struct mutex     cmd_rcvrs_mutex;
 499        struct list_head cmd_rcvrs;
 500
 501        /*
 502         * Events that were queued because no one was there to receive
 503         * them.
 504         */
 505        spinlock_t       events_lock; /* For dealing with event stuff. */
 506        struct list_head waiting_events;
 507        unsigned int     waiting_events_count; /* How many events in queue? */
 508        char             delivering_events;
 509        char             event_msg_printed;
 510
 511        /* How many users are waiting for events? */
 512        atomic_t         event_waiters;
 513        unsigned int     ticks_to_req_ev;
 514
 515        spinlock_t       watch_lock; /* For dealing with watch stuff below. */
 516
 517        /* How many users are waiting for commands? */
 518        unsigned int     command_waiters;
 519
 520        /* How many users are waiting for watchdogs? */
 521        unsigned int     watchdog_waiters;
 522
 523        /* How many users are waiting for message responses? */
 524        unsigned int     response_waiters;
 525
 526        /*
 527         * Tells what the lower layer has last been asked to watch for,
 528         * messages and/or watchdogs.  Protected by watch_lock.
 529         */
 530        unsigned int     last_watch_mask;
 531
 532        /*
 533         * The event receiver for my BMC, only really used at panic
 534         * shutdown as a place to store this.
 535         */
 536        unsigned char event_receiver;
 537        unsigned char event_receiver_lun;
 538        unsigned char local_sel_device;
 539        unsigned char local_event_generator;
 540
 541        /* For handling of maintenance mode. */
 542        int maintenance_mode;
 543        bool maintenance_mode_enable;
 544        int auto_maintenance_timeout;
 545        spinlock_t maintenance_mode_lock; /* Used in a timer... */
 546
 547        /*
 548         * If we are doing maintenance on something on IPMB, extend
 549         * the timeout time to avoid timeouts writing firmware and
 550         * such.
 551         */
 552        int ipmb_maintenance_mode_timeout;
 553
 554        /*
 555         * A cheap hack: if this is non-NULL and a message to the
 556         * interface comes in with a NULL user, call this routine with
 557         * it.  Note that the message will still be freed by the
 558         * caller.  This only works on the system interface.
 559         *
 560         * Protected by bmc_reg_mutex.
 561         */
 562        void (*null_user_handler)(struct ipmi_smi *intf,
 563                                  struct ipmi_recv_msg *msg);
 564
 565        /*
 566         * When we are scanning the channels for an SMI, this will
 567         * tell which channel we are scanning.
 568         */
 569        int curr_channel;
 570
 571        /* Channel information */
 572        struct ipmi_channel_set *channel_list;
 573        unsigned int curr_working_cset; /* First index into the following. */
 574        struct ipmi_channel_set wchannels[2];
 575        struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
 576        bool channels_ready;
 577
 578        atomic_t stats[IPMI_NUM_STATS];
 579
 580        /*
 581         * Duplicate of the run_to_completion flag in the smb_info,
 582         * smi_info and ipmi_serial_info structures.  Used to decrease the
 583         * number of parameters passed by the low-level IPMI code.
 584         */
 585        int run_to_completion;
 586};
 587#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 588
 589static void __get_guid(struct ipmi_smi *intf);
 590static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
 591static int __ipmi_bmc_register(struct ipmi_smi *intf,
 592                               struct ipmi_device_id *id,
 593                               bool guid_set, guid_t *guid, int intf_num);
 594static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
 595
 596
 597/*
 598 * The driver model view of the IPMI messaging driver.
 599 */
 600static struct platform_driver ipmidriver = {
 601        .driver = {
 602                .name = "ipmi",
 603                .bus = &platform_bus_type
 604        }
 605};
 606/*
 607 * This mutex keeps us from adding the same BMC twice.
 608 */
 609static DEFINE_MUTEX(ipmidriver_mutex);
 610
 611static LIST_HEAD(ipmi_interfaces);
 612static DEFINE_MUTEX(ipmi_interfaces_mutex);
 613#define ipmi_interfaces_mutex_held() \
 614        lockdep_is_held(&ipmi_interfaces_mutex)
 615static struct srcu_struct ipmi_interfaces_srcu;
 616
 617/*
 618 * List of watchers that want to know when smi's are added and deleted.
 619 */
 620static LIST_HEAD(smi_watchers);
 621static DEFINE_MUTEX(smi_watchers_mutex);
 622
 623#define ipmi_inc_stat(intf, stat) \
 624        atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 625#define ipmi_get_stat(intf, stat) \
 626        ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
 627
 628static const char * const addr_src_to_str[] = {
 629        "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
 630        "device-tree", "platform"
 631};
 632
 633const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
 634{
 635        if (src >= SI_LAST)
 636                src = 0; /* Invalid */
 637        return addr_src_to_str[src];
 638}
 639EXPORT_SYMBOL(ipmi_addr_src_to_str);
 640
 641static int is_lan_addr(struct ipmi_addr *addr)
 642{
 643        return addr->addr_type == IPMI_LAN_ADDR_TYPE;
 644}
 645
 646static int is_ipmb_addr(struct ipmi_addr *addr)
 647{
 648        return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
 649}
 650
 651static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
 652{
 653        return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
 654}
 655
 656static void free_recv_msg_list(struct list_head *q)
 657{
 658        struct ipmi_recv_msg *msg, *msg2;
 659
 660        list_for_each_entry_safe(msg, msg2, q, link) {
 661                list_del(&msg->link);
 662                ipmi_free_recv_msg(msg);
 663        }
 664}
 665
 666static void free_smi_msg_list(struct list_head *q)
 667{
 668        struct ipmi_smi_msg *msg, *msg2;
 669
 670        list_for_each_entry_safe(msg, msg2, q, link) {
 671                list_del(&msg->link);
 672                ipmi_free_smi_msg(msg);
 673        }
 674}
 675
 676static void clean_up_interface_data(struct ipmi_smi *intf)
 677{
 678        int              i;
 679        struct cmd_rcvr  *rcvr, *rcvr2;
 680        struct list_head list;
 681
 682        tasklet_kill(&intf->recv_tasklet);
 683
 684        free_smi_msg_list(&intf->waiting_rcv_msgs);
 685        free_recv_msg_list(&intf->waiting_events);
 686
 687        /*
 688         * Wholesale remove all the entries from the list in the
 689         * interface and wait for RCU to know that none are in use.
 690         */
 691        mutex_lock(&intf->cmd_rcvrs_mutex);
 692        INIT_LIST_HEAD(&list);
 693        list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
 694        mutex_unlock(&intf->cmd_rcvrs_mutex);
 695
 696        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
 697                kfree(rcvr);
 698
 699        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 700                if ((intf->seq_table[i].inuse)
 701                                        && (intf->seq_table[i].recv_msg))
 702                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 703        }
 704}
 705
 706static void intf_free(struct kref *ref)
 707{
 708        struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
 709
 710        clean_up_interface_data(intf);
 711        kfree(intf);
 712}
 713
 714struct watcher_entry {
 715        int              intf_num;
 716        struct ipmi_smi  *intf;
 717        struct list_head link;
 718};
 719
 720int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 721{
 722        struct ipmi_smi *intf;
 723        int index, rv;
 724
 725        /*
 726         * Make sure the driver is actually initialized; this handles
 727         * problems with initialization order.
 728         */
 729        rv = ipmi_init_msghandler();
 730        if (rv)
 731                return rv;
 732
 733        mutex_lock(&smi_watchers_mutex);
 734
 735        list_add(&watcher->link, &smi_watchers);
 736
 737        index = srcu_read_lock(&ipmi_interfaces_srcu);
 738        list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
 739                        lockdep_is_held(&smi_watchers_mutex)) {
 740                int intf_num = READ_ONCE(intf->intf_num);
 741
 742                if (intf_num == -1)
 743                        continue;
 744                watcher->new_smi(intf_num, intf->si_dev);
 745        }
 746        srcu_read_unlock(&ipmi_interfaces_srcu, index);
 747
 748        mutex_unlock(&smi_watchers_mutex);
 749
 750        return 0;
 751}
 752EXPORT_SYMBOL(ipmi_smi_watcher_register);
 753
 754int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 755{
 756        mutex_lock(&smi_watchers_mutex);
 757        list_del(&watcher->link);
 758        mutex_unlock(&smi_watchers_mutex);
 759        return 0;
 760}
 761EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
 762
 763/*
 764 * Takes smi_watchers_mutex itself, so must not be called with it held.
 765 */
 766static void
 767call_smi_watchers(int i, struct device *dev)
 768{
 769        struct ipmi_smi_watcher *w;
 770
 771        mutex_lock(&smi_watchers_mutex);
 772        list_for_each_entry(w, &smi_watchers, link) {
 773                if (try_module_get(w->owner)) {
 774                        w->new_smi(i, dev);
 775                        module_put(w->owner);
 776                }
 777        }
 778        mutex_unlock(&smi_watchers_mutex);
 779}
 780
 781static int
 782ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
 783{
 784        if (addr1->addr_type != addr2->addr_type)
 785                return 0;
 786
 787        if (addr1->channel != addr2->channel)
 788                return 0;
 789
 790        if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 791                struct ipmi_system_interface_addr *smi_addr1
 792                    = (struct ipmi_system_interface_addr *) addr1;
 793                struct ipmi_system_interface_addr *smi_addr2
 794                    = (struct ipmi_system_interface_addr *) addr2;
 795                return (smi_addr1->lun == smi_addr2->lun);
 796        }
 797
 798        if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
 799                struct ipmi_ipmb_addr *ipmb_addr1
 800                    = (struct ipmi_ipmb_addr *) addr1;
 801                struct ipmi_ipmb_addr *ipmb_addr2
 802                    = (struct ipmi_ipmb_addr *) addr2;
 803
 804                return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
 805                        && (ipmb_addr1->lun == ipmb_addr2->lun));
 806        }
 807
 808        if (is_lan_addr(addr1)) {
 809                struct ipmi_lan_addr *lan_addr1
 810                        = (struct ipmi_lan_addr *) addr1;
 811                struct ipmi_lan_addr *lan_addr2
 812                    = (struct ipmi_lan_addr *) addr2;
 813
 814                return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
 815                        && (lan_addr1->local_SWID == lan_addr2->local_SWID)
 816                        && (lan_addr1->session_handle
 817                            == lan_addr2->session_handle)
 818                        && (lan_addr1->lun == lan_addr2->lun));
 819        }
 820
 821        return 1;
 822}
 823
 824int ipmi_validate_addr(struct ipmi_addr *addr, int len)
 825{
 826        if (len < sizeof(struct ipmi_system_interface_addr))
 827                return -EINVAL;
 828
 829        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 830                if (addr->channel != IPMI_BMC_CHANNEL)
 831                        return -EINVAL;
 832                return 0;
 833        }
 834
 835        if ((addr->channel == IPMI_BMC_CHANNEL)
 836            || (addr->channel >= IPMI_MAX_CHANNELS)
 837            || (addr->channel < 0))
 838                return -EINVAL;
 839
 840        if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
 841                if (len < sizeof(struct ipmi_ipmb_addr))
 842                        return -EINVAL;
 843                return 0;
 844        }
 845
 846        if (is_lan_addr(addr)) {
 847                if (len < sizeof(struct ipmi_lan_addr))
 848                        return -EINVAL;
 849                return 0;
 850        }
 851
 852        return -EINVAL;
 853}
 854EXPORT_SYMBOL(ipmi_validate_addr);
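/*
 * A minimal sketch (illustrative, not used by the driver) of an address
 * that ipmi_validate_addr() accepts, using the system-interface form to
 * talk to the local BMC:
 *
 *   struct ipmi_system_interface_addr addr = {
 *           .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *           .channel   = IPMI_BMC_CHANNEL,
 *           .lun       = 0,
 *   };
 *   int rv = ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
 */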
 855
 856unsigned int ipmi_addr_length(int addr_type)
 857{
 858        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 859                return sizeof(struct ipmi_system_interface_addr);
 860
 861        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
 862                        || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
 863                return sizeof(struct ipmi_ipmb_addr);
 864
 865        if (addr_type == IPMI_LAN_ADDR_TYPE)
 866                return sizeof(struct ipmi_lan_addr);
 867
 868        return 0;
 869}
 870EXPORT_SYMBOL(ipmi_addr_length);
 871
 872static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 873{
 874        int rv = 0;
 875
 876        if (!msg->user) {
 877                /* Special handling for NULL users. */
 878                if (intf->null_user_handler) {
 879                        intf->null_user_handler(intf, msg);
 880                } else {
 881                        /* No handler, so give up. */
 882                        rv = -EINVAL;
 883                }
 884                ipmi_free_recv_msg(msg);
 885        } else if (oops_in_progress) {
 886                /*
 887                 * If we are running in the panic context, calling the
 888                 * receive handler doesn't have much meaning and has a deadlock
 889                 * risk, so simply skip it in that case.
 890                 */
 891                ipmi_free_recv_msg(msg);
 892        } else {
 893                int index;
 894                struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
 895
 896                if (user) {
 897                        user->handler->ipmi_recv_hndl(msg, user->handler_data);
 898                        release_ipmi_user(user, index);
 899                } else {
 900                        /* User went away, give up. */
 901                        ipmi_free_recv_msg(msg);
 902                        rv = -EINVAL;
 903                }
 904        }
 905
 906        return rv;
 907}
 908
 909static void deliver_local_response(struct ipmi_smi *intf,
 910                                   struct ipmi_recv_msg *msg)
 911{
 912        if (deliver_response(intf, msg))
 913                ipmi_inc_stat(intf, unhandled_local_responses);
 914        else
 915                ipmi_inc_stat(intf, handled_local_responses);
 916}
 917
 918static void deliver_err_response(struct ipmi_smi *intf,
 919                                 struct ipmi_recv_msg *msg, int err)
 920{
 921        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
 922        msg->msg_data[0] = err;
 923        msg->msg.netfn |= 1; /* Convert to a response. */
 924        msg->msg.data_len = 1;
 925        msg->msg.data = msg->msg_data;
 926        deliver_local_response(intf, msg);
 927}
 928
 929static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
 930{
 931        unsigned long iflags;
 932
 933        if (!intf->handlers->set_need_watch)
 934                return;
 935
 936        spin_lock_irqsave(&intf->watch_lock, iflags);
 937        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
 938                intf->response_waiters++;
 939
 940        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
 941                intf->watchdog_waiters++;
 942
 943        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
 944                intf->command_waiters++;
 945
 946        if ((intf->last_watch_mask & flags) != flags) {
 947                intf->last_watch_mask |= flags;
 948                intf->handlers->set_need_watch(intf->send_info,
 949                                               intf->last_watch_mask);
 950        }
 951        spin_unlock_irqrestore(&intf->watch_lock, iflags);
 952}
 953
 954static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
 955{
 956        unsigned long iflags;
 957
 958        if (!intf->handlers->set_need_watch)
 959                return;
 960
 961        spin_lock_irqsave(&intf->watch_lock, iflags);
 962        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
 963                intf->response_waiters--;
 964
 965        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
 966                intf->watchdog_waiters--;
 967
 968        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
 969                intf->command_waiters--;
 970
 971        flags = 0;
 972        if (intf->response_waiters)
 973                flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 974        if (intf->watchdog_waiters)
 975                flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
 976        if (intf->command_waiters)
 977                flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
 978
 979        if (intf->last_watch_mask != flags) {
 980                intf->last_watch_mask = flags;
 981                intf->handlers->set_need_watch(intf->send_info,
 982                                               intf->last_watch_mask);
 983        }
 984        spin_unlock_irqrestore(&intf->watch_lock, iflags);
 985}
 986
 987/*
 988 * Find the next sequence number not being used and add the given
 989 * message with the given timeout to the sequence table.  This must be
 990 * called with the interface's seq_lock held.
 991 */
 992static int intf_next_seq(struct ipmi_smi      *intf,
 993                         struct ipmi_recv_msg *recv_msg,
 994                         unsigned long        timeout,
 995                         int                  retries,
 996                         int                  broadcast,
 997                         unsigned char        *seq,
 998                         long                 *seqid)
 999{
1000        int          rv = 0;
1001        unsigned int i;
1002
1003        if (timeout == 0)
1004                timeout = default_retry_ms;
1005        if (retries < 0)
1006                retries = default_max_retries;
1007
1008        for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1009                                        i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1010                if (!intf->seq_table[i].inuse)
1011                        break;
1012        }
1013
1014        if (!intf->seq_table[i].inuse) {
1015                intf->seq_table[i].recv_msg = recv_msg;
1016
 1017                 * Start with the maximum timeout; when the send response
1018                 * Start with the maximum timeout, when the send response
1019                 * comes in we will start the real timer.
1020                 */
1021                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1022                intf->seq_table[i].orig_timeout = timeout;
1023                intf->seq_table[i].retries_left = retries;
1024                intf->seq_table[i].broadcast = broadcast;
1025                intf->seq_table[i].inuse = 1;
1026                intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1027                *seq = i;
1028                *seqid = intf->seq_table[i].seqid;
1029                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1030                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1031                need_waiter(intf);
1032        } else {
1033                rv = -EAGAIN;
1034        }
1035
1036        return rv;
1037}
1038
1039/*
1040 * Return the receive message for the given sequence number and
1041 * release the sequence number so it can be reused.  Some other data
1042 * is passed in to be sure the message matches up correctly (to help
1043 * guard against messages coming in after their timeout and the
1044 * sequence number being reused).
1045 */
1046static int intf_find_seq(struct ipmi_smi      *intf,
1047                         unsigned char        seq,
1048                         short                channel,
1049                         unsigned char        cmd,
1050                         unsigned char        netfn,
1051                         struct ipmi_addr     *addr,
1052                         struct ipmi_recv_msg **recv_msg)
1053{
1054        int           rv = -ENODEV;
1055        unsigned long flags;
1056
1057        if (seq >= IPMI_IPMB_NUM_SEQ)
1058                return -EINVAL;
1059
1060        spin_lock_irqsave(&intf->seq_lock, flags);
1061        if (intf->seq_table[seq].inuse) {
1062                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1063
1064                if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1065                                && (msg->msg.netfn == netfn)
1066                                && (ipmi_addr_equal(addr, &msg->addr))) {
1067                        *recv_msg = msg;
1068                        intf->seq_table[seq].inuse = 0;
1069                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1070                        rv = 0;
1071                }
1072        }
1073        spin_unlock_irqrestore(&intf->seq_lock, flags);
1074
1075        return rv;
1076}
1077
1078
1079/* Start the timer for a specific sequence table entry. */
1080static int intf_start_seq_timer(struct ipmi_smi *intf,
1081                                long       msgid)
1082{
1083        int           rv = -ENODEV;
1084        unsigned long flags;
1085        unsigned char seq;
1086        unsigned long seqid;
1087
1088
1089        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1090
1091        spin_lock_irqsave(&intf->seq_lock, flags);
1092        /*
1093         * We do this verification because the user can be deleted
1094         * while a message is outstanding.
1095         */
1096        if ((intf->seq_table[seq].inuse)
1097                                && (intf->seq_table[seq].seqid == seqid)) {
1098                struct seq_table *ent = &intf->seq_table[seq];
1099                ent->timeout = ent->orig_timeout;
1100                rv = 0;
1101        }
1102        spin_unlock_irqrestore(&intf->seq_lock, flags);
1103
1104        return rv;
1105}
1106
1107/* Got an error for the send message for a specific sequence number. */
1108static int intf_err_seq(struct ipmi_smi *intf,
1109                        long         msgid,
1110                        unsigned int err)
1111{
1112        int                  rv = -ENODEV;
1113        unsigned long        flags;
1114        unsigned char        seq;
1115        unsigned long        seqid;
1116        struct ipmi_recv_msg *msg = NULL;
1117
1118
1119        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1120
1121        spin_lock_irqsave(&intf->seq_lock, flags);
1122        /*
1123         * We do this verification because the user can be deleted
1124         * while a message is outstanding.
1125         */
1126        if ((intf->seq_table[seq].inuse)
1127                                && (intf->seq_table[seq].seqid == seqid)) {
1128                struct seq_table *ent = &intf->seq_table[seq];
1129
1130                ent->inuse = 0;
1131                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1132                msg = ent->recv_msg;
1133                rv = 0;
1134        }
1135        spin_unlock_irqrestore(&intf->seq_lock, flags);
1136
1137        if (msg)
1138                deliver_err_response(intf, msg, err);
1139
1140        return rv;
1141}
1142
1143static void free_user_work(struct work_struct *work)
1144{
1145        struct ipmi_user *user = container_of(work, struct ipmi_user,
1146                                              remove_work);
1147
1148        cleanup_srcu_struct(&user->release_barrier);
1149        vfree(user);
1150}
1151
1152int ipmi_create_user(unsigned int          if_num,
1153                     const struct ipmi_user_hndl *handler,
1154                     void                  *handler_data,
1155                     struct ipmi_user      **user)
1156{
1157        unsigned long flags;
1158        struct ipmi_user *new_user;
1159        int           rv, index;
1160        struct ipmi_smi *intf;
1161
1162        /*
1163         * There is no module usecount here, because it's not
1164         * required.  Since this can only be used by and called from
1165         * other modules, they will implicitly use this module, and
1166         * thus this can't be removed unless the other modules are
1167         * removed.
1168         */
1169
1170        if (handler == NULL)
1171                return -EINVAL;
1172
1173        /*
1174         * Make sure the driver is actually initialized; this handles
1175         * problems with initialization order.
1176         */
1177        rv = ipmi_init_msghandler();
1178        if (rv)
1179                return rv;
1180
1181        new_user = vzalloc(sizeof(*new_user));
1182        if (!new_user)
1183                return -ENOMEM;
1184
1185        index = srcu_read_lock(&ipmi_interfaces_srcu);
1186        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1187                if (intf->intf_num == if_num)
1188                        goto found;
1189        }
1190        /* Not found, return an error */
1191        rv = -EINVAL;
1192        goto out_kfree;
1193
1194 found:
1195        INIT_WORK(&new_user->remove_work, free_user_work);
1196
1197        rv = init_srcu_struct(&new_user->release_barrier);
1198        if (rv)
1199                goto out_kfree;
1200
1201        if (!try_module_get(intf->owner)) {
1202                rv = -ENODEV;
1203                goto out_kfree;
1204        }
1205
1206        /* Note that each existing user holds a refcount to the interface. */
1207        kref_get(&intf->refcount);
1208
1209        kref_init(&new_user->refcount);
1210        new_user->handler = handler;
1211        new_user->handler_data = handler_data;
1212        new_user->intf = intf;
1213        new_user->gets_events = false;
1214
1215        rcu_assign_pointer(new_user->self, new_user);
1216        spin_lock_irqsave(&intf->seq_lock, flags);
1217        list_add_rcu(&new_user->link, &intf->users);
1218        spin_unlock_irqrestore(&intf->seq_lock, flags);
1219        if (handler->ipmi_watchdog_pretimeout)
1220                /* User wants pretimeouts, so make sure to watch for them. */
1221                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1222        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1223        *user = new_user;
1224        return 0;
1225
1226out_kfree:
1227        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1228        vfree(new_user);
1229        return rv;
1230}
1231EXPORT_SYMBOL(ipmi_create_user);
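/*
 * A minimal usage sketch (illustrative, not part of this file).  A client
 * passes an ipmi_user_hndl whose ipmi_recv_hndl() consumes delivered
 * messages and frees them with ipmi_free_recv_msg(); the names below are
 * hypothetical:
 *
 *   static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
 *   {
 *           pr_info("IPMI response, cmd 0x%x\n", msg->msg.cmd);
 *           ipmi_free_recv_msg(msg);
 *   }
 *
 *   static const struct ipmi_user_hndl my_hndl = {
 *           .ipmi_recv_hndl = my_recv,
 *   };
 *   static struct ipmi_user *my_user;
 *
 *   rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
 *   ...
 *   ipmi_destroy_user(my_user);
 */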
1232
1233int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1234{
1235        int rv, index;
1236        struct ipmi_smi *intf;
1237
1238        index = srcu_read_lock(&ipmi_interfaces_srcu);
1239        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1240                if (intf->intf_num == if_num)
1241                        goto found;
1242        }
1243        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1244
1245        /* Not found, return an error */
1246        return -EINVAL;
1247
1248found:
1249        if (!intf->handlers->get_smi_info)
1250                rv = -ENOTTY;
1251        else
1252                rv = intf->handlers->get_smi_info(intf->send_info, data);
1253        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1254
1255        return rv;
1256}
1257EXPORT_SYMBOL(ipmi_get_smi_info);
1258
1259static void free_user(struct kref *ref)
1260{
1261        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1262
1263        /* SRCU cleanup must happen in task context. */
1264        schedule_work(&user->remove_work);
1265}
1266
1267static void _ipmi_destroy_user(struct ipmi_user *user)
1268{
1269        struct ipmi_smi  *intf = user->intf;
1270        int              i;
1271        unsigned long    flags;
1272        struct cmd_rcvr  *rcvr;
1273        struct cmd_rcvr  *rcvrs = NULL;
1274
1275        if (!acquire_ipmi_user(user, &i)) {
1276                /*
1277                 * The user has already been cleaned up; just make sure
1278                 * nothing is using it and return.
1279                 */
1280                synchronize_srcu(&user->release_barrier);
1281                return;
1282        }
1283
1284        rcu_assign_pointer(user->self, NULL);
1285        release_ipmi_user(user, i);
1286
1287        synchronize_srcu(&user->release_barrier);
1288
1289        if (user->handler->shutdown)
1290                user->handler->shutdown(user->handler_data);
1291
1292        if (user->handler->ipmi_watchdog_pretimeout)
1293                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1294
1295        if (user->gets_events)
1296                atomic_dec(&intf->event_waiters);
1297
1298        /* Remove the user from the interface's sequence table. */
1299        spin_lock_irqsave(&intf->seq_lock, flags);
1300        list_del_rcu(&user->link);
1301
1302        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1303                if (intf->seq_table[i].inuse
1304                    && (intf->seq_table[i].recv_msg->user == user)) {
1305                        intf->seq_table[i].inuse = 0;
1306                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1307                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1308                }
1309        }
1310        spin_unlock_irqrestore(&intf->seq_lock, flags);
1311
1312        /*
1313         * Remove the user from the command receiver's table.  First
1314         * we build a list of everything (not using the standard link,
1315         * since other things may be using it till we do
1316         * synchronize_rcu()), then free everything in that list.
1317         */
1318        mutex_lock(&intf->cmd_rcvrs_mutex);
1319        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1320                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1321                if (rcvr->user == user) {
1322                        list_del_rcu(&rcvr->link);
1323                        rcvr->next = rcvrs;
1324                        rcvrs = rcvr;
1325                }
1326        }
1327        mutex_unlock(&intf->cmd_rcvrs_mutex);
1328        synchronize_rcu();
1329        while (rcvrs) {
1330                rcvr = rcvrs;
1331                rcvrs = rcvr->next;
1332                kfree(rcvr);
1333        }
1334
1335        kref_put(&intf->refcount, intf_free);
1336        module_put(intf->owner);
1337}
1338
1339int ipmi_destroy_user(struct ipmi_user *user)
1340{
1341        _ipmi_destroy_user(user);
1342
1343        kref_put(&user->refcount, free_user);
1344
1345        return 0;
1346}
1347EXPORT_SYMBOL(ipmi_destroy_user);
1348
1349int ipmi_get_version(struct ipmi_user *user,
1350                     unsigned char *major,
1351                     unsigned char *minor)
1352{
1353        struct ipmi_device_id id;
1354        int rv, index;
1355
1356        user = acquire_ipmi_user(user, &index);
1357        if (!user)
1358                return -ENODEV;
1359
1360        rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1361        if (!rv) {
1362                *major = ipmi_version_major(&id);
1363                *minor = ipmi_version_minor(&id);
1364        }
1365        release_ipmi_user(user, index);
1366
1367        return rv;
1368}
1369EXPORT_SYMBOL(ipmi_get_version);
1370
1371int ipmi_set_my_address(struct ipmi_user *user,
1372                        unsigned int  channel,
1373                        unsigned char address)
1374{
1375        int index, rv = 0;
1376
1377        user = acquire_ipmi_user(user, &index);
1378        if (!user)
1379                return -ENODEV;
1380
1381        if (channel >= IPMI_MAX_CHANNELS) {
1382                rv = -EINVAL;
1383        } else {
1384                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1385                user->intf->addrinfo[channel].address = address;
1386        }
1387        release_ipmi_user(user, index);
1388
1389        return rv;
1390}
1391EXPORT_SYMBOL(ipmi_set_my_address);
1392
1393int ipmi_get_my_address(struct ipmi_user *user,
1394                        unsigned int  channel,
1395                        unsigned char *address)
1396{
1397        int index, rv = 0;
1398
1399        user = acquire_ipmi_user(user, &index);
1400        if (!user)
1401                return -ENODEV;
1402
1403        if (channel >= IPMI_MAX_CHANNELS) {
1404                rv = -EINVAL;
1405        } else {
1406                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1407                *address = user->intf->addrinfo[channel].address;
1408        }
1409        release_ipmi_user(user, index);
1410
1411        return rv;
1412}
1413EXPORT_SYMBOL(ipmi_get_my_address);
1414
1415int ipmi_set_my_LUN(struct ipmi_user *user,
1416                    unsigned int  channel,
1417                    unsigned char LUN)
1418{
1419        int index, rv = 0;
1420
1421        user = acquire_ipmi_user(user, &index);
1422        if (!user)
1423                return -ENODEV;
1424
1425        if (channel >= IPMI_MAX_CHANNELS) {
1426                rv = -EINVAL;
1427        } else {
1428                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1429                user->intf->addrinfo[channel].lun = LUN & 0x3;
1430        }
1431        release_ipmi_user(user, index);
1432
1433        return rv;
1434}
1435EXPORT_SYMBOL(ipmi_set_my_LUN);
1436
1437int ipmi_get_my_LUN(struct ipmi_user *user,
1438                    unsigned int  channel,
1439                    unsigned char *address)
1440{
1441        int index, rv = 0;
1442
1443        user = acquire_ipmi_user(user, &index);
1444        if (!user)
1445                return -ENODEV;
1446
1447        if (channel >= IPMI_MAX_CHANNELS) {
1448                rv = -EINVAL;
1449        } else {
1450                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1451                *address = user->intf->addrinfo[channel].lun;
1452        }
1453        release_ipmi_user(user, index);
1454
1455        return rv;
1456}
1457EXPORT_SYMBOL(ipmi_get_my_LUN);
1458
1459int ipmi_get_maintenance_mode(struct ipmi_user *user)
1460{
1461        int mode, index;
1462        unsigned long flags;
1463
1464        user = acquire_ipmi_user(user, &index);
1465        if (!user)
1466                return -ENODEV;
1467
1468        spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1469        mode = user->intf->maintenance_mode;
1470        spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1471        release_ipmi_user(user, index);
1472
1473        return mode;
1474}
1475EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1476
1477static void maintenance_mode_update(struct ipmi_smi *intf)
1478{
1479        if (intf->handlers->set_maintenance_mode)
1480                intf->handlers->set_maintenance_mode(
1481                        intf->send_info, intf->maintenance_mode_enable);
1482}
1483
1484int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1485{
1486        int rv = 0, index;
1487        unsigned long flags;
1488        struct ipmi_smi *intf = user->intf;
1489
1490        user = acquire_ipmi_user(user, &index);
1491        if (!user)
1492                return -ENODEV;
1493
1494        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1495        if (intf->maintenance_mode != mode) {
1496                switch (mode) {
1497                case IPMI_MAINTENANCE_MODE_AUTO:
1498                        intf->maintenance_mode_enable
1499                                = (intf->auto_maintenance_timeout > 0);
1500                        break;
1501
1502                case IPMI_MAINTENANCE_MODE_OFF:
1503                        intf->maintenance_mode_enable = false;
1504                        break;
1505
1506                case IPMI_MAINTENANCE_MODE_ON:
1507                        intf->maintenance_mode_enable = true;
1508                        break;
1509
1510                default:
1511                        rv = -EINVAL;
1512                        goto out_unlock;
1513                }
1514                intf->maintenance_mode = mode;
1515
1516                maintenance_mode_update(intf);
1517        }
1518 out_unlock:
1519        spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1520        release_ipmi_user(user, index);
1521
1522        return rv;
1523}
1524EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1525
1526int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1527{
1528        unsigned long        flags;
1529        struct ipmi_smi      *intf = user->intf;
1530        struct ipmi_recv_msg *msg, *msg2;
1531        struct list_head     msgs;
1532        int index;
1533
1534        user = acquire_ipmi_user(user, &index);
1535        if (!user)
1536                return -ENODEV;
1537
1538        INIT_LIST_HEAD(&msgs);
1539
1540        spin_lock_irqsave(&intf->events_lock, flags);
1541        if (user->gets_events == val)
1542                goto out;
1543
1544        user->gets_events = val;
1545
1546        if (val) {
1547                if (atomic_inc_return(&intf->event_waiters) == 1)
1548                        need_waiter(intf);
1549        } else {
1550                atomic_dec(&intf->event_waiters);
1551        }
1552
1553        if (intf->delivering_events)
1554                /*
1555                 * Another thread is delivering events for this, so
1556                 * let it handle any new events.
1557                 */
1558                goto out;
1559
1560        /* Deliver any queued events. */
1561        while (user->gets_events && !list_empty(&intf->waiting_events)) {
1562                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1563                        list_move_tail(&msg->link, &msgs);
1564                intf->waiting_events_count = 0;
1565                if (intf->event_msg_printed) {
1566                        dev_warn(intf->si_dev, "Event queue no longer full\n");
1567                        intf->event_msg_printed = 0;
1568                }
1569
1570                intf->delivering_events = 1;
1571                spin_unlock_irqrestore(&intf->events_lock, flags);
1572
1573                list_for_each_entry_safe(msg, msg2, &msgs, link) {
1574                        msg->user = user;
1575                        kref_get(&user->refcount);
1576                        deliver_local_response(intf, msg);
1577                }
1578
1579                spin_lock_irqsave(&intf->events_lock, flags);
1580                intf->delivering_events = 0;
1581        }
1582
1583 out:
1584        spin_unlock_irqrestore(&intf->events_lock, flags);
1585        release_ipmi_user(user, index);
1586
1587        return 0;
1588}
1589EXPORT_SYMBOL(ipmi_set_gets_events);
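
/*
 * Illustrative example (a minimal sketch, not upstream code): how a
 * kernel consumer might register as a user on interface 0 and enable
 * event delivery.  The example_* names are hypothetical; the handler
 * owns each delivered message and must free it.
 */
#if 0
static void example_recv_hndl(struct ipmi_recv_msg *msg, void *handler_data)
{
        if (msg->recv_type == IPMI_ASYNC_EVENT_RECV_TYPE)
                pr_info("event received, %d data bytes\n", msg->msg.data_len);

        ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
        .ipmi_recv_hndl = example_recv_hndl,
};

static int example_watch_events(struct ipmi_user **user)
{
        int rv = ipmi_create_user(0, &example_hndl, NULL, user);

        if (rv)
                return rv;

        /* Any queued events are delivered once this is enabled. */
        return ipmi_set_gets_events(*user, true);
}
#endif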
1590
1591static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1592                                      unsigned char netfn,
1593                                      unsigned char cmd,
1594                                      unsigned char chan)
1595{
1596        struct cmd_rcvr *rcvr;
1597
1598        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1599                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1600                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1601                                        && (rcvr->chans & (1 << chan)))
1602                        return rcvr;
1603        }
1604        return NULL;
1605}
1606
1607static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1608                                 unsigned char netfn,
1609                                 unsigned char cmd,
1610                                 unsigned int  chans)
1611{
1612        struct cmd_rcvr *rcvr;
1613
1614        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1615                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1616                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1617                                        && (rcvr->chans & chans))
1618                        return 0;
1619        }
1620        return 1;
1621}
1622
1623int ipmi_register_for_cmd(struct ipmi_user *user,
1624                          unsigned char netfn,
1625                          unsigned char cmd,
1626                          unsigned int  chans)
1627{
1628        struct ipmi_smi *intf = user->intf;
1629        struct cmd_rcvr *rcvr;
1630        int rv = 0, index;
1631
1632        user = acquire_ipmi_user(user, &index);
1633        if (!user)
1634                return -ENODEV;
1635
1636        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1637        if (!rcvr) {
1638                rv = -ENOMEM;
1639                goto out_release;
1640        }
1641        rcvr->cmd = cmd;
1642        rcvr->netfn = netfn;
1643        rcvr->chans = chans;
1644        rcvr->user = user;
1645
1646        mutex_lock(&intf->cmd_rcvrs_mutex);
1647        /* Make sure the command/netfn is not already registered. */
1648        if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1649                rv = -EBUSY;
1650                goto out_unlock;
1651        }
1652
1653        smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1654
1655        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1656
1657out_unlock:
1658        mutex_unlock(&intf->cmd_rcvrs_mutex);
1659        if (rv)
1660                kfree(rcvr);
1661out_release:
1662        release_ipmi_user(user, index);
1663
1664        return rv;
1665}
1666EXPORT_SYMBOL(ipmi_register_for_cmd);
1667
1668int ipmi_unregister_for_cmd(struct ipmi_user *user,
1669                            unsigned char netfn,
1670                            unsigned char cmd,
1671                            unsigned int  chans)
1672{
1673        struct ipmi_smi *intf = user->intf;
1674        struct cmd_rcvr *rcvr;
1675        struct cmd_rcvr *rcvrs = NULL;
1676        int i, rv = -ENOENT, index;
1677
1678        user = acquire_ipmi_user(user, &index);
1679        if (!user)
1680                return -ENODEV;
1681
1682        mutex_lock(&intf->cmd_rcvrs_mutex);
1683        for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1684                if (((1 << i) & chans) == 0)
1685                        continue;
1686                rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1687                if (rcvr == NULL)
1688                        continue;
1689                if (rcvr->user == user) {
1690                        rv = 0;
1691                        rcvr->chans &= ~chans;
1692                        if (rcvr->chans == 0) {
1693                                list_del_rcu(&rcvr->link);
1694                                rcvr->next = rcvrs;
1695                                rcvrs = rcvr;
1696                        }
1697                }
1698        }
1699        mutex_unlock(&intf->cmd_rcvrs_mutex);
1700        synchronize_rcu();
1701        release_ipmi_user(user, index);
1702        while (rcvrs) {
1703                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1704                rcvr = rcvrs;
1705                rcvrs = rcvr->next;
1706                kfree(rcvr);
1707        }
1708
1709        return rv;
1710}
1711EXPORT_SYMBOL(ipmi_unregister_for_cmd);
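
/*
 * Illustrative example (a minimal sketch, not upstream code): claiming
 * and releasing a netfn/cmd pair on channel 0.  The OEM netfn/cmd
 * values are arbitrary; matching incoming commands are handed to the
 * user's ipmi_recv_hndl with recv_type == IPMI_CMD_RECV_TYPE.
 */
#if 0
static int example_claim_cmd(struct ipmi_user *user)
{
        /* chans is a bitmask; bit 0 selects channel 0 only. */
        return ipmi_register_for_cmd(user, 0x30, 0x01, 1 << 0);
}

static void example_release_cmd(struct ipmi_user *user)
{
        ipmi_unregister_for_cmd(user, 0x30, 0x01, 1 << 0);
}
#endif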
1712
1713static unsigned char
1714ipmb_checksum(unsigned char *data, int size)
1715{
1716        unsigned char csum = 0;
1717
1718        for (; size > 0; size--, data++)
1719                csum += *data;
1720
1721        return -csum;
1722}
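
/*
 * Note: ipmb_checksum() above returns the IPMI 2's-complement checksum,
 * i.e. the byte that makes the modulo-256 sum of the covered bytes plus
 * the checksum itself equal zero.  A minimal self-check sketch
 * (illustrative only, not upstream code):
 */
#if 0
static void example_checksum_check(void)
{
        unsigned char hdr[] = { 0x20, 0x18 };   /* rsSA, netFn/rsLUN */
        unsigned char csum = ipmb_checksum(hdr, sizeof(hdr));

        /* A valid header sums to zero when the checksum is included. */
        WARN_ON(((0x20 + 0x18 + csum) & 0xff) != 0);
}
#endif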
1723
1724static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1725                                   struct kernel_ipmi_msg *msg,
1726                                   struct ipmi_ipmb_addr *ipmb_addr,
1727                                   long                  msgid,
1728                                   unsigned char         ipmb_seq,
1729                                   int                   broadcast,
1730                                   unsigned char         source_address,
1731                                   unsigned char         source_lun)
1732{
1733        int i = broadcast;
1734
1735        /* Format the IPMB header data. */
1736        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1737        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1738        smi_msg->data[2] = ipmb_addr->channel;
1739        if (broadcast)
1740                smi_msg->data[3] = 0;
1741        smi_msg->data[i+3] = ipmb_addr->slave_addr;
1742        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1743        smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1744        smi_msg->data[i+6] = source_address;
1745        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1746        smi_msg->data[i+8] = msg->cmd;
1747
1748        /* Now tack on the data to the message. */
1749        if (msg->data_len > 0)
1750                memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1751        smi_msg->data_size = msg->data_len + 9;
1752
1753        /* Now calculate the checksum and tack it on. */
1754        smi_msg->data[i+smi_msg->data_size]
1755                = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1756
1757        /*
1758         * Add on the checksum size and the offset from the
1759         * broadcast.
1760         */
1761        smi_msg->data_size += 1 + i;
1762
1763        smi_msg->msgid = msgid;
1764}
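
/*
 * For reference, format_ipmb_msg() above wraps the request in a Send
 * Message command with the following layout (i is 1 for a broadcast,
 * otherwise 0):
 *
 *   data[0]      (App request netfn) << 2
 *   data[1]      Send Message command
 *   data[2]      channel
 *   data[3]      0x00 broadcast slave address (broadcast only)
 *   data[i+3]    rsSA (destination slave address)
 *   data[i+4]    netFn << 2 | rsLUN
 *   data[i+5]    checksum 1, covering rsSA and netFn/rsLUN
 *   data[i+6]    rqSA (source address)
 *   data[i+7]    rqSeq << 2 | rqLUN
 *   data[i+8]    cmd
 *   data[i+9..]  message data, followed by checksum 2
 */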
1765
1766static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1767                                  struct kernel_ipmi_msg *msg,
1768                                  struct ipmi_lan_addr  *lan_addr,
1769                                  long                  msgid,
1770                                  unsigned char         ipmb_seq,
1771                                  unsigned char         source_lun)
1772{
1773        /* Format the IPMB header data. */
1774        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1775        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1776        smi_msg->data[2] = lan_addr->channel;
1777        smi_msg->data[3] = lan_addr->session_handle;
1778        smi_msg->data[4] = lan_addr->remote_SWID;
1779        smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1780        smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1781        smi_msg->data[7] = lan_addr->local_SWID;
1782        smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1783        smi_msg->data[9] = msg->cmd;
1784
1785        /* Now tack on the data to the message. */
1786        if (msg->data_len > 0)
1787                memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1788        smi_msg->data_size = msg->data_len + 10;
1789
1790        /* Now calculate the checksum and tack it on. */
1791        smi_msg->data[smi_msg->data_size]
1792                = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1793
1794        /*
1795         * Add on the checksum size; unlike the IPMB case there is
1796         * no broadcast offset here.
1797         */
1798        smi_msg->data_size += 1;
1799
1800        smi_msg->msgid = msgid;
1801}
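
/*
 * format_lan_msg() above builds the analogous Send Message wrapping for
 * LAN/async channels: channel, session handle, remote SWID, netFn/LUN,
 * checksum 1, local SWID, rqSeq/LUN, cmd, the message data and a
 * trailing checksum 2.
 */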
1802
1803static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1804                                             struct ipmi_smi_msg *smi_msg,
1805                                             int priority)
1806{
1807        if (intf->curr_msg) {
1808                if (priority > 0)
1809                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1810                else
1811                        list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1812                smi_msg = NULL;
1813        } else {
1814                intf->curr_msg = smi_msg;
1815        }
1816
1817        return smi_msg;
1818}
1819
1820static void smi_send(struct ipmi_smi *intf,
1821                     const struct ipmi_smi_handlers *handlers,
1822                     struct ipmi_smi_msg *smi_msg, int priority)
1823{
1824        int run_to_completion = intf->run_to_completion;
1825        unsigned long flags = 0;
1826
1827        if (!run_to_completion)
1828                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1829        smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1830
1831        if (!run_to_completion)
1832                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1833
1834        if (smi_msg)
1835                handlers->sender(intf->send_info, smi_msg);
1836}
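
/*
 * Note on the send path above: if nothing is currently in flight the
 * message is handed straight to the lower-layer sender; otherwise it is
 * queued, with priority > 0 messages going to hp_xmit_msgs, which is
 * drained ahead of the normal xmit_msgs list.  When run_to_completion
 * is set (panic context) the xmit lock is skipped entirely.
 */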
1837
1838static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1839{
1840        return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1841                 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1842                     || (msg->cmd == IPMI_WARM_RESET_CMD)))
1843                || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1844}
1845
1846static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1847                              struct ipmi_addr       *addr,
1848                              long                   msgid,
1849                              struct kernel_ipmi_msg *msg,
1850                              struct ipmi_smi_msg    *smi_msg,
1851                              struct ipmi_recv_msg   *recv_msg,
1852                              int                    retries,
1853                              unsigned int           retry_time_ms)
1854{
1855        struct ipmi_system_interface_addr *smi_addr;
1856
1857        if (msg->netfn & 1)
1858                /* Responses are not allowed to the SMI. */
1859                return -EINVAL;
1860
1861        smi_addr = (struct ipmi_system_interface_addr *) addr;
1862        if (smi_addr->lun > 3) {
1863                ipmi_inc_stat(intf, sent_invalid_commands);
1864                return -EINVAL;
1865        }
1866
1867        memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1868
1869        if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1870            && ((msg->cmd == IPMI_SEND_MSG_CMD)
1871                || (msg->cmd == IPMI_GET_MSG_CMD)
1872                || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1873                /*
1874                 * We don't let the user do these, since we manage
1875                 * the sequence numbers.
1876                 */
1877                ipmi_inc_stat(intf, sent_invalid_commands);
1878                return -EINVAL;
1879        }
1880
1881        if (is_maintenance_mode_cmd(msg)) {
1882                unsigned long flags;
1883
1884                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1885                intf->auto_maintenance_timeout
1886                        = maintenance_mode_timeout_ms;
1887                if (!intf->maintenance_mode
1888                    && !intf->maintenance_mode_enable) {
1889                        intf->maintenance_mode_enable = true;
1890                        maintenance_mode_update(intf);
1891                }
1892                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1893                                       flags);
1894        }
1895
1896        if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1897                ipmi_inc_stat(intf, sent_invalid_commands);
1898                return -EMSGSIZE;
1899        }
1900
1901        smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1902        smi_msg->data[1] = msg->cmd;
1903        smi_msg->msgid = msgid;
1904        smi_msg->user_data = recv_msg;
1905        if (msg->data_len > 0)
1906                memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1907        smi_msg->data_size = msg->data_len + 2;
1908        ipmi_inc_stat(intf, sent_local_commands);
1909
1910        return 0;
1911}
1912
1913static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1914                           struct ipmi_addr       *addr,
1915                           long                   msgid,
1916                           struct kernel_ipmi_msg *msg,
1917                           struct ipmi_smi_msg    *smi_msg,
1918                           struct ipmi_recv_msg   *recv_msg,
1919                           unsigned char          source_address,
1920                           unsigned char          source_lun,
1921                           int                    retries,
1922                           unsigned int           retry_time_ms)
1923{
1924        struct ipmi_ipmb_addr *ipmb_addr;
1925        unsigned char ipmb_seq;
1926        long seqid;
1927        int broadcast = 0;
1928        struct ipmi_channel *chans;
1929        int rv = 0;
1930
1931        if (addr->channel >= IPMI_MAX_CHANNELS) {
1932                ipmi_inc_stat(intf, sent_invalid_commands);
1933                return -EINVAL;
1934        }
1935
1936        chans = READ_ONCE(intf->channel_list)->c;
1937
1938        if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1939                ipmi_inc_stat(intf, sent_invalid_commands);
1940                return -EINVAL;
1941        }
1942
1943        if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1944                /*
1945                 * Broadcasts add a zero at the beginning of the
1946                 * message but are otherwise the same as an IPMB
1947                 * address.
1948                 */
1949                addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1950                broadcast = 1;
1951                retries = 0; /* Don't retry broadcasts. */
1952        }
1953
1954        /*
1955         * 9 for the header and 1 for the checksum, plus
1956         * possibly one for the broadcast.
1957         */
1958        if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1959                ipmi_inc_stat(intf, sent_invalid_commands);
1960                return -EMSGSIZE;
1961        }
1962
1963        ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1964        if (ipmb_addr->lun > 3) {
1965                ipmi_inc_stat(intf, sent_invalid_commands);
1966                return -EINVAL;
1967        }
1968
1969        memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1970
1971        if (recv_msg->msg.netfn & 0x1) {
1972                /*
1973                 * It's a response, so use the user's sequence
1974                 * from msgid.
1975                 */
1976                ipmi_inc_stat(intf, sent_ipmb_responses);
1977                format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1978                                msgid, broadcast,
1979                                source_address, source_lun);
1980
1981                /*
1982                 * Save the receive message so we can use it
1983                 * to deliver the response.
1984                 */
1985                smi_msg->user_data = recv_msg;
1986        } else {
1987                /* It's a command, so get a sequence for it. */
1988                unsigned long flags;
1989
1990                spin_lock_irqsave(&intf->seq_lock, flags);
1991
1992                if (is_maintenance_mode_cmd(msg))
1993                        intf->ipmb_maintenance_mode_timeout =
1994                                maintenance_mode_timeout_ms;
1995
1996                if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1997                        /* Different default in maintenance mode */
1998                        retry_time_ms = default_maintenance_retry_ms;
1999
2000                /*
2001                 * Create a sequence number with a 1 second
2002                 * timeout and 4 retries.
2003                 */
2004                rv = intf_next_seq(intf,
2005                                   recv_msg,
2006                                   retry_time_ms,
2007                                   retries,
2008                                   broadcast,
2009                                   &ipmb_seq,
2010                                   &seqid);
2011                if (rv)
2012                        /*
2013                         * We have probably used up all the sequence
2014                         * numbers, so abort.
2015                         */
2016                        goto out_err;
2017
2018                ipmi_inc_stat(intf, sent_ipmb_commands);
2019
2020                /*
2021                 * Store the sequence number in the message,
2022                 * so that when the send message response
2023                 * comes back we can start the timer.
2024                 */
2025                format_ipmb_msg(smi_msg, msg, ipmb_addr,
2026                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2027                                ipmb_seq, broadcast,
2028                                source_address, source_lun);
2029
2030                /*
2031                 * Copy the message into the recv message data, so we
2032                 * can retransmit it later if necessary.
2033                 */
2034                memcpy(recv_msg->msg_data, smi_msg->data,
2035                       smi_msg->data_size);
2036                recv_msg->msg.data = recv_msg->msg_data;
2037                recv_msg->msg.data_len = smi_msg->data_size;
2038
2039                /*
2040                 * We don't unlock until here, because we need
2041                 * to copy the completed message into the
2042                 * recv_msg before we release the lock.
2043                 * Otherwise, race conditions may bite us.  I
2044                 * know that's pretty paranoid, but I prefer
2045                 * to be correct.
2046                 */
2047out_err:
2048                spin_unlock_irqrestore(&intf->seq_lock, flags);
2049        }
2050
2051        return rv;
2052}
2053
2054static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2055                          struct ipmi_addr       *addr,
2056                          long                   msgid,
2057                          struct kernel_ipmi_msg *msg,
2058                          struct ipmi_smi_msg    *smi_msg,
2059                          struct ipmi_recv_msg   *recv_msg,
2060                          unsigned char          source_lun,
2061                          int                    retries,
2062                          unsigned int           retry_time_ms)
2063{
2064        struct ipmi_lan_addr  *lan_addr;
2065        unsigned char ipmb_seq;
2066        long seqid;
2067        struct ipmi_channel *chans;
2068        int rv = 0;
2069
2070        if (addr->channel >= IPMI_MAX_CHANNELS) {
2071                ipmi_inc_stat(intf, sent_invalid_commands);
2072                return -EINVAL;
2073        }
2074
2075        chans = READ_ONCE(intf->channel_list)->c;
2076
2077        if ((chans[addr->channel].medium
2078                                != IPMI_CHANNEL_MEDIUM_8023LAN)
2079                        && (chans[addr->channel].medium
2080                            != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2081                ipmi_inc_stat(intf, sent_invalid_commands);
2082                return -EINVAL;
2083        }
2084
2085        /* 11 for the header and 1 for the checksum. */
2086        if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2087                ipmi_inc_stat(intf, sent_invalid_commands);
2088                return -EMSGSIZE;
2089        }
2090
2091        lan_addr = (struct ipmi_lan_addr *) addr;
2092        if (lan_addr->lun > 3) {
2093                ipmi_inc_stat(intf, sent_invalid_commands);
2094                return -EINVAL;
2095        }
2096
2097        memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2098
2099        if (recv_msg->msg.netfn & 0x1) {
2100                /*
2101                 * It's a response, so use the user's sequence
2102                 * from msgid.
2103                 */
2104                ipmi_inc_stat(intf, sent_lan_responses);
2105                format_lan_msg(smi_msg, msg, lan_addr, msgid,
2106                               msgid, source_lun);
2107
2108                /*
2109                 * Save the receive message so we can use it
2110                 * to deliver the response.
2111                 */
2112                smi_msg->user_data = recv_msg;
2113        } else {
2114                /* It's a command, so get a sequence for it. */
2115                unsigned long flags;
2116
2117                spin_lock_irqsave(&intf->seq_lock, flags);
2118
2119                /*
2120                 * Create a sequence number with a 1 second
2121                 * timeout and 4 retries.
2122                 */
2123                rv = intf_next_seq(intf,
2124                                   recv_msg,
2125                                   retry_time_ms,
2126                                   retries,
2127                                   0,
2128                                   &ipmb_seq,
2129                                   &seqid);
2130                if (rv)
2131                        /*
2132                         * We have probably used up all the sequence
2133                         * numbers, so abort.
2134                         */
2135                        goto out_err;
2136
2137                ipmi_inc_stat(intf, sent_lan_commands);
2138
2139                /*
2140                 * Store the sequence number in the message,
2141                 * so that when the send message response
2142                 * comes back we can start the timer.
2143                 */
2144                format_lan_msg(smi_msg, msg, lan_addr,
2145                               STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2146                               ipmb_seq, source_lun);
2147
2148                /*
2149                 * Copy the message into the recv message data, so we
2150                 * can retransmit it later if necessary.
2151                 */
2152                memcpy(recv_msg->msg_data, smi_msg->data,
2153                       smi_msg->data_size);
2154                recv_msg->msg.data = recv_msg->msg_data;
2155                recv_msg->msg.data_len = smi_msg->data_size;
2156
2157                /*
2158                 * We don't unlock until here, because we need
2159                 * to copy the completed message into the
2160                 * recv_msg before we release the lock.
2161                 * Otherwise, race conditions may bite us.  I
2162                 * know that's pretty paranoid, but I prefer
2163                 * to be correct.
2164                 */
2165out_err:
2166                spin_unlock_irqrestore(&intf->seq_lock, flags);
2167        }
2168
2169        return rv;
2170}
2171
2172/*
2173 * Separate from ipmi_request so that the user does not have to be
2174 * supplied in certain circumstances (mainly at panic time).  If
2175 * messages are supplied, they will be freed, even if an error
2176 * occurs.
2177 */
2178static int i_ipmi_request(struct ipmi_user     *user,
2179                          struct ipmi_smi      *intf,
2180                          struct ipmi_addr     *addr,
2181                          long                 msgid,
2182                          struct kernel_ipmi_msg *msg,
2183                          void                 *user_msg_data,
2184                          void                 *supplied_smi,
2185                          struct ipmi_recv_msg *supplied_recv,
2186                          int                  priority,
2187                          unsigned char        source_address,
2188                          unsigned char        source_lun,
2189                          int                  retries,
2190                          unsigned int         retry_time_ms)
2191{
2192        struct ipmi_smi_msg *smi_msg;
2193        struct ipmi_recv_msg *recv_msg;
2194        int rv = 0;
2195
2196        if (supplied_recv)
2197                recv_msg = supplied_recv;
2198        else {
2199                recv_msg = ipmi_alloc_recv_msg();
2200                if (recv_msg == NULL) {
2201                        rv = -ENOMEM;
2202                        goto out;
2203                }
2204        }
2205        recv_msg->user_msg_data = user_msg_data;
2206
2207        if (supplied_smi)
2208                smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2209        else {
2210                smi_msg = ipmi_alloc_smi_msg();
2211                if (smi_msg == NULL) {
2212                        if (!supplied_recv)
2213                                ipmi_free_recv_msg(recv_msg);
2214                        rv = -ENOMEM;
2215                        goto out;
2216                }
2217        }
2218
2219        rcu_read_lock();
2220        if (intf->in_shutdown) {
2221                rv = -ENODEV;
2222                goto out_err;
2223        }
2224
2225        recv_msg->user = user;
2226        if (user)
2227                /* The put happens when the message is freed. */
2228                kref_get(&user->refcount);
2229        recv_msg->msgid = msgid;
2230        /*
2231         * Store the message to send in the receive message so timeout
2232         * responses can get the proper response data.
2233         */
2234        recv_msg->msg = *msg;
2235
2236        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2237                rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2238                                        recv_msg, retries, retry_time_ms);
2239        } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2240                rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2241                                     source_address, source_lun,
2242                                     retries, retry_time_ms);
2243        } else if (is_lan_addr(addr)) {
2244                rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2245                                    source_lun, retries, retry_time_ms);
2246        } else {
2247                /* Unknown address type. */
2248                ipmi_inc_stat(intf, sent_invalid_commands);
2249                rv = -EINVAL;
2250        }
2251
2252        if (rv) {
2253out_err:
2254                ipmi_free_smi_msg(smi_msg);
2255                ipmi_free_recv_msg(recv_msg);
2256        } else {
2257                pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
2258
2259                smi_send(intf, intf->handlers, smi_msg, priority);
2260        }
2261        rcu_read_unlock();
2262
2263out:
2264        return rv;
2265}
2266
2267static int check_addr(struct ipmi_smi  *intf,
2268                      struct ipmi_addr *addr,
2269                      unsigned char    *saddr,
2270                      unsigned char    *lun)
2271{
2272        if (addr->channel >= IPMI_MAX_CHANNELS)
2273                return -EINVAL;
2274        addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2275        *lun = intf->addrinfo[addr->channel].lun;
2276        *saddr = intf->addrinfo[addr->channel].address;
2277        return 0;
2278}
2279
2280int ipmi_request_settime(struct ipmi_user *user,
2281                         struct ipmi_addr *addr,
2282                         long             msgid,
2283                         struct kernel_ipmi_msg  *msg,
2284                         void             *user_msg_data,
2285                         int              priority,
2286                         int              retries,
2287                         unsigned int     retry_time_ms)
2288{
2289        unsigned char saddr = 0, lun = 0;
2290        int rv, index;
2291
2292        if (!user)
2293                return -EINVAL;
2294
2295        user = acquire_ipmi_user(user, &index);
2296        if (!user)
2297                return -ENODEV;
2298
2299        rv = check_addr(user->intf, addr, &saddr, &lun);
2300        if (!rv)
2301                rv = i_ipmi_request(user,
2302                                    user->intf,
2303                                    addr,
2304                                    msgid,
2305                                    msg,
2306                                    user_msg_data,
2307                                    NULL, NULL,
2308                                    priority,
2309                                    saddr,
2310                                    lun,
2311                                    retries,
2312                                    retry_time_ms);
2313
2314        release_ipmi_user(user, index);
2315        return rv;
2316}
2317EXPORT_SYMBOL(ipmi_request_settime);
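
/*
 * Illustrative example (a minimal sketch, not upstream code): sending a
 * Get Device ID request to the local BMC with ipmi_request_settime().
 * The "user" comes from ipmi_create_user(); the msgid 1234 is an
 * arbitrary cookie echoed back in the response, which arrives in the
 * user's ipmi_recv_hndl with recv_type == IPMI_RESPONSE_RECV_TYPE.
 * Passing -1/0 for retries/retry_time_ms selects the driver defaults.
 */
#if 0
static int example_send_get_device_id(struct ipmi_user *user)
{
        struct ipmi_system_interface_addr addr = {
                .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
                .channel = IPMI_BMC_CHANNEL,
                .lun = 0,
        };
        struct kernel_ipmi_msg msg = {
                .netfn = IPMI_NETFN_APP_REQUEST,
                .cmd = IPMI_GET_DEVICE_ID_CMD,
                .data = NULL,
                .data_len = 0,
        };

        return ipmi_request_settime(user, (struct ipmi_addr *)&addr, 1234,
                                    &msg, NULL, 0, -1, 0);
}
#endif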
2318
2319int ipmi_request_supply_msgs(struct ipmi_user     *user,
2320                             struct ipmi_addr     *addr,
2321                             long                 msgid,
2322                             struct kernel_ipmi_msg *msg,
2323                             void                 *user_msg_data,
2324                             void                 *supplied_smi,
2325                             struct ipmi_recv_msg *supplied_recv,
2326                             int                  priority)
2327{
2328        unsigned char saddr = 0, lun = 0;
2329        int rv, index;
2330
2331        if (!user)
2332                return -EINVAL;
2333
2334        user = acquire_ipmi_user(user, &index);
2335        if (!user)
2336                return -ENODEV;
2337
2338        rv = check_addr(user->intf, addr, &saddr, &lun);
2339        if (!rv)
2340                rv = i_ipmi_request(user,
2341                                    user->intf,
2342                                    addr,
2343                                    msgid,
2344                                    msg,
2345                                    user_msg_data,
2346                                    supplied_smi,
2347                                    supplied_recv,
2348                                    priority,
2349                                    saddr,
2350                                    lun,
2351                                    -1, 0);
2352
2353        release_ipmi_user(user, index);
2354        return rv;
2355}
2356EXPORT_SYMBOL(ipmi_request_supply_msgs);
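
/*
 * ipmi_request_supply_msgs() is the variant for callers that cannot
 * allocate at send time (the IPMI watchdog and the panic path, for
 * example): the SMI and receive message structures are supplied by the
 * caller, and the default retry settings are used (-1, 0 above).
 */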
2357
2358static void bmc_device_id_handler(struct ipmi_smi *intf,
2359                                  struct ipmi_recv_msg *msg)
2360{
2361        int rv;
2362
2363        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2364                        || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2365                        || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2366                dev_warn(intf->si_dev,
2367                         "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2368                         msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2369                return;
2370        }
2371
2372        rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2373                        msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2374        if (rv) {
2375                dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2376                /* Record the completion code on error. */
2377                intf->bmc->cc = msg->msg.data[0];
2378                intf->bmc->dyn_id_set = 0;
2379        } else {
2380                /*
2381                 * Make sure the id data is available before setting
2382                 * dyn_id_set.
2383                 */
2384                smp_wmb();
2385                intf->bmc->dyn_id_set = 1;
2386        }
2387
2388        wake_up(&intf->waitq);
2389}
2390
2391static int
2392send_get_device_id_cmd(struct ipmi_smi *intf)
2393{
2394        struct ipmi_system_interface_addr si;
2395        struct kernel_ipmi_msg msg;
2396
2397        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2398        si.channel = IPMI_BMC_CHANNEL;
2399        si.lun = 0;
2400
2401        msg.netfn = IPMI_NETFN_APP_REQUEST;
2402        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2403        msg.data = NULL;
2404        msg.data_len = 0;
2405
2406        return i_ipmi_request(NULL,
2407                              intf,
2408                              (struct ipmi_addr *) &si,
2409                              0,
2410                              &msg,
2411                              intf,
2412                              NULL,
2413                              NULL,
2414                              0,
2415                              intf->addrinfo[0].address,
2416                              intf->addrinfo[0].lun,
2417                              -1, 0);
2418}
2419
2420static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2421{
2422        int rv;
2423        unsigned int retry_count = 0;
2424
2425        intf->null_user_handler = bmc_device_id_handler;
2426
2427retry:
2428        bmc->cc = 0;
2429        bmc->dyn_id_set = 2;
2430
2431        rv = send_get_device_id_cmd(intf);
2432        if (rv)
2433                goto out_reset_handler;
2434
2435        wait_event(intf->waitq, bmc->dyn_id_set != 2);
2436
2437        if (!bmc->dyn_id_set) {
2438                if (bmc->cc != IPMI_CC_NO_ERROR &&
2439                    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2440                        msleep(500);
2441                        dev_warn(intf->si_dev,
2442                            "BMC returned 0x%2.2x, retry get bmc device id\n",
2443                            bmc->cc);
2444                        goto retry;
2445                }
2446
2447                rv = -EIO; /* Something went wrong in the fetch. */
2448        }
2449
2450        /* dyn_id_set makes the id data available. */
2451        smp_rmb();
2452
2453out_reset_handler:
2454        intf->null_user_handler = NULL;
2455
2456        return rv;
2457}
2458
2459/*
2460 * Fetch the device id for the bmc/interface.  You must pass in either
2461 * bmc or intf; this code will get the other one.  If the data has
2462 * been recently fetched, this will just use the cached data.  Otherwise
2463 * it will run a new fetch.
2464 *
2465 * Except for the first time this is called (in ipmi_add_smi()),
2466 * this will always return good data.
2467 */
2468static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2469                               struct ipmi_device_id *id,
2470                               bool *guid_set, guid_t *guid, int intf_num)
2471{
2472        int rv = 0;
2473        int prev_dyn_id_set, prev_guid_set;
2474        bool intf_set = intf != NULL;
2475
2476        if (!intf) {
2477                mutex_lock(&bmc->dyn_mutex);
2478retry_bmc_lock:
2479                if (list_empty(&bmc->intfs)) {
2480                        mutex_unlock(&bmc->dyn_mutex);
2481                        return -ENOENT;
2482                }
2483                intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2484                                        bmc_link);
2485                kref_get(&intf->refcount);
2486                mutex_unlock(&bmc->dyn_mutex);
2487                mutex_lock(&intf->bmc_reg_mutex);
2488                mutex_lock(&bmc->dyn_mutex);
2489                if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2490                                             bmc_link)) {
2491                        mutex_unlock(&intf->bmc_reg_mutex);
2492                        kref_put(&intf->refcount, intf_free);
2493                        goto retry_bmc_lock;
2494                }
2495        } else {
2496                mutex_lock(&intf->bmc_reg_mutex);
2497                bmc = intf->bmc;
2498                mutex_lock(&bmc->dyn_mutex);
2499                kref_get(&intf->refcount);
2500        }
2501
2502        /* If we have a valid and current ID, just return that. */
2503        if (intf->in_bmc_register ||
2504            (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2505                goto out_noprocessing;
2506
2507        prev_guid_set = bmc->dyn_guid_set;
2508        __get_guid(intf);
2509
2510        prev_dyn_id_set = bmc->dyn_id_set;
2511        rv = __get_device_id(intf, bmc);
2512        if (rv)
2513                goto out;
2514
2515        /*
2516         * The guid, device id, manufacturer id, and product id should
2517         * not change on a BMC.  If any of them do, we have to do some dancing.
2518         */
2519        if (!intf->bmc_registered
2520            || (!prev_guid_set && bmc->dyn_guid_set)
2521            || (!prev_dyn_id_set && bmc->dyn_id_set)
2522            || (prev_guid_set && bmc->dyn_guid_set
2523                && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2524            || bmc->id.device_id != bmc->fetch_id.device_id
2525            || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2526            || bmc->id.product_id != bmc->fetch_id.product_id) {
2527                struct ipmi_device_id id = bmc->fetch_id;
2528                int guid_set = bmc->dyn_guid_set;
2529                guid_t guid;
2530
2531                guid = bmc->fetch_guid;
2532                mutex_unlock(&bmc->dyn_mutex);
2533
2534                __ipmi_bmc_unregister(intf);
2535                /* Fill in the temporary BMC for good measure. */
2536                intf->bmc->id = id;
2537                intf->bmc->dyn_guid_set = guid_set;
2538                intf->bmc->guid = guid;
2539                if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2540                        need_waiter(intf); /* Retry later on an error. */
2541                else
2542                        __scan_channels(intf, &id);
2543
2544
2545                if (!intf_set) {
2546                        /*
2547                         * We weren't given the interface on the
2548                         * command line, so restart the operation on
2549                         * the next interface for the BMC.
2550                         */
2551                        mutex_unlock(&intf->bmc_reg_mutex);
2552                        mutex_lock(&bmc->dyn_mutex);
2553                        goto retry_bmc_lock;
2554                }
2555
2556                /* We have a new BMC, set it up. */
2557                bmc = intf->bmc;
2558                mutex_lock(&bmc->dyn_mutex);
2559                goto out_noprocessing;
2560        } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2561                /* The version info changed, so scan the channels again. */
2562                __scan_channels(intf, &bmc->fetch_id);
2563
2564        bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2565
2566out:
2567        if (rv && prev_dyn_id_set) {
2568                rv = 0; /* Ignore failures if we have previous data. */
2569                bmc->dyn_id_set = prev_dyn_id_set;
2570        }
2571        if (!rv) {
2572                bmc->id = bmc->fetch_id;
2573                if (bmc->dyn_guid_set)
2574                        bmc->guid = bmc->fetch_guid;
2575                else if (prev_guid_set)
2576                        /*
2577                         * The guid used to be valid but the fetch failed;
2578                         * just use the cached value.
2579                         */
2580                        bmc->dyn_guid_set = prev_guid_set;
2581        }
2582out_noprocessing:
2583        if (!rv) {
2584                if (id)
2585                        *id = bmc->id;
2586
2587                if (guid_set)
2588                        *guid_set = bmc->dyn_guid_set;
2589
2590                if (guid && bmc->dyn_guid_set)
2591                        *guid = bmc->guid;
2592        }
2593
2594        mutex_unlock(&bmc->dyn_mutex);
2595        mutex_unlock(&intf->bmc_reg_mutex);
2596
2597        kref_put(&intf->refcount, intf_free);
2598        return rv;
2599}
2600
2601static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2602                             struct ipmi_device_id *id,
2603                             bool *guid_set, guid_t *guid)
2604{
2605        return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2606}
2607
2608static ssize_t device_id_show(struct device *dev,
2609                              struct device_attribute *attr,
2610                              char *buf)
2611{
2612        struct bmc_device *bmc = to_bmc_device(dev);
2613        struct ipmi_device_id id;
2614        int rv;
2615
2616        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2617        if (rv)
2618                return rv;
2619
2620        return snprintf(buf, 10, "%u\n", id.device_id);
2621}
2622static DEVICE_ATTR_RO(device_id);
2623
2624static ssize_t provides_device_sdrs_show(struct device *dev,
2625                                         struct device_attribute *attr,
2626                                         char *buf)
2627{
2628        struct bmc_device *bmc = to_bmc_device(dev);
2629        struct ipmi_device_id id;
2630        int rv;
2631
2632        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2633        if (rv)
2634                return rv;
2635
2636        return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2637}
2638static DEVICE_ATTR_RO(provides_device_sdrs);
2639
2640static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2641                             char *buf)
2642{
2643        struct bmc_device *bmc = to_bmc_device(dev);
2644        struct ipmi_device_id id;
2645        int rv;
2646
2647        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2648        if (rv)
2649                return rv;
2650
2651        return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2652}
2653static DEVICE_ATTR_RO(revision);
2654
2655static ssize_t firmware_revision_show(struct device *dev,
2656                                      struct device_attribute *attr,
2657                                      char *buf)
2658{
2659        struct bmc_device *bmc = to_bmc_device(dev);
2660        struct ipmi_device_id id;
2661        int rv;
2662
2663        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2664        if (rv)
2665                return rv;
2666
2667        return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2668                        id.firmware_revision_2);
2669}
2670static DEVICE_ATTR_RO(firmware_revision);
2671
2672static ssize_t ipmi_version_show(struct device *dev,
2673                                 struct device_attribute *attr,
2674                                 char *buf)
2675{
2676        struct bmc_device *bmc = to_bmc_device(dev);
2677        struct ipmi_device_id id;
2678        int rv;
2679
2680        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2681        if (rv)
2682                return rv;
2683
2684        return snprintf(buf, 20, "%u.%u\n",
2685                        ipmi_version_major(&id),
2686                        ipmi_version_minor(&id));
2687}
2688static DEVICE_ATTR_RO(ipmi_version);
2689
2690static ssize_t add_dev_support_show(struct device *dev,
2691                                    struct device_attribute *attr,
2692                                    char *buf)
2693{
2694        struct bmc_device *bmc = to_bmc_device(dev);
2695        struct ipmi_device_id id;
2696        int rv;
2697
2698        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2699        if (rv)
2700                return rv;
2701
2702        return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2703}
2704static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2705                   NULL);
2706
2707static ssize_t manufacturer_id_show(struct device *dev,
2708                                    struct device_attribute *attr,
2709                                    char *buf)
2710{
2711        struct bmc_device *bmc = to_bmc_device(dev);
2712        struct ipmi_device_id id;
2713        int rv;
2714
2715        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2716        if (rv)
2717                return rv;
2718
2719        return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2720}
2721static DEVICE_ATTR_RO(manufacturer_id);
2722
2723static ssize_t product_id_show(struct device *dev,
2724                               struct device_attribute *attr,
2725                               char *buf)
2726{
2727        struct bmc_device *bmc = to_bmc_device(dev);
2728        struct ipmi_device_id id;
2729        int rv;
2730
2731        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2732        if (rv)
2733                return rv;
2734
2735        return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2736}
2737static DEVICE_ATTR_RO(product_id);
2738
2739static ssize_t aux_firmware_rev_show(struct device *dev,
2740                                     struct device_attribute *attr,
2741                                     char *buf)
2742{
2743        struct bmc_device *bmc = to_bmc_device(dev);
2744        struct ipmi_device_id id;
2745        int rv;
2746
2747        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2748        if (rv)
2749                return rv;
2750
2751        return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2752                        id.aux_firmware_revision[3],
2753                        id.aux_firmware_revision[2],
2754                        id.aux_firmware_revision[1],
2755                        id.aux_firmware_revision[0]);
2756}
2757static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2758
2759static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2760                         char *buf)
2761{
2762        struct bmc_device *bmc = to_bmc_device(dev);
2763        bool guid_set;
2764        guid_t guid;
2765        int rv;
2766
2767        rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2768        if (rv)
2769                return rv;
2770        if (!guid_set)
2771                return -ENOENT;
2772
2773        return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2774}
2775static DEVICE_ATTR_RO(guid);
2776
2777static struct attribute *bmc_dev_attrs[] = {
2778        &dev_attr_device_id.attr,
2779        &dev_attr_provides_device_sdrs.attr,
2780        &dev_attr_revision.attr,
2781        &dev_attr_firmware_revision.attr,
2782        &dev_attr_ipmi_version.attr,
2783        &dev_attr_additional_device_support.attr,
2784        &dev_attr_manufacturer_id.attr,
2785        &dev_attr_product_id.attr,
2786        &dev_attr_aux_firmware_revision.attr,
2787        &dev_attr_guid.attr,
2788        NULL
2789};
2790
2791static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2792                                       struct attribute *attr, int idx)
2793{
2794        struct device *dev = kobj_to_dev(kobj);
2795        struct bmc_device *bmc = to_bmc_device(dev);
2796        umode_t mode = attr->mode;
2797        int rv;
2798
2799        if (attr == &dev_attr_aux_firmware_revision.attr) {
2800                struct ipmi_device_id id;
2801
2802                rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2803                return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2804        }
2805        if (attr == &dev_attr_guid.attr) {
2806                bool guid_set;
2807
2808                rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2809                return (!rv && guid_set) ? mode : 0;
2810        }
2811        return mode;
2812}
2813
2814static const struct attribute_group bmc_dev_attr_group = {
2815        .attrs          = bmc_dev_attrs,
2816        .is_visible     = bmc_dev_attr_is_visible,
2817};
2818
2819static const struct attribute_group *bmc_dev_attr_groups[] = {
2820        &bmc_dev_attr_group,
2821        NULL
2822};
2823
2824static const struct device_type bmc_device_type = {
2825        .groups         = bmc_dev_attr_groups,
2826};
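
/*
 * The attributes above are exposed through the "ipmi_bmc" platform
 * device registered below, e.g. (illustrative paths):
 *
 *   /sys/devices/platform/ipmi_bmc.0/device_id
 *   /sys/devices/platform/ipmi_bmc.0/firmware_revision
 *   /sys/devices/platform/ipmi_bmc.0/guid
 *
 * aux_firmware_revision and guid are hidden by bmc_dev_attr_is_visible()
 * when the BMC does not report them, and each interface also gets a
 * "bmc" symlink pointing at this device.
 */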
2827
2828static int __find_bmc_guid(struct device *dev, const void *data)
2829{
2830        const guid_t *guid = data;
2831        struct bmc_device *bmc;
2832        int rv;
2833
2834        if (dev->type != &bmc_device_type)
2835                return 0;
2836
2837        bmc = to_bmc_device(dev);
2838        rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2839        if (rv)
2840                rv = kref_get_unless_zero(&bmc->usecount);
2841        return rv;
2842}
2843
2844/*
2845 * Returns with the bmc's usecount incremented, if it is non-NULL.
2846 */
2847static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2848                                             guid_t *guid)
2849{
2850        struct device *dev;
2851        struct bmc_device *bmc = NULL;
2852
2853        dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2854        if (dev) {
2855                bmc = to_bmc_device(dev);
2856                put_device(dev);
2857        }
2858        return bmc;
2859}
2860
2861struct prod_dev_id {
2862        unsigned int  product_id;
2863        unsigned char device_id;
2864};
2865
2866static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2867{
2868        const struct prod_dev_id *cid = data;
2869        struct bmc_device *bmc;
2870        int rv;
2871
2872        if (dev->type != &bmc_device_type)
2873                return 0;
2874
2875        bmc = to_bmc_device(dev);
2876        rv = (bmc->id.product_id == cid->product_id
2877              && bmc->id.device_id == cid->device_id);
2878        if (rv)
2879                rv = kref_get_unless_zero(&bmc->usecount);
2880        return rv;
2881}
2882
2883/*
2884 * Returns with the bmc's usecount incremented, if it is non-NULL.
2885 */
2886static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2887        struct device_driver *drv,
2888        unsigned int product_id, unsigned char device_id)
2889{
2890        struct prod_dev_id id = {
2891                .product_id = product_id,
2892                .device_id = device_id,
2893        };
2894        struct device *dev;
2895        struct bmc_device *bmc = NULL;
2896
2897        dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2898        if (dev) {
2899                bmc = to_bmc_device(dev);
2900                put_device(dev);
2901        }
2902        return bmc;
2903}
2904
2905static DEFINE_IDA(ipmi_bmc_ida);
2906
2907static void
2908release_bmc_device(struct device *dev)
2909{
2910        kfree(to_bmc_device(dev));
2911}
2912
2913static void cleanup_bmc_work(struct work_struct *work)
2914{
2915        struct bmc_device *bmc = container_of(work, struct bmc_device,
2916                                              remove_work);
2917        int id = bmc->pdev.id; /* Unregister overwrites id */
2918
2919        platform_device_unregister(&bmc->pdev);
2920        ida_simple_remove(&ipmi_bmc_ida, id);
2921}
2922
2923static void
2924cleanup_bmc_device(struct kref *ref)
2925{
2926        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2927
2928        /*
2929         * Remove the platform device in a work queue to avoid issues
2930         * with removing the device attributes while reading a device
2931         * attribute.
2932         */
2933        schedule_work(&bmc->remove_work);
2934}
2935
2936/*
2937 * Must be called with intf->bmc_reg_mutex held.
2938 */
2939static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2940{
2941        struct bmc_device *bmc = intf->bmc;
2942
2943        if (!intf->bmc_registered)
2944                return;
2945
2946        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2947        sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2948        kfree(intf->my_dev_name);
2949        intf->my_dev_name = NULL;
2950
2951        mutex_lock(&bmc->dyn_mutex);
2952        list_del(&intf->bmc_link);
2953        mutex_unlock(&bmc->dyn_mutex);
2954        intf->bmc = &intf->tmp_bmc;
2955        kref_put(&bmc->usecount, cleanup_bmc_device);
2956        intf->bmc_registered = false;
2957}
2958
2959static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2960{
2961        mutex_lock(&intf->bmc_reg_mutex);
2962        __ipmi_bmc_unregister(intf);
2963        mutex_unlock(&intf->bmc_reg_mutex);
2964}
2965
2966/*
2967 * Must be called with intf->bmc_reg_mutex held.
2968 */
2969static int __ipmi_bmc_register(struct ipmi_smi *intf,
2970                               struct ipmi_device_id *id,
2971                               bool guid_set, guid_t *guid, int intf_num)
2972{
2973        int               rv;
2974        struct bmc_device *bmc;
2975        struct bmc_device *old_bmc;
2976
2977        /*
2978         * platform_device_register() can cause bmc_reg_mutex to
2979         * be claimed because of the is_visible functions of
2980         * the attributes.  Eliminate possible recursion and
2981         * release the lock.
2982         */
2983        intf->in_bmc_register = true;
2984        mutex_unlock(&intf->bmc_reg_mutex);
2985
2986        /*
2987         * Try to find whether there is already a bmc_device struct
2988         * representing the interfaced BMC.
2989         */
2990        mutex_lock(&ipmidriver_mutex);
2991        if (guid_set)
2992                old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2993        else
2994                old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2995                                                    id->product_id,
2996                                                    id->device_id);
2997
2998        /*
2999         * If a bmc_device already exists, reuse it; otherwise
3000         * allocate and register a new BMC device.
3001         */
3002        if (old_bmc) {
3003                bmc = old_bmc;
3004                /*
3005                 * Note: old_bmc already has usecount incremented by
3006                 * the BMC find functions.
3007                 */
3008                intf->bmc = old_bmc;
3009                mutex_lock(&bmc->dyn_mutex);
3010                list_add_tail(&intf->bmc_link, &bmc->intfs);
3011                mutex_unlock(&bmc->dyn_mutex);
3012
3013                dev_info(intf->si_dev,
3014                         "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3015                         bmc->id.manufacturer_id,
3016                         bmc->id.product_id,
3017                         bmc->id.device_id);
3018        } else {
3019                bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3020                if (!bmc) {
3021                        rv = -ENOMEM;
3022                        goto out;
3023                }
3024                INIT_LIST_HEAD(&bmc->intfs);
3025                mutex_init(&bmc->dyn_mutex);
3026                INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3027
3028                bmc->id = *id;
3029                bmc->dyn_id_set = 1;
3030                bmc->dyn_guid_set = guid_set;
3031                bmc->guid = *guid;
3032                bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3033
3034                bmc->pdev.name = "ipmi_bmc";
3035
3036                rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3037                if (rv < 0) {
3038                        kfree(bmc);
3039                        goto out;
3040                }
3041
3042                bmc->pdev.dev.driver = &ipmidriver.driver;
3043                bmc->pdev.id = rv;
3044                bmc->pdev.dev.release = release_bmc_device;
3045                bmc->pdev.dev.type = &bmc_device_type;
3046                kref_init(&bmc->usecount);
3047
3048                intf->bmc = bmc;
3049                mutex_lock(&bmc->dyn_mutex);
3050                list_add_tail(&intf->bmc_link, &bmc->intfs);
3051                mutex_unlock(&bmc->dyn_mutex);
3052
3053                rv = platform_device_register(&bmc->pdev);
3054                if (rv) {
3055                        dev_err(intf->si_dev,
3056                                "Unable to register bmc device: %d\n",
3057                                rv);
3058                        goto out_list_del;
3059                }
3060
3061                dev_info(intf->si_dev,
3062                         "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3063                         bmc->id.manufacturer_id,
3064                         bmc->id.product_id,
3065                         bmc->id.device_id);
3066        }
3067
3068        /*
3069         * create symlink from system interface device to bmc device
3070         * and back.
3071         */
3072        rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3073        if (rv) {
3074                dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3075                goto out_put_bmc;
3076        }
3077
3078        if (intf_num == -1)
3079                intf_num = intf->intf_num;
3080        intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3081        if (!intf->my_dev_name) {
3082                rv = -ENOMEM;
3083                dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3084                        rv);
3085                goto out_unlink1;
3086        }
3087
3088        rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3089                               intf->my_dev_name);
3090        if (rv) {
3091                dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3092                        rv);
3093                goto out_free_my_dev_name;
3094        }
3095
3096        intf->bmc_registered = true;
3097
3098out:
3099        mutex_unlock(&ipmidriver_mutex);
3100        mutex_lock(&intf->bmc_reg_mutex);
3101        intf->in_bmc_register = false;
3102        return rv;
3103
3104
3105out_free_my_dev_name:
3106        kfree(intf->my_dev_name);
3107        intf->my_dev_name = NULL;
3108
3109out_unlink1:
3110        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3111
3112out_put_bmc:
3113        mutex_lock(&bmc->dyn_mutex);
3114        list_del(&intf->bmc_link);
3115        mutex_unlock(&bmc->dyn_mutex);
3116        intf->bmc = &intf->tmp_bmc;
3117        kref_put(&bmc->usecount, cleanup_bmc_device);
3118        goto out;
3119
3120out_list_del:
3121        mutex_lock(&bmc->dyn_mutex);
3122        list_del(&intf->bmc_link);
3123        mutex_unlock(&bmc->dyn_mutex);
3124        intf->bmc = &intf->tmp_bmc;
3125        put_device(&bmc->pdev.dev);
3126        goto out;
3127}
3128
3129static int
3130send_guid_cmd(struct ipmi_smi *intf, int chan)
3131{
3132        struct kernel_ipmi_msg            msg;
3133        struct ipmi_system_interface_addr si;
3134
3135        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3136        si.channel = IPMI_BMC_CHANNEL;
3137        si.lun = 0;
3138
3139        msg.netfn = IPMI_NETFN_APP_REQUEST;
3140        msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3141        msg.data = NULL;
3142        msg.data_len = 0;
3143        return i_ipmi_request(NULL,
3144                              intf,
3145                              (struct ipmi_addr *) &si,
3146                              0,
3147                              &msg,
3148                              intf,
3149                              NULL,
3150                              NULL,
3151                              0,
3152                              intf->addrinfo[0].address,
3153                              intf->addrinfo[0].lun,
3154                              -1, 0);
3155}
3156
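/*
 * Handle the response to the Get Device GUID command sent above.  As
 * parsed below, msg->msg.data[0] is the completion code and
 * data[1]..data[16] carry the 16-byte GUID, so a valid response needs
 * at least UUID_SIZE + 1 data bytes.
 */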
3157static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3158{
3159        struct bmc_device *bmc = intf->bmc;
3160
3161        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3162            || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3163            || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3164                /* Not for me */
3165                return;
3166
3167        if (msg->msg.data[0] != 0) {
3168                /* Error getting the GUID; the BMC doesn't have one. */
3169                bmc->dyn_guid_set = 0;
3170                goto out;
3171        }
3172
3173        if (msg->msg.data_len < UUID_SIZE + 1) {
3174                bmc->dyn_guid_set = 0;
3175                dev_warn(intf->si_dev,
3176                         "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3177                         msg->msg.data_len, UUID_SIZE + 1);
3178                goto out;
3179        }
3180
3181        import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3182        /*
3183         * Make sure the guid data is available before setting
3184         * dyn_guid_set.
3185         */
3186        smp_wmb();
3187        bmc->dyn_guid_set = 1;
3188 out:
3189        wake_up(&intf->waitq);
3190}
3191
3192static void __get_guid(struct ipmi_smi *intf)
3193{
3194        int rv;
3195        struct bmc_device *bmc = intf->bmc;
3196
3197        bmc->dyn_guid_set = 2;
3198        intf->null_user_handler = guid_handler;
3199        rv = send_guid_cmd(intf, 0);
3200        if (rv)
3201                /* Send failed, no GUID available. */
3202                bmc->dyn_guid_set = 0;
3203        else
3204                wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3205
3206        /* dyn_guid_set makes the guid data available. */
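        /* This read barrier pairs with the smp_wmb() in guid_handler(). */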
3207        smp_rmb();
3208
3209        intf->null_user_handler = NULL;
3210}
3211
3212static int
3213send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3214{
3215        struct kernel_ipmi_msg            msg;
3216        unsigned char                     data[1];
3217        struct ipmi_system_interface_addr si;
3218
3219        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3220        si.channel = IPMI_BMC_CHANNEL;
3221        si.lun = 0;
3222
3223        msg.netfn = IPMI_NETFN_APP_REQUEST;
3224        msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3225        msg.data = data;
3226        msg.data_len = 1;
3227        data[0] = chan;
3228        return i_ipmi_request(NULL,
3229                              intf,
3230                              (struct ipmi_addr *) &si,
3231                              0,
3232                              &msg,
3233                              intf,
3234                              NULL,
3235                              NULL,
3236                              0,
3237                              intf->addrinfo[0].address,
3238                              intf->addrinfo[0].lun,
3239                              -1, 0);
3240}
3241
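/*
 * Handle a Get Channel Info response.  As parsed below, data[0] is the
 * completion code, data[2] bits 0-6 give the channel medium and data[3]
 * bits 0-4 the channel protocol; the channels are scanned one at a time
 * until IPMI_MAX_CHANNELS is reached.
 */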
3242static void
3243channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3244{
3245        int rv = 0;
3246        int ch;
3247        unsigned int set = intf->curr_working_cset;
3248        struct ipmi_channel *chans;
3249
3250        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3251            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3252            && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3253                /* It's the one we want */
3254                if (msg->msg.data[0] != 0) {
3255                        /* Got an error from the channel, just go on. */
3256                        if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3257                                /*
3258                                 * If the MC does not support this
3259                                 * command, that is legal.  We just
3260                                 * assume it has one IPMB at channel
3261                                 * zero.
3262                                 */
3263                                intf->wchannels[set].c[0].medium
3264                                        = IPMI_CHANNEL_MEDIUM_IPMB;
3265                                intf->wchannels[set].c[0].protocol
3266                                        = IPMI_CHANNEL_PROTOCOL_IPMB;
3267
3268                                intf->channel_list = intf->wchannels + set;
3269                                intf->channels_ready = true;
3270                                wake_up(&intf->waitq);
3271                                goto out;
3272                        }
3273                        goto next_channel;
3274                }
3275                if (msg->msg.data_len < 4) {
3276                        /* Message not big enough, just go on. */
3277                        goto next_channel;
3278                }
3279                ch = intf->curr_channel;
3280                chans = intf->wchannels[set].c;
3281                chans[ch].medium = msg->msg.data[2] & 0x7f;
3282                chans[ch].protocol = msg->msg.data[3] & 0x1f;
3283
3284 next_channel:
3285                intf->curr_channel++;
3286                if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3287                        intf->channel_list = intf->wchannels + set;
3288                        intf->channels_ready = true;
3289                        wake_up(&intf->waitq);
3290                } else {
3291                        intf->channel_list = intf->wchannels + set;
3292                        intf->channels_ready = true;
3293                        rv = send_channel_info_cmd(intf, intf->curr_channel);
3294                }
3295
3296                if (rv) {
3297                        /* Got an error somehow, just give up. */
3298                        dev_warn(intf->si_dev,
3299                                 "Error sending channel information for channel %d: %d\n",
3300                                 intf->curr_channel, rv);
3301
3302                        intf->channel_list = intf->wchannels + set;
3303                        intf->channels_ready = true;
3304                        wake_up(&intf->waitq);
3305                }
3306        }
3307 out:
3308        return;
3309}
3310
3311/*
3312 * Must be holding intf->bmc_reg_mutex to call this.
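 *
 * The scan works on the wchannels[] entry that is not currently
 * published (the two entries act as a double buffer), so readers that
 * already hold the old intf->channel_list pointer keep seeing a
 * consistent channel set while a rescan is in progress.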
3313 */
3314static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3315{
3316        int rv;
3317
3318        if (ipmi_version_major(id) > 1
3319                        || (ipmi_version_major(id) == 1
3320                            && ipmi_version_minor(id) >= 5)) {
3321                unsigned int set;
3322
3323                /*
3324                 * Start scanning the channels to see what is
3325                 * available.
3326                 */
3327                set = !intf->curr_working_cset;
3328                intf->curr_working_cset = set;
3329                memset(&intf->wchannels[set], 0,
3330                       sizeof(struct ipmi_channel_set));
3331
3332                intf->null_user_handler = channel_handler;
3333                intf->curr_channel = 0;
3334                rv = send_channel_info_cmd(intf, 0);
3335                if (rv) {
3336                        dev_warn(intf->si_dev,
3337                                 "Error sending channel information for channel 0, %d\n",
3338                                 rv);
3339                        intf->null_user_handler = NULL;
3340                        return -EIO;
3341                }
3342
3343                /* Wait for the channel info to be read. */
3344                wait_event(intf->waitq, intf->channels_ready);
3345                intf->null_user_handler = NULL;
3346        } else {
3347                unsigned int set = intf->curr_working_cset;
3348
3349                /* Assume a single IPMB channel at zero. */
3350                intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3351                intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3352                intf->channel_list = intf->wchannels + set;
3353                intf->channels_ready = true;
3354        }
3355
3356        return 0;
3357}
3358
3359static void ipmi_poll(struct ipmi_smi *intf)
3360{
3361        if (intf->handlers->poll)
3362                intf->handlers->poll(intf->send_info);
3363        /* In case something came in */
3364        handle_new_recv_msgs(intf);
3365}
3366
3367void ipmi_poll_interface(struct ipmi_user *user)
3368{
3369        ipmi_poll(user->intf);
3370}
3371EXPORT_SYMBOL(ipmi_poll_interface);
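
/*
 * Illustrative use (a hypothetical sketch, not taken from an in-tree
 * caller): a user that cannot rely on interrupts, for example while
 * waiting for a response at panic time, can drive the interface by
 * polling:
 *
 *	// got_response is a placeholder flag set from the user's
 *	// receive handler.
 *	while (!got_response)
 *		ipmi_poll_interface(user);
 */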
3372
3373static void redo_bmc_reg(struct work_struct *work)
3374{
3375        struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3376                                             bmc_reg_work);
3377
3378        if (!intf->in_shutdown)
3379                bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3380
3381        kref_put(&intf->refcount, intf_free);
3382}
3383
3384int ipmi_add_smi(struct module         *owner,
3385                 const struct ipmi_smi_handlers *handlers,
3386                 void                  *send_info,
3387                 struct device         *si_dev,
3388                 unsigned char         slave_addr)
3389{
3390        int              i, j;
3391        int              rv;
3392        struct ipmi_smi *intf, *tintf;
3393        struct list_head *link;
3394        struct ipmi_device_id id;
3395
3396        /*
3397         * Make sure the driver is actually initialized; this handles
3398         * problems with initialization order.
3399         */
3400        rv = ipmi_init_msghandler();
3401        if (rv)
3402                return rv;
3403
3404        intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3405        if (!intf)
3406                return -ENOMEM;
3407
3408        rv = init_srcu_struct(&intf->users_srcu);
3409        if (rv) {
3410                kfree(intf);
3411                return rv;
3412        }
3413
3414        intf->owner = owner;
3415        intf->bmc = &intf->tmp_bmc;
3416        INIT_LIST_HEAD(&intf->bmc->intfs);
3417        mutex_init(&intf->bmc->dyn_mutex);
3418        INIT_LIST_HEAD(&intf->bmc_link);
3419        mutex_init(&intf->bmc_reg_mutex);
3420        intf->intf_num = -1; /* Mark it invalid for now. */
3421        kref_init(&intf->refcount);
3422        INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3423        intf->si_dev = si_dev;
3424        for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3425                intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3426                intf->addrinfo[j].lun = 2;
3427        }
3428        if (slave_addr != 0)
3429                intf->addrinfo[0].address = slave_addr;
3430        INIT_LIST_HEAD(&intf->users);
3431        intf->handlers = handlers;
3432        intf->send_info = send_info;
3433        spin_lock_init(&intf->seq_lock);
3434        for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3435                intf->seq_table[j].inuse = 0;
3436                intf->seq_table[j].seqid = 0;
3437        }
3438        intf->curr_seq = 0;
3439        spin_lock_init(&intf->waiting_rcv_msgs_lock);
3440        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3441        tasklet_setup(&intf->recv_tasklet,
3442                     smi_recv_tasklet);
3443        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3444        spin_lock_init(&intf->xmit_msgs_lock);
3445        INIT_LIST_HEAD(&intf->xmit_msgs);
3446        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3447        spin_lock_init(&intf->events_lock);
3448        spin_lock_init(&intf->watch_lock);
3449        atomic_set(&intf->event_waiters, 0);
3450        intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3451        INIT_LIST_HEAD(&intf->waiting_events);
3452        intf->waiting_events_count = 0;
3453        mutex_init(&intf->cmd_rcvrs_mutex);
3454        spin_lock_init(&intf->maintenance_mode_lock);
3455        INIT_LIST_HEAD(&intf->cmd_rcvrs);
3456        init_waitqueue_head(&intf->waitq);
3457        for (i = 0; i < IPMI_NUM_STATS; i++)
3458                atomic_set(&intf->stats[i], 0);
3459
3460        mutex_lock(&ipmi_interfaces_mutex);
3461        /* Look for a hole in the numbers. */
3462        i = 0;
3463        link = &ipmi_interfaces;
3464        list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3465                                ipmi_interfaces_mutex_held()) {
3466                if (tintf->intf_num != i) {
3467                        link = &tintf->link;
3468                        break;
3469                }
3470                i++;
3471        }
3472        /* Add the new interface in numeric order. */
3473        if (i == 0)
3474                list_add_rcu(&intf->link, &ipmi_interfaces);
3475        else
3476                list_add_tail_rcu(&intf->link, link);
3477
3478        rv = handlers->start_processing(send_info, intf);
3479        if (rv)
3480                goto out_err;
3481
3482        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3483        if (rv) {
3484                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3485                goto out_err_started;
3486        }
3487
3488        mutex_lock(&intf->bmc_reg_mutex);
3489        rv = __scan_channels(intf, &id);
3490        mutex_unlock(&intf->bmc_reg_mutex);
3491        if (rv)
3492                goto out_err_bmc_reg;
3493
3494        /*
3495         * Keep memory order straight for RCU readers.  Make
3496         * sure everything else is committed to memory before
3497         * setting intf_num to mark the interface valid.
3498         */
3499        smp_wmb();
3500        intf->intf_num = i;
3501        mutex_unlock(&ipmi_interfaces_mutex);
3502
3503        /* After this point the interface is legal to use. */
3504        call_smi_watchers(i, intf->si_dev);
3505
3506        return 0;
3507
3508 out_err_bmc_reg:
3509        ipmi_bmc_unregister(intf);
3510 out_err_started:
3511        if (intf->handlers->shutdown)
3512                intf->handlers->shutdown(intf->send_info);
3513 out_err:
3514        list_del_rcu(&intf->link);
3515        mutex_unlock(&ipmi_interfaces_mutex);
3516        synchronize_srcu(&ipmi_interfaces_srcu);
3517        cleanup_srcu_struct(&intf->users_srcu);
3518        kref_put(&intf->refcount, intf_free);
3519
3520        return rv;
3521}
3522EXPORT_SYMBOL(ipmi_add_smi);
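
/*
 * Illustrative sketch (not part of this driver) of how a low-level
 * interface driver might register itself.  Only the callbacks that are
 * visible in this file are shown; the remaining ipmi_smi_handlers
 * callbacks (message sending, event requests, etc.) are declared in
 * <linux/ipmi_smi.h> and are needed in practice.  All names below are
 * placeholders.
 *
 *	static int my_start_processing(void *send_info,
 *				       struct ipmi_smi *new_intf)
 *	{
 *		// Save new_intf and start delivering received messages.
 *		return 0;
 *	}
 *
 *	static void my_shutdown(void *send_info)
 *	{
 *		// Stop delivering messages to the message handler.
 *	}
 *
 *	static void my_poll(void *send_info)
 *	{
 *		// Check the hardware for completed messages.
 *	}
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.start_processing	= my_start_processing,
 *		.shutdown		= my_shutdown,
 *		.poll			= my_poll,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_send_info,
 *			  &pdev->dev, 0 /- 0 keeps the default slave addr -/);
 */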
3523
3524static void deliver_smi_err_response(struct ipmi_smi *intf,
3525                                     struct ipmi_smi_msg *msg,
3526                                     unsigned char err)
3527{
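        /*
         * Turn the stored request into a matching error response.  The
         * netfn lives in the upper six bits of byte 0 and a response
         * netfn is the request netfn + 1, so ORing the request's
         * netfn/LUN byte with (1 << 2) produces the response value.
         */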
3528        msg->rsp[0] = msg->data[0] | 4;
3529        msg->rsp[1] = msg->data[1];
3530        msg->rsp[2] = err;
3531        msg->rsp_size = 3;
3532        /* It's an error, so it will never requeue, no need to check return. */
3533        handle_one_recv_msg(intf, msg);
3534}
3535
3536static void cleanup_smi_msgs(struct ipmi_smi *intf)
3537{
3538        int              i;
3539        struct seq_table *ent;
3540        struct ipmi_smi_msg *msg;
3541        struct list_head *entry;
3542        struct list_head tmplist;
3543
3544        /* Clear out our transmit queues and hold the messages. */
3545        INIT_LIST_HEAD(&tmplist);
3546        list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3547        list_splice_tail(&intf->xmit_msgs, &tmplist);
3548
3549        /* Current message first, to preserve order */
3550        while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3551                /* Wait for the message to clear out. */
3552                schedule_timeout(1);
3553        }
3554
3555        /* No need for locks, the interface is down. */
3556
3557        /*
3558         * Return errors for all pending messages in queue and in the
3559         * tables waiting for remote responses.
3560         */
3561        while (!list_empty(&tmplist)) {
3562                entry = tmplist.next;
3563                list_del(entry);
3564                msg = list_entry(entry, struct ipmi_smi_msg, link);
3565                deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3566        }
3567
3568        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3569                ent = &intf->seq_table[i];
3570                if (!ent->inuse)
3571                        continue;
3572                deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3573        }
3574}
3575
3576void ipmi_unregister_smi(struct ipmi_smi *intf)
3577{
3578        struct ipmi_smi_watcher *w;
3579        int intf_num = intf->intf_num, index;
3580
3581        mutex_lock(&ipmi_interfaces_mutex);
3582        intf->intf_num = -1;
3583        intf->in_shutdown = true;
3584        list_del_rcu(&intf->link);
3585        mutex_unlock(&ipmi_interfaces_mutex);
3586        synchronize_srcu(&ipmi_interfaces_srcu);
3587
3588        /* At this point no users can be added to the interface. */
3589
3590        /*
3591         * Call all the watcher interfaces to tell them that
3592         * an interface is going away.
3593         */
3594        mutex_lock(&smi_watchers_mutex);
3595        list_for_each_entry(w, &smi_watchers, link)
3596                w->smi_gone(intf_num);
3597        mutex_unlock(&smi_watchers_mutex);
3598
3599        index = srcu_read_lock(&intf->users_srcu);
3600        while (!list_empty(&intf->users)) {
3601                struct ipmi_user *user =
3602                        container_of(list_next_rcu(&intf->users),
3603                                     struct ipmi_user, link);
3604
3605                _ipmi_destroy_user(user);
3606        }
3607        srcu_read_unlock(&intf->users_srcu, index);
3608
3609        if (intf->handlers->shutdown)
3610                intf->handlers->shutdown(intf->send_info);
3611
3612        cleanup_smi_msgs(intf);
3613
3614        ipmi_bmc_unregister(intf);
3615
3616        cleanup_srcu_struct(&intf->users_srcu);
3617        kref_put(&intf->refcount, intf_free);
3618}
3619EXPORT_SYMBOL(ipmi_unregister_smi);
3620
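/*
 * Rough layout of the IPMB Get Message response parsed below (offsets
 * as used in the code): rsp[2] is the completion code, rsp[3] bits 0-3
 * the channel, rsp[4] bits 2-7 the remote netfn, rsp[6] the remote
 * slave address, rsp[7] the sequence number (bits 2-7) and LUN (bits
 * 0-1), rsp[8] the command, rsp[9] onward the response data, and the
 * final byte a checksum (hence data_len = rsp_size - 10).
 */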
3621static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3622                                   struct ipmi_smi_msg *msg)
3623{
3624        struct ipmi_ipmb_addr ipmb_addr;
3625        struct ipmi_recv_msg  *recv_msg;
3626
3627        /*
3628         * This is 11, not 10, because the response must contain a
3629         * completion code.
3630         */
3631        if (msg->rsp_size < 11) {
3632                /* Message not big enough, just ignore it. */
3633                ipmi_inc_stat(intf, invalid_ipmb_responses);
3634                return 0;
3635        }
3636
3637        if (msg->rsp[2] != 0) {
3638                /* An error getting the response, just ignore it. */
3639                return 0;
3640        }
3641
3642        ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3643        ipmb_addr.slave_addr = msg->rsp[6];
3644        ipmb_addr.channel = msg->rsp[3] & 0x0f;
3645        ipmb_addr.lun = msg->rsp[7] & 3;
3646
3647        /*
3648         * It's a response from a remote entity.  Look up the sequence
3649         * number and handle the response.
3650         */
3651        if (intf_find_seq(intf,
3652                          msg->rsp[7] >> 2,
3653                          msg->rsp[3] & 0x0f,
3654                          msg->rsp[8],
3655                          (msg->rsp[4] >> 2) & (~1),
3656                          (struct ipmi_addr *) &ipmb_addr,
3657                          &recv_msg)) {
3658                /*
3659                 * We were unable to find the sequence number,
3660                 * so just nuke the message.
3661                 */
3662                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3663                return 0;
3664        }
3665
3666        memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3667        /*
3668         * The other fields matched, so no need to set them, except
3669         * for netfn, which needs to be the response that was
3670         * returned, not the request value.
3671         */
3672        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3673        recv_msg->msg.data = recv_msg->msg_data;
3674        recv_msg->msg.data_len = msg->rsp_size - 10;
3675        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3676        if (deliver_response(intf, recv_msg))
3677                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3678        else
3679                ipmi_inc_stat(intf, handled_ipmb_responses);
3680
3681        return 0;
3682}
3683
3684static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3685                                   struct ipmi_smi_msg *msg)
3686{
3687        struct cmd_rcvr          *rcvr;
3688        int                      rv = 0;
3689        unsigned char            netfn;
3690        unsigned char            cmd;
3691        unsigned char            chan;
3692        struct ipmi_user         *user = NULL;
3693        struct ipmi_ipmb_addr    *ipmb_addr;
3694        struct ipmi_recv_msg     *recv_msg;
3695
3696        if (msg->rsp_size < 10) {
3697                /* Message not big enough, just ignore it. */
3698                ipmi_inc_stat(intf, invalid_commands);
3699                return 0;
3700        }
3701
3702        if (msg->rsp[2] != 0) {
3703                /* An error getting the response, just ignore it. */
3704                return 0;
3705        }
3706
3707        netfn = msg->rsp[4] >> 2;
3708        cmd = msg->rsp[8];
3709        chan = msg->rsp[3] & 0xf;
3710
3711        rcu_read_lock();
3712        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3713        if (rcvr) {
3714                user = rcvr->user;
3715                kref_get(&user->refcount);
3716        } else
3717                user = NULL;
3718        rcu_read_unlock();
3719
3720        if (user == NULL) {
3721                /* We didn't find a user, deliver an error response. */
3722                ipmi_inc_stat(intf, unhandled_commands);
3723
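                /*
                 * Build a Send Message request whose embedded IPMB
                 * payload is a response to the original requester with
                 * an "invalid command" completion code, so the remote
                 * end still gets an answer even though no local user
                 * has registered for this command.
                 */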
3724                msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3725                msg->data[1] = IPMI_SEND_MSG_CMD;
3726                msg->data[2] = msg->rsp[3];
3727                msg->data[3] = msg->rsp[6];
3728                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3729                msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3730                msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3731                /* rqseq/lun */
3732                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3733                msg->data[8] = msg->rsp[8]; /* cmd */
3734                msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3735                msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3736                msg->data_size = 11;
3737
3738                pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
3739
3740                rcu_read_lock();
3741                if (!intf->in_shutdown) {
3742                        smi_send(intf, intf->handlers, msg, 0);
3743                        /*
3744                         * We used the message, so return the value
3745                         * that causes it to not be freed or
3746                         * queued.
3747                         */
3748                        rv = -1;
3749                }
3750                rcu_read_unlock();
3751        } else {
3752                recv_msg = ipmi_alloc_recv_msg();
3753                if (!recv_msg) {
3754                        /*
3755                         * We couldn't allocate memory for the
3756                         * message, so requeue it for handling
3757                         * later.
3758                         */
3759                        rv = 1;
3760                        kref_put(&user->refcount, free_user);
3761                } else {
3762                        /* Extract the source address from the data. */
3763                        ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3764                        ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3765                        ipmb_addr->slave_addr = msg->rsp[6];
3766                        ipmb_addr->lun = msg->rsp[7] & 3;
3767                        ipmb_addr->channel = msg->rsp[3] & 0xf;
3768
3769                        /*
3770                         * Extract the rest of the message information
3771                         * from the IPMB header.
3772                         */
3773                        recv_msg->user = user;
3774                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3775                        recv_msg->msgid = msg->rsp[7] >> 2;
3776                        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3777                        recv_msg->msg.cmd = msg->rsp[8];
3778                        recv_msg->msg.data = recv_msg->msg_data;
3779
3780                        /*
3781                         * We chop off 10, not 9 bytes because the checksum
3782                         * at the end also needs to be removed.
3783                         */
3784                        recv_msg->msg.data_len = msg->rsp_size - 10;
3785                        memcpy(recv_msg->msg_data, &msg->rsp[9],
3786                               msg->rsp_size - 10);
3787                        if (deliver_response(intf, recv_msg))
3788                                ipmi_inc_stat(intf, unhandled_commands);
3789                        else
3790                                ipmi_inc_stat(intf, handled_commands);
3791                }
3792        }
3793
3794        return rv;
3795}
3796
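/*
 * Rough layout of the LAN Get Message response parsed below (offsets as
 * used in the code): rsp[2] is the completion code, rsp[3] holds the
 * channel (bits 0-3) and privilege level (bits 4-7), rsp[4] the session
 * handle, rsp[5] the local SWID, rsp[6] bits 2-7 the remote netfn,
 * rsp[8] the remote SWID, rsp[9] the sequence number (bits 2-7) and LUN
 * (bits 0-1), rsp[10] the command, rsp[11] onward the response data,
 * and the final byte a checksum (hence data_len = rsp_size - 12).
 */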
3797static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3798                                  struct ipmi_smi_msg *msg)
3799{
3800        struct ipmi_lan_addr  lan_addr;
3801        struct ipmi_recv_msg  *recv_msg;
3802
3803
3804        /*
3805         * This is 13, not 12, because the response must contain a
3806         * completion code.
3807         */
3808        if (msg->rsp_size < 13) {
3809                /* Message not big enough, just ignore it. */
3810                ipmi_inc_stat(intf, invalid_lan_responses);
3811                return 0;
3812        }
3813
3814        if (msg->rsp[2] != 0) {
3815                /* An error getting the response, just ignore it. */
3816                return 0;
3817        }
3818
3819        lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3820        lan_addr.session_handle = msg->rsp[4];
3821        lan_addr.remote_SWID = msg->rsp[8];
3822        lan_addr.local_SWID = msg->rsp[5];
3823        lan_addr.channel = msg->rsp[3] & 0x0f;
3824        lan_addr.privilege = msg->rsp[3] >> 4;
3825        lan_addr.lun = msg->rsp[9] & 3;
3826
3827        /*
3828         * It's a response from a remote entity.  Look up the sequence
3829         * number and handle the response.
3830         */
3831        if (intf_find_seq(intf,
3832                          msg->rsp[9] >> 2,
3833                          msg->rsp[3] & 0x0f,
3834                          msg->rsp[10],
3835                          (msg->rsp[6] >> 2) & (~1),
3836                          (struct ipmi_addr *) &lan_addr,
3837                          &recv_msg)) {
3838                /*
3839                 * We were unable to find the sequence number,
3840                 * so just nuke the message.
3841                 */
3842                ipmi_inc_stat(intf, unhandled_lan_responses);
3843                return 0;
3844        }
3845
3846        memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3847        /*
3848         * The other fields matched, so no need to set them, except
3849         * for netfn, which needs to be the response that was
3850         * returned, not the request value.
3851         */
3852        recv_msg->msg.netfn = msg->rsp[6] >> 2;
3853        recv_msg->msg.data = recv_msg->msg_data;
3854        recv_msg->msg.data_len = msg->rsp_size - 12;
3855        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3856        if (deliver_response(intf, recv_msg))
3857                ipmi_inc_stat(intf, unhandled_lan_responses);
3858        else
3859                ipmi_inc_stat(intf, handled_lan_responses);
3860
3861        return 0;
3862}
3863
3864static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3865                                  struct ipmi_smi_msg *msg)
3866{
3867        struct cmd_rcvr          *rcvr;
3868        int                      rv = 0;
3869        unsigned char            netfn;
3870        unsigned char            cmd;
3871        unsigned char            chan;
3872        struct ipmi_user         *user = NULL;
3873        struct ipmi_lan_addr     *lan_addr;
3874        struct ipmi_recv_msg     *recv_msg;
3875
3876        if (msg->rsp_size < 12) {
3877                /* Message not big enough, just ignore it. */
3878                ipmi_inc_stat(intf, invalid_commands);
3879                return 0;
3880        }
3881
3882        if (msg->rsp[2] != 0) {
3883                /* An error getting the response, just ignore it. */
3884                return 0;
3885        }
3886
3887        netfn = msg->rsp[6] >> 2;
3888        cmd = msg->rsp[10];
3889        chan = msg->rsp[3] & 0xf;
3890
3891        rcu_read_lock();
3892        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3893        if (rcvr) {
3894                user = rcvr->user;
3895                kref_get(&user->refcount);
3896        } else
3897                user = NULL;
3898        rcu_read_unlock();
3899
3900        if (user == NULL) {
3901                /* We didn't find a user, just give up. */
3902                ipmi_inc_stat(intf, unhandled_commands);
3903
3904                /*
3905                 * Don't do anything with these messages, just allow
3906                 * them to be freed.
3907                 */
3908                rv = 0;
3909        } else {
3910                recv_msg = ipmi_alloc_recv_msg();
3911                if (!recv_msg) {
3912                        /*
3913                         * We couldn't allocate memory for the
3914                         * message, so requeue it for handling later.
3915                         */
3916                        rv = 1;
3917                        kref_put(&user->refcount, free_user);
3918                } else {
3919                        /* Extract the source address from the data. */
3920                        lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3921                        lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3922                        lan_addr->session_handle = msg->rsp[4];
3923                        lan_addr->remote_SWID = msg->rsp[8];
3924                        lan_addr->local_SWID = msg->rsp[5];
3925                        lan_addr->lun = msg->rsp[9] & 3;
3926                        lan_addr->channel = msg->rsp[3] & 0xf;
3927                        lan_addr->privilege = msg->rsp[3] >> 4;
3928
3929                        /*
3930                         * Extract the rest of the message information
3931                         * from the IPMB header.
3932                         */
3933                        recv_msg->user = user;
3934                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3935                        recv_msg->msgid = msg->rsp[9] >> 2;
3936                        recv_msg->msg.netfn = msg->rsp[6] >> 2;
3937                        recv_msg->msg.cmd = msg->rsp[10];
3938                        recv_msg->msg.data = recv_msg->msg_data;
3939
3940                        /*
3941                         * We chop off 12, not 11 bytes because the checksum
3942                         * at the end also needs to be removed.
3943                         */
3944                        recv_msg->msg.data_len = msg->rsp_size - 12;
3945                        memcpy(recv_msg->msg_data, &msg->rsp[11],
3946                               msg->rsp_size - 12);
3947                        if (deliver_response(intf, recv_msg))
3948                                ipmi_inc_stat(intf, unhandled_commands);
3949                        else
3950                                ipmi_inc_stat(intf, handled_commands);
3951                }
3952        }
3953
3954        return rv;
3955}
3956
3957/*
3958 * This routine will handle "Get Message" command responses with
3959 * channels that use an OEM Medium. The message format belongs to
3960 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3961 * Chapter 22, sections 22.6 and 22.24 for more details.
3962 */
3963static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3964                                  struct ipmi_smi_msg *msg)
3965{
3966        struct cmd_rcvr       *rcvr;
3967        int                   rv = 0;
3968        unsigned char         netfn;
3969        unsigned char         cmd;
3970        unsigned char         chan;
3971        struct ipmi_user *user = NULL;
3972        struct ipmi_system_interface_addr *smi_addr;
3973        struct ipmi_recv_msg  *recv_msg;
3974
3975        /*
3976         * We expect the OEM SW to perform error checking, so we
3977         * just do some basic sanity checks here.
3978         */
3979        if (msg->rsp_size < 4) {
3980                /* Message not big enough, just ignore it. */
3981                ipmi_inc_stat(intf, invalid_commands);
3982                return 0;
3983        }
3984
3985        if (msg->rsp[2] != 0) {
3986                /* An error getting the response, just ignore it. */
3987                return 0;
3988        }
3989
3990        /*
3991         * This is an OEM message, so the OEM needs to know how to
3992         * handle the message.  We do no interpretation.
3993         */
3994        netfn = msg->rsp[0] >> 2;
3995        cmd = msg->rsp[1];
3996        chan = msg->rsp[3] & 0xf;
3997
3998        rcu_read_lock();
3999        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4000        if (rcvr) {
4001                user = rcvr->user;
4002                kref_get(&user->refcount);
4003        } else
4004                user = NULL;
4005        rcu_read_unlock();
4006
4007        if (user == NULL) {
4008                /* We didn't find a user, just give up. */
4009                ipmi_inc_stat(intf, unhandled_commands);
4010
4011                /*
4012                 * Don't do anything with these messages, just allow
4013                 * them to be freed.
4014                 */
4015
4016                rv = 0;
4017        } else {
4018                recv_msg = ipmi_alloc_recv_msg();
4019                if (!recv_msg) {
4020                        /*
4021                         * We couldn't allocate memory for the
4022                         * message, so requeue it for handling
4023                         * later.
4024                         */
4025                        rv = 1;
4026                        kref_put(&user->refcount, free_user);
4027                } else {
4028                        /*
4029                         * OEM Messages are expected to be delivered via
4030                         * the system interface to SMS software.  We might
4031                         * need to visit this again depending on OEM
4032                         * need to revisit this depending on OEM
4033                         * requirements.
4034                        smi_addr = ((struct ipmi_system_interface_addr *)
4035                                    &recv_msg->addr);
4036                        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4037                        smi_addr->channel = IPMI_BMC_CHANNEL;
4038                        smi_addr->lun = msg->rsp[0] & 3;
4039
4040                        recv_msg->user = user;
4041                        recv_msg->user_msg_data = NULL;
4042                        recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4043                        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4044                        recv_msg->msg.cmd = msg->rsp[1];
4045                        recv_msg->msg.data = recv_msg->msg_data;
4046
4047                        /*
4048                         * The message starts at byte 4, which follows the
4049                         * Channel Byte in the "GET MESSAGE" command.
4050                         */
4051                        recv_msg->msg.data_len = msg->rsp_size - 4;
4052                        memcpy(recv_msg->msg_data, &msg->rsp[4],
4053                               msg->rsp_size - 4);
4054                        if (deliver_response(intf, recv_msg))
4055                                ipmi_inc_stat(intf, unhandled_commands);
4056                        else
4057                                ipmi_inc_stat(intf, handled_commands);
4058                }
4059        }
4060
4061        return rv;
4062}
4063
4064static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4065                                     struct ipmi_smi_msg  *msg)
4066{
4067        struct ipmi_system_interface_addr *smi_addr;
4068
4069        recv_msg->msgid = 0;
4070        smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4071        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4072        smi_addr->channel = IPMI_BMC_CHANNEL;
4073        smi_addr->lun = msg->rsp[0] & 3;
4074        recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4075        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4076        recv_msg->msg.cmd = msg->rsp[1];
4077        memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4078        recv_msg->msg.data = recv_msg->msg_data;
4079        recv_msg->msg.data_len = msg->rsp_size - 3;
4080}
4081
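/*
 * Handle a Read Event Message Buffer response.  rsp[2] is the
 * completion code and rsp[3] onward is the 16-byte event record, which
 * is why anything shorter than 19 bytes is rejected below.
 */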
4082static int handle_read_event_rsp(struct ipmi_smi *intf,
4083                                 struct ipmi_smi_msg *msg)
4084{
4085        struct ipmi_recv_msg *recv_msg, *recv_msg2;
4086        struct list_head     msgs;
4087        struct ipmi_user     *user;
4088        int rv = 0, deliver_count = 0, index;
4089        unsigned long        flags;
4090
4091        if (msg->rsp_size < 19) {
4092                /* Message is too small to be an IPMB event. */
4093                ipmi_inc_stat(intf, invalid_events);
4094                return 0;
4095        }
4096
4097        if (msg->rsp[2] != 0) {
4098                /* An error getting the event, just ignore it. */
4099                return 0;
4100        }
4101
4102        INIT_LIST_HEAD(&msgs);
4103
4104        spin_lock_irqsave(&intf->events_lock, flags);
4105
4106        ipmi_inc_stat(intf, events);
4107
4108        /*
4109         * Allocate and fill in one message for every user that is
4110         * getting events.
4111         */
4112        index = srcu_read_lock(&intf->users_srcu);
4113        list_for_each_entry_rcu(user, &intf->users, link) {
4114                if (!user->gets_events)
4115                        continue;
4116
4117                recv_msg = ipmi_alloc_recv_msg();
4118                if (!recv_msg) {
4119                        rcu_read_unlock();
4120                        list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4121                                                 link) {
4122                                list_del(&recv_msg->link);
4123                                ipmi_free_recv_msg(recv_msg);
4124                        }
4125                        /*
4126                         * We couldn't allocate memory for the
4127                         * message, so requeue it for handling
4128                         * later.
4129                         */
4130                        rv = 1;
4131                        goto out;
4132                }
4133
4134                deliver_count++;
4135
4136                copy_event_into_recv_msg(recv_msg, msg);
4137                recv_msg->user = user;
4138                kref_get(&user->refcount);
4139                list_add_tail(&recv_msg->link, &msgs);
4140        }
4141        srcu_read_unlock(&intf->users_srcu, index);
4142
4143        if (deliver_count) {
4144                /* Now deliver all the messages. */
4145                list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4146                        list_del(&recv_msg->link);
4147                        deliver_local_response(intf, recv_msg);
4148                }
4149        } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4150                /*
4151                 * No one to receive the message; put it in the queue if there
4152                 * are not already too many things in the queue.
4153                 */
4154                recv_msg = ipmi_alloc_recv_msg();
4155                if (!recv_msg) {
4156                        /*
4157                         * We couldn't allocate memory for the
4158                         * message, so requeue it for handling
4159                         * later.
4160                         */
4161                        rv = 1;
4162                        goto out;
4163                }
4164
4165                copy_event_into_recv_msg(recv_msg, msg);
4166                list_add_tail(&recv_msg->link, &intf->waiting_events);
4167                intf->waiting_events_count++;
4168        } else if (!intf->event_msg_printed) {
4169                /*
4170                 * There are too many things in the queue; discard this
4171                 * message.
4172                 */
4173                dev_warn(intf->si_dev,
4174                         "Event queue full, discarding incoming events\n");
4175                intf->event_msg_printed = 1;
4176        }
4177
4178 out:
4179        spin_unlock_irqrestore(&intf->events_lock, flags);
4180
4181        return rv;
4182}
4183
4184static int handle_bmc_rsp(struct ipmi_smi *intf,
4185                          struct ipmi_smi_msg *msg)
4186{
4187        struct ipmi_recv_msg *recv_msg;
4188        struct ipmi_system_interface_addr *smi_addr;
4189
4190        recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4191        if (recv_msg == NULL) {
4192                dev_warn(intf->si_dev,
4193                         "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4194                return 0;
4195        }
4196
4197        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4198        recv_msg->msgid = msg->msgid;
4199        smi_addr = ((struct ipmi_system_interface_addr *)
4200                    &recv_msg->addr);
4201        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4202        smi_addr->channel = IPMI_BMC_CHANNEL;
4203        smi_addr->lun = msg->rsp[0] & 3;
4204        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4205        recv_msg->msg.cmd = msg->rsp[1];
4206        memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4207        recv_msg->msg.data = recv_msg->msg_data;
4208        recv_msg->msg.data_len = msg->rsp_size - 2;
4209        deliver_local_response(intf, recv_msg);
4210
4211        return 0;
4212}
4213
4214/*
4215 * Handle a received message.  Return 1 if the message should be requeued,
4216 * 0 if the message should be freed, or -1 if the message should not
4217 * be freed or requeued.
4218 */
4219static int handle_one_recv_msg(struct ipmi_smi *intf,
4220                               struct ipmi_smi_msg *msg)
4221{
4222        int requeue;
4223        int chan;
4224
4225        pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
4226
4227        if ((msg->data_size >= 2)
4228            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4229            && (msg->data[1] == IPMI_SEND_MSG_CMD)
4230            && (msg->user_data == NULL)) {
4231
4232                if (intf->in_shutdown)
4233                        goto free_msg;
4234
4235                /*
4236                 * This is the local response to a command send; start
4237                 * the timer for these.  The user_data will not be
4238                 * NULL if this is a response send, and we will let
4239                 * response sends just go through.
4240                 */
4241
4242                /*
4243                 * Check for errors.  If we get certain errors (ones
4244                 * that basically mean we can try again later), we
4245                 * ignore them and start the timer.  Otherwise we
4246                 * report the error immediately.
4247                 */
4248                if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4249                    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4250                    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4251                    && (msg->rsp[2] != IPMI_BUS_ERR)
4252                    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4253                        int ch = msg->rsp[3] & 0xf;
4254                        struct ipmi_channel *chans;
4255
4256                        /* Got an error sending the message, handle it. */
4257
4258                        chans = READ_ONCE(intf->channel_list)->c;
4259                        if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4260                            || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4261                                ipmi_inc_stat(intf, sent_lan_command_errs);
4262                        else
4263                                ipmi_inc_stat(intf, sent_ipmb_command_errs);
4264                        intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4265                } else
4266                        /* The message was sent, start the timer. */
4267                        intf_start_seq_timer(intf, msg->msgid);
4268free_msg:
4269                requeue = 0;
4270                goto out;
4271
4272        } else if (msg->rsp_size < 2) {
4273                /* Message is too small to be correct. */
4274                dev_warn(intf->si_dev,
4275                         "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4276                         (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4277
4278                /* Generate an error response for the message. */
4279                msg->rsp[0] = msg->data[0] | (1 << 2);
4280                msg->rsp[1] = msg->data[1];
4281                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4282                msg->rsp_size = 3;
4283        } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4284                   || (msg->rsp[1] != msg->data[1])) {
4285                /*
4286                 * The NetFN and Command in the response are not even
4287                 * marginally correct.
4288                 */
4289                dev_warn(intf->si_dev,
4290                         "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4291                         (msg->data[0] >> 2) | 1, msg->data[1],
4292                         msg->rsp[0] >> 2, msg->rsp[1]);
4293
4294                /* Generate an error response for the message. */
4295                msg->rsp[0] = msg->data[0] | (1 << 2);
4296                msg->rsp[1] = msg->data[1];
4297                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4298                msg->rsp_size = 3;
4299        }
4300
4301        if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4302            && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4303            && (msg->user_data != NULL)) {
4304                /*
4305                 * It's a response to a response we sent.  For this we
4306                 * deliver a send message response to the user.
4307                 */
4308                struct ipmi_recv_msg *recv_msg = msg->user_data;
4309
4310                requeue = 0;
4311                if (msg->rsp_size < 2)
4312                        /* Message is too small to be correct. */
4313                        goto out;
4314
4315                chan = msg->data[2] & 0x0f;
4316                if (chan >= IPMI_MAX_CHANNELS)
4317                        /* Invalid channel number */
4318                        goto out;
4319
4320                if (!recv_msg)
4321                        goto out;
4322
4323                recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4324                recv_msg->msg.data = recv_msg->msg_data;
4325                recv_msg->msg.data_len = 1;
4326                recv_msg->msg_data[0] = msg->rsp[2];
4327                deliver_local_response(intf, recv_msg);
4328        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4329                   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4330                struct ipmi_channel   *chans;
4331
4332                /* It's from the receive queue. */
4333                chan = msg->rsp[3] & 0xf;
4334                if (chan >= IPMI_MAX_CHANNELS) {
4335                        /* Invalid channel number */
4336                        requeue = 0;
4337                        goto out;
4338                }
4339
4340                /*
4341                 * We need to make sure the channels have been initialized.
4342                 * The channel_handler routine will set "channels_ready"
4343                 * once the channel information for this interface has
4344                 * been read.
4345                 */
4346                if (!intf->channels_ready) {
4347                        requeue = 0; /* Throw the message away */
4348                        goto out;
4349                }
4350
4351                chans = READ_ONCE(intf->channel_list)->c;
4352
4353                switch (chans[chan].medium) {
4354                case IPMI_CHANNEL_MEDIUM_IPMB:
4355                        if (msg->rsp[4] & 0x04) {
4356                                /*
4357                                 * It's a response, so find the
4358                                 * requesting message and send it up.
4359                                 */
4360                                requeue = handle_ipmb_get_msg_rsp(intf, msg);
4361                        } else {
4362                                /*
4363                                 * It's a command to the SMS from some other
4364                                 * entity.  Handle that.
4365                                 */
4366                                requeue = handle_ipmb_get_msg_cmd(intf, msg);
4367                        }
4368                        break;
4369
4370                case IPMI_CHANNEL_MEDIUM_8023LAN:
4371                case IPMI_CHANNEL_MEDIUM_ASYNC:
4372                        if (msg->rsp[6] & 0x04) {
4373                                /*
4374                                 * It's a response, so find the
4375                                 * requesting message and send it up.
4376                                 */
4377                                requeue = handle_lan_get_msg_rsp(intf, msg);
4378                        } else {
4379                                /*
4380                                 * It's a command to the SMS from some other
4381                                 * entity.  Handle that.
4382                                 */
4383                                requeue = handle_lan_get_msg_cmd(intf, msg);
4384                        }
4385                        break;
4386
4387                default:
4388                        /* Check for OEM Channels.  Clients had better
4389                           register for these commands. */
4390                        if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4391                            && (chans[chan].medium
4392                                <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4393                                requeue = handle_oem_get_msg_cmd(intf, msg);
4394                        } else {
4395                                /*
4396                                 * We don't handle the channel type, so just
4397                                 * free the message.
4398                                 */
4399                                requeue = 0;
4400                        }
4401                }
4402
4403        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4404                   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4405                /* It's an asynchronous event. */
4406                requeue = handle_read_event_rsp(intf, msg);
4407        } else {
4408                /* It's a response from the local BMC. */
4409                requeue = handle_bmc_rsp(intf, msg);
4410        }
4411
4412 out:
4413        return requeue;
4414}
4415
4416/*
4417 * If there are messages in the queue or pretimeouts, handle them.
4418 */
4419static void handle_new_recv_msgs(struct ipmi_smi *intf)
4420{
4421        struct ipmi_smi_msg  *smi_msg;
4422        unsigned long        flags = 0;
4423        int                  rv;
4424        int                  run_to_completion = intf->run_to_completion;
4425
4426        /* See if any waiting messages need to be processed. */
4427        if (!run_to_completion)
4428                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4429        while (!list_empty(&intf->waiting_rcv_msgs)) {
4430                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4431                                     struct ipmi_smi_msg, link);
4432                list_del(&smi_msg->link);
4433                if (!run_to_completion)
4434                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4435                                               flags);
4436                rv = handle_one_recv_msg(intf, smi_msg);
4437                if (!run_to_completion)
4438                        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4439                if (rv > 0) {
4440                        /*
4441                         * To preserve message order, quit if we
4442                         * can't handle a message.  Add the message
4443                         * back at the head; this is safe because this
4444                         * tasklet is the only thing that pulls the
4445                         * messages.
4446                         */
4447                        list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4448                        break;
4449                } else {
4450                        if (rv == 0)
4451                                /* Message handled */
4452                                ipmi_free_smi_msg(smi_msg);
4453                        /* If rv < 0, fatal error, del but don't free. */
4454                }
4455        }
4456        if (!run_to_completion)
4457                spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4458
4459        /*
4460         * If the pretimeout count is non-zero, decrement it by one and
4461         * deliver pretimeouts to all the users.
4462         */
4463        if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4464                struct ipmi_user *user;
4465                int index;
4466
4467                index = srcu_read_lock(&intf->users_srcu);
4468                list_for_each_entry_rcu(user, &intf->users, link) {
4469                        if (user->handler->ipmi_watchdog_pretimeout)
4470                                user->handler->ipmi_watchdog_pretimeout(
4471                                        user->handler_data);
4472                }
4473                srcu_read_unlock(&intf->users_srcu, index);
4474        }
4475}
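
/*
 * Illustrative sketch (not part of this file): the order-preserving
 * drain pattern used above, in isolation.  struct example_item, the
 * example_queue list and example_process() are made up for the
 * example; a positive return from example_process() plays the role of
 * handle_one_recv_msg() deferring a message.
 */
#if 0   /* example only, never compiled */
static void example_drain(struct list_head *example_queue)
{
        struct example_item *item;

        while (!list_empty(example_queue)) {
                item = list_first_entry(example_queue,
                                        struct example_item, link);
                list_del(&item->link);
                if (example_process(item) > 0) {
                        /* Not handled yet; put it back at the head. */
                        list_add(&item->link, example_queue);
                        break;
                }
                kfree(item);
        }
}
#endif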
4476
4477static void smi_recv_tasklet(struct tasklet_struct *t)
4478{
4479        unsigned long flags = 0; /* keep us warning-free. */
4480        struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4481        int run_to_completion = intf->run_to_completion;
4482        struct ipmi_smi_msg *newmsg = NULL;
4483
4484        /*
4485         * Start the next message if available.
4486         *
4487         * Do this here, not in the actual receiver, because we could
4488         * deadlock: the lower layer is allowed to hold locks while
4489         * calling message delivery.
4490         */
4491
4492        rcu_read_lock();
4493
4494        if (!run_to_completion)
4495                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4496        if (intf->curr_msg == NULL && !intf->in_shutdown) {
4497                struct list_head *entry = NULL;
4498
4499                /* Pick the high priority queue first. */
4500                if (!list_empty(&intf->hp_xmit_msgs))
4501                        entry = intf->hp_xmit_msgs.next;
4502                else if (!list_empty(&intf->xmit_msgs))
4503                        entry = intf->xmit_msgs.next;
4504
4505                if (entry) {
4506                        list_del(entry);
4507                        newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4508                        intf->curr_msg = newmsg;
4509                }
4510        }
4511
4512        if (!run_to_completion)
4513                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4514        if (newmsg)
4515                intf->handlers->sender(intf->send_info, newmsg);
4516
4517        rcu_read_unlock();
4518
4519        handle_new_recv_msgs(intf);
4520}
4521
4522/* Handle a new message from the lower layer. */
4523void ipmi_smi_msg_received(struct ipmi_smi *intf,
4524                           struct ipmi_smi_msg *msg)
4525{
4526        unsigned long flags = 0; /* keep us warning-free. */
4527        int run_to_completion = intf->run_to_completion;
4528
4529        /*
4530         * To preserve message order, we keep a queue and deliver from
4531         * a tasklet.
4532         */
4533        if (!run_to_completion)
4534                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4535        list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4536        if (!run_to_completion)
4537                spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4538                                       flags);
4539
4540        if (!run_to_completion)
4541                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4542        /*
4543         * We can get an asynchronous event or receive message in addition
4544         * to commands we send.
4545         */
4546        if (msg == intf->curr_msg)
4547                intf->curr_msg = NULL;
4548        if (!run_to_completion)
4549                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4550
4551        if (run_to_completion)
4552                smi_recv_tasklet(&intf->recv_tasklet);
4553        else
4554                tasklet_schedule(&intf->recv_tasklet);
4555}
4556EXPORT_SYMBOL(ipmi_smi_msg_received);
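
/*
 * Illustrative sketch (not part of this file): how a hypothetical
 * lower-layer SMI driver might hand a completed message up from its
 * interrupt handler.  struct example_smi, its ->intf member and
 * example_smi_pull_msg() are assumptions made for the example.
 */
#if 0   /* example only, never compiled */
static irqreturn_t example_smi_irq(int irq, void *dev_id)
{
        struct example_smi *smi = dev_id;
        struct ipmi_smi_msg *msg = example_smi_pull_msg(smi);

        /*
         * ipmi_smi_msg_received() only queues the message and schedules
         * the receive tasklet, so it is safe to call from here.
         */
        if (msg)
                ipmi_smi_msg_received(smi->intf, msg);

        return IRQ_HANDLED;
}
#endif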
4557
4558void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4559{
4560        if (intf->in_shutdown)
4561                return;
4562
4563        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4564        tasklet_schedule(&intf->recv_tasklet);
4565}
4566EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4567
4568static struct ipmi_smi_msg *
4569smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4570                  unsigned char seq, long seqid)
4571{
4572        struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4573        if (!smi_msg)
4574                /*
4575                 * If we can't allocate the message, just return; we
4576                 * get 4 retries, so this should be OK.
4577                 */
4578                return NULL;
4579
4580        memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4581        smi_msg->data_size = recv_msg->msg.data_len;
4582        smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4583
4584        pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
4585
4586        return smi_msg;
4587}
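
/*
 * Note that STORE_SEQ_IN_MSGID() above packs the sequence-table slot
 * and sequence id into ->msgid, which is what allows the eventual
 * response to this retransmission to be matched back to the original
 * request.
 */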
4588
4589static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4590                              struct list_head *timeouts,
4591                              unsigned long timeout_period,
4592                              int slot, unsigned long *flags,
4593                              bool *need_timer)
4594{
4595        struct ipmi_recv_msg *msg;
4596
4597        if (intf->in_shutdown)
4598                return;
4599
4600        if (!ent->inuse)
4601                return;
4602
4603        if (timeout_period < ent->timeout) {
4604                ent->timeout -= timeout_period;
4605                *need_timer = true;
4606                return;
4607        }
4608
4609        if (ent->retries_left == 0) {
4610                /* The message has used all its retries. */
4611                ent->inuse = 0;
4612                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4613                msg = ent->recv_msg;
4614                list_add_tail(&msg->link, timeouts);
4615                if (ent->broadcast)
4616                        ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4617                else if (is_lan_addr(&ent->recv_msg->addr))
4618                        ipmi_inc_stat(intf, timed_out_lan_commands);
4619                else
4620                        ipmi_inc_stat(intf, timed_out_ipmb_commands);
4621        } else {
4622                struct ipmi_smi_msg *smi_msg;
4623                /* More retries, send again. */
4624
4625                *need_timer = true;
4626
4627                /*
4628                 * Start with the max timeout; it is set to the normal
4629                 * timeout after the message is sent.
4630                 */
4631                ent->timeout = MAX_MSG_TIMEOUT;
4632                ent->retries_left--;
4633                smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4634                                            ent->seqid);
4635                if (!smi_msg) {
4636                        if (is_lan_addr(&ent->recv_msg->addr))
4637                                ipmi_inc_stat(intf,
4638                                              dropped_rexmit_lan_commands);
4639                        else
4640                                ipmi_inc_stat(intf,
4641                                              dropped_rexmit_ipmb_commands);
4642                        return;
4643                }
4644
4645                spin_unlock_irqrestore(&intf->seq_lock, *flags);
4646
4647                /*
4648                 * Send the new message.  We send with a zero
4649                 * priority.  The message already timed out, so it is
4650                 * unlikely to be time critical now, and high priority
4651                 * messages are really only for messages to the local MC,
4652                 * which don't get resent.
4653                 */
4654                if (intf->handlers) {
4655                        if (is_lan_addr(&ent->recv_msg->addr))
4656                                ipmi_inc_stat(intf,
4657                                              retransmitted_lan_commands);
4658                        else
4659                                ipmi_inc_stat(intf,
4660                                              retransmitted_ipmb_commands);
4661
4662                        smi_send(intf, intf->handlers, smi_msg, 0);
4663                } else
4664                        ipmi_free_smi_msg(smi_msg);
4665
4666                spin_lock_irqsave(&intf->seq_lock, *flags);
4667        }
4668}
4669
4670static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4671                                 unsigned long timeout_period)
4672{
4673        struct list_head     timeouts;
4674        struct ipmi_recv_msg *msg, *msg2;
4675        unsigned long        flags;
4676        int                  i;
4677        bool                 need_timer = false;
4678
4679        if (!intf->bmc_registered) {
4680                kref_get(&intf->refcount);
4681                if (!schedule_work(&intf->bmc_reg_work)) {
4682                        kref_put(&intf->refcount, intf_free);
4683                        need_timer = true;
4684                }
4685        }
4686
4687        /*
4688         * Go through the seq table and find any messages that
4689         * have timed out, putting them in the timeouts
4690         * list.
4691         */
4692        INIT_LIST_HEAD(&timeouts);
4693        spin_lock_irqsave(&intf->seq_lock, flags);
4694        if (intf->ipmb_maintenance_mode_timeout) {
4695                if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4696                        intf->ipmb_maintenance_mode_timeout = 0;
4697                else
4698                        intf->ipmb_maintenance_mode_timeout -= timeout_period;
4699        }
4700        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4701                check_msg_timeout(intf, &intf->seq_table[i],
4702                                  &timeouts, timeout_period, i,
4703                                  &flags, &need_timer);
4704        spin_unlock_irqrestore(&intf->seq_lock, flags);
4705
4706        list_for_each_entry_safe(msg, msg2, &timeouts, link)
4707                deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4708
4709        /*
4710         * Maintenance mode handling.  Check the timeout
4711         * optimistically before we claim the lock.  It may
4712         * mean a timeout gets missed occasionally, but that
4713         * only means the timeout gets extended by one period
4714         * in that case.  No big deal, and it avoids the lock
4715         * most of the time.
4716         */
4717        if (intf->auto_maintenance_timeout > 0) {
4718                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4719                if (intf->auto_maintenance_timeout > 0) {
4720                        intf->auto_maintenance_timeout
4721                                -= timeout_period;
4722                        if (!intf->maintenance_mode
4723                            && (intf->auto_maintenance_timeout <= 0)) {
4724                                intf->maintenance_mode_enable = false;
4725                                maintenance_mode_update(intf);
4726                        }
4727                }
4728                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4729                                       flags);
4730        }
4731
4732        tasklet_schedule(&intf->recv_tasklet);
4733
4734        return need_timer;
4735}
4736
4737static void ipmi_request_event(struct ipmi_smi *intf)
4738{
4739        /* No event requests when in maintenance mode. */
4740        if (intf->maintenance_mode_enable)
4741                return;
4742
4743        if (!intf->in_shutdown)
4744                intf->handlers->request_events(intf->send_info);
4745}
4746
4747static struct timer_list ipmi_timer;
4748
4749static atomic_t stop_operation;
4750
4751static void ipmi_timeout(struct timer_list *unused)
4752{
4753        struct ipmi_smi *intf;
4754        bool need_timer = false;
4755        int index;
4756
4757        if (atomic_read(&stop_operation))
4758                return;
4759
4760        index = srcu_read_lock(&ipmi_interfaces_srcu);
4761        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4762                if (atomic_read(&intf->event_waiters)) {
4763                        intf->ticks_to_req_ev--;
4764                        if (intf->ticks_to_req_ev == 0) {
4765                                ipmi_request_event(intf);
4766                                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4767                        }
4768                        need_timer = true;
4769                }
4770
4771                need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4772        }
4773        srcu_read_unlock(&ipmi_interfaces_srcu, index);
4774
4775        if (need_timer)
4776                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4777}
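
/*
 * The periodic timer above only re-arms itself while some interface
 * still reports outstanding work (need_timer).  Once it goes idle,
 * need_waiter() below is used to restart it when timeout processing
 * is needed again.
 */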
4778
4779static void need_waiter(struct ipmi_smi *intf)
4780{
4781        /* Racy, but worst case we start the timer twice. */
4782        if (!timer_pending(&ipmi_timer))
4783                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4784}
4785
4786static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4787static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4788
4789static void free_smi_msg(struct ipmi_smi_msg *msg)
4790{
4791        atomic_dec(&smi_msg_inuse_count);
4792        kfree(msg);
4793}
4794
4795struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4796{
4797        struct ipmi_smi_msg *rv;
4798        rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4799        if (rv) {
4800                rv->done = free_smi_msg;
4801                rv->user_data = NULL;
4802                atomic_inc(&smi_msg_inuse_count);
4803        }
4804        return rv;
4805}
4806EXPORT_SYMBOL(ipmi_alloc_smi_msg);
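
/*
 * Illustrative sketch (not part of this file): allocating an SMI
 * message, filling in a raw request, and releasing it again.  The
 * request bytes are only an example; ipmi_free_smi_msg() returns the
 * message through its ->done() hook (free_smi_msg() above).
 */
#if 0   /* example only, never compiled */
static int example_build_smi_msg(void)
{
        struct ipmi_smi_msg *msg = ipmi_alloc_smi_msg();

        if (!msg)
                return -ENOMEM;

        msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2) | 0; /* NetFn/LUN */
        msg->data[1] = IPMI_GET_DEVICE_ID_CMD;
        msg->data_size = 2;

        ipmi_free_smi_msg(msg);
        return 0;
}
#endif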
4807
4808static void free_recv_msg(struct ipmi_recv_msg *msg)
4809{
4810        atomic_dec(&recv_msg_inuse_count);
4811        kfree(msg);
4812}
4813
4814static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4815{
4816        struct ipmi_recv_msg *rv;
4817
4818        rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4819        if (rv) {
4820                rv->user = NULL;
4821                rv->done = free_recv_msg;
4822                atomic_inc(&recv_msg_inuse_count);
4823        }
4824        return rv;
4825}
4826
4827void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4828{
4829        if (msg->user)
4830                kref_put(&msg->user->refcount, free_user);
4831        msg->done(msg);
4832}
4833EXPORT_SYMBOL(ipmi_free_recv_msg);
4834
4835static atomic_t panic_done_count = ATOMIC_INIT(0);
4836
4837static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4838{
4839        atomic_dec(&panic_done_count);
4840}
4841
4842static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4843{
4844        atomic_dec(&panic_done_count);
4845}
4846
4847/*
4848 * Inside a panic, send a message and wait for a response.
4849 */
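/*
 * This runs in panic context, so nothing here may sleep or allocate:
 * the messages live on the caller's stack, completion is tracked with
 * the panic_done_count atomic, and ipmi_poll() is used to drive the
 * interface until both dummy done handlers have run.
 */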
4850static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4851                                        struct ipmi_addr *addr,
4852                                        struct kernel_ipmi_msg *msg)
4853{
4854        struct ipmi_smi_msg  smi_msg;
4855        struct ipmi_recv_msg recv_msg;
4856        int rv;
4857
4858        smi_msg.done = dummy_smi_done_handler;
4859        recv_msg.done = dummy_recv_done_handler;
4860        atomic_add(2, &panic_done_count);
4861        rv = i_ipmi_request(NULL,
4862                            intf,
4863                            addr,
4864                            0,
4865                            msg,
4866                            intf,
4867                            &smi_msg,
4868                            &recv_msg,
4869                            0,
4870                            intf->addrinfo[0].address,
4871                            intf->addrinfo[0].lun,
4872                            0, 1); /* Don't retry, and don't wait. */
4873        if (rv)
4874                atomic_sub(2, &panic_done_count);
4875        else if (intf->handlers->flush_messages)
4876                intf->handlers->flush_messages(intf->send_info);
4877
4878        while (atomic_read(&panic_done_count) != 0)
4879                ipmi_poll(intf);
4880}
4881
4882static void event_receiver_fetcher(struct ipmi_smi *intf,
4883                                   struct ipmi_recv_msg *msg)
4884{
4885        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4886            && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4887            && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4888            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4889                /* A Get Event Receiver response; save it. */
4890                intf->event_receiver = msg->msg.data[1];
4891                intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4892        }
4893}
4894
4895static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4896{
4897        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4898            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4899            && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4900            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4901                /*
4902                 * A Get Device ID response; save whether we are an
4903                 * SEL device and/or an event generator.
4904                 */
4905                intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4906                intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4907        }
4908}
4909
4910static void send_panic_events(struct ipmi_smi *intf, char *str)
4911{
4912        struct kernel_ipmi_msg msg;
4913        unsigned char data[16];
4914        struct ipmi_system_interface_addr *si;
4915        struct ipmi_addr addr;
4916        char *p = str;
4917        struct ipmi_ipmb_addr *ipmb;
4918        int j;
4919
4920        if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4921                return;
4922
4923        si = (struct ipmi_system_interface_addr *) &addr;
4924        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4925        si->channel = IPMI_BMC_CHANNEL;
4926        si->lun = 0;
4927
4928        /* Fill in an event indicating that we have failed. */
4929        msg.netfn = 0x04; /* Sensor or Event. */
4930        msg.cmd = 2; /* Platform event command. */
4931        msg.data = data;
4932        msg.data_len = 8;
4933        data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4934        data[1] = 0x03; /* This is for IPMI 1.0. */
4935        data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4936        data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4937        data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4938
4939        /*
4940         * Put a few breadcrumbs in.  Hopefully later we can add more things
4941         * to make the panic events more useful.
4942         */
4943        if (str) {
4944                data[3] = str[0];
4945                data[6] = str[1];
4946                data[7] = str[2];
4947        }
4948
4949        /* Send the event announcing the panic. */
4950        ipmi_panic_request_and_wait(intf, &addr, &msg);
4951
4952        /*
4953         * On every interface, dump a series of OEM events holding the
4954         * panic string.
4955         */
4956        if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4957                return;
4958
4959        /*
4960         * intf_num is used as a marker to tell if the
4961         * interface is valid.  Thus we need a read barrier to
4962         * make sure data fetched before checking intf_num
4963         * won't be used.
4964         */
4965        smp_rmb();
4966
4967        /*
4968         * First job here is to figure out where to send the
4969         * OEM events.  There's no way in IPMI to send OEM
4970         * events using an event send command, so we have to
4971         * find the SEL to put them in and stick them in
4972         * there.
4973         */
4974
4975        /* Get capabilities from the get device id. */
4976        intf->local_sel_device = 0;
4977        intf->local_event_generator = 0;
4978        intf->event_receiver = 0;
4979
4980        /* Request the device info from the local MC. */
4981        msg.netfn = IPMI_NETFN_APP_REQUEST;
4982        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4983        msg.data = NULL;
4984        msg.data_len = 0;
4985        intf->null_user_handler = device_id_fetcher;
4986        ipmi_panic_request_and_wait(intf, &addr, &msg);
4987
4988        if (intf->local_event_generator) {
4989                /* Request the event receiver from the local MC. */
4990                msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4991                msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4992                msg.data = NULL;
4993                msg.data_len = 0;
4994                intf->null_user_handler = event_receiver_fetcher;
4995                ipmi_panic_request_and_wait(intf, &addr, &msg);
4996        }
4997        intf->null_user_handler = NULL;
4998
4999        /*
5000         * Validate the event receiver.  The low bit must not
5001         * be 1 (it must be a valid IPMB address), it cannot
5002         * be zero, and it must not be my address.
5003         */
5004        if (((intf->event_receiver & 1) == 0)
5005            && (intf->event_receiver != 0)
5006            && (intf->event_receiver != intf->addrinfo[0].address)) {
5007                /*
5008                 * The event receiver is valid, send an IPMB
5009                 * message.
5010                 */
5011                ipmb = (struct ipmi_ipmb_addr *) &addr;
5012                ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5013                ipmb->channel = 0; /* FIXME - is this right? */
5014                ipmb->lun = intf->event_receiver_lun;
5015                ipmb->slave_addr = intf->event_receiver;
5016        } else if (intf->local_sel_device) {
5017                /*
5018                 * The event receiver was not valid (or was
5019                 * me), but I am an SEL device, so just dump it
5020                 * in my SEL.
5021                 */
5022                si = (struct ipmi_system_interface_addr *) &addr;
5023                si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5024                si->channel = IPMI_BMC_CHANNEL;
5025                si->lun = 0;
5026        } else
5027                return; /* Nowhere to send the event. */
5028
5029        msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5030        msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5031        msg.data = data;
5032        msg.data_len = 16;
5033
5034        j = 0;
5035        while (*p) {
5036                int size = strlen(p);
5037
5038                if (size > 11)
5039                        size = 11;
5040                data[0] = 0;
5041                data[1] = 0;
5042                data[2] = 0xf0; /* OEM event without timestamp. */
5043                data[3] = intf->addrinfo[0].address;
5044                data[4] = j++; /* sequence # */
5045                /*
5046                 * Always copy 11 bytes; strncpy pads any
5047                 * remainder with zeroes for us.
5048                 */
5049                strncpy(data+5, p, 11);
5050                p += size;
5051
5052                ipmi_panic_request_and_wait(intf, &addr, &msg);
5053        }
5054}
5055
5056static int has_panicked;
5057
5058static int panic_event(struct notifier_block *this,
5059                       unsigned long         event,
5060                       void                  *ptr)
5061{
5062        struct ipmi_smi *intf;
5063        struct ipmi_user *user;
5064
5065        if (has_panicked)
5066                return NOTIFY_DONE;
5067        has_panicked = 1;
5068
5069        /* For every registered interface, set it to run to completion. */
5070        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5071                if (!intf->handlers || intf->intf_num == -1)
5072                        /* Interface is not ready. */
5073                        continue;
5074
5075                if (!intf->handlers->poll)
5076                        continue;
5077
5078                /*
5079                 * If we were interrupted while locking xmit_msgs_lock or
5080                 * waiting_rcv_msgs_lock, the corresponding list may be
5081                 * corrupted.  In this case, drop items on the list for
5082                 * corrupted.  In this case, drop the items on the list
5083                 * for safety.
5084                if (!spin_trylock(&intf->xmit_msgs_lock)) {
5085                        INIT_LIST_HEAD(&intf->xmit_msgs);
5086                        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5087                } else
5088                        spin_unlock(&intf->xmit_msgs_lock);
5089
5090                if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5091                        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5092                else
5093                        spin_unlock(&intf->waiting_rcv_msgs_lock);
5094
5095                intf->run_to_completion = 1;
5096                if (intf->handlers->set_run_to_completion)
5097                        intf->handlers->set_run_to_completion(intf->send_info,
5098                                                              1);
5099
5100                list_for_each_entry_rcu(user, &intf->users, link) {
5101                        if (user->handler->ipmi_panic_handler)
5102                                user->handler->ipmi_panic_handler(
5103                                        user->handler_data);
5104                }
5105
5106                send_panic_events(intf, ptr);
5107        }
5108
5109        return NOTIFY_DONE;
5110}
5111
5112/* Must be called with ipmi_interfaces_mutex held. */
5113static int ipmi_register_driver(void)
5114{
5115        int rv;
5116
5117        if (drvregistered)
5118                return 0;
5119
5120        rv = driver_register(&ipmidriver.driver);
5121        if (rv)
5122                pr_err("Could not register IPMI driver\n");
5123        else
5124                drvregistered = true;
5125        return rv;
5126}
5127
5128static struct notifier_block panic_block = {
5129        .notifier_call  = panic_event,
5130        .next           = NULL,
5131        .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5132};
5133
5134static int ipmi_init_msghandler(void)
5135{
5136        int rv;
5137
5138        mutex_lock(&ipmi_interfaces_mutex);
5139        rv = ipmi_register_driver();
5140        if (rv)
5141                goto out;
5142        if (initialized)
5143                goto out;
5144
5145        init_srcu_struct(&ipmi_interfaces_srcu);
5146
5147        timer_setup(&ipmi_timer, ipmi_timeout, 0);
5148        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5149
5150        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5151
5152        initialized = true;
5153
5154out:
5155        mutex_unlock(&ipmi_interfaces_mutex);
5156        return rv;
5157}
5158
5159static int __init ipmi_init_msghandler_mod(void)
5160{
5161        int rv;
5162
5163        pr_info("version " IPMI_DRIVER_VERSION "\n");
5164
5165        mutex_lock(&ipmi_interfaces_mutex);
5166        rv = ipmi_register_driver();
5167        mutex_unlock(&ipmi_interfaces_mutex);
5168
5169        return rv;
5170}
5171
5172static void __exit cleanup_ipmi(void)
5173{
5174        int count;
5175
5176        if (initialized) {
5177                atomic_notifier_chain_unregister(&panic_notifier_list,
5178                                                 &panic_block);
5179
5180                /*
5181                 * This can't be called if any interfaces exist, so no worry
5182                 * about shutting down the interfaces.
5183                 */
5184
5185                /*
5186                 * Tell the timer to stop, then wait for it to stop.  This
5187                 * avoids problems with race conditions removing the timer
5188                 * here.
5189                 */
5190                atomic_set(&stop_operation, 1);
5191                del_timer_sync(&ipmi_timer);
5192
5193                initialized = false;
5194
5195                /* Check for buffer leaks. */
5196                count = atomic_read(&smi_msg_inuse_count);
5197                if (count != 0)
5198                        pr_warn("SMI message count %d at exit\n", count);
5199                count = atomic_read(&recv_msg_inuse_count);
5200                if (count != 0)
5201                        pr_warn("recv message count %d at exit\n", count);
5202
5203                cleanup_srcu_struct(&ipmi_interfaces_srcu);
5204        }
5205        if (drvregistered)
5206                driver_unregister(&ipmidriver.driver);
5207}
5208module_exit(cleanup_ipmi);
5209
5210module_init(ipmi_init_msghandler_mod);
5211MODULE_LICENSE("GPL");
5212MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5213MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
5214MODULE_VERSION(IPMI_DRIVER_VERSION);
5215MODULE_SOFTDEP("post: ipmi_devintf");
5216