linux/drivers/char/ipmi/ipmi_msghandler.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * ipmi_msghandler.c
   4 *
   5 * Incoming and outgoing message routing for an IPMI interface.
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 */
  13
  14#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
  15#define dev_fmt pr_fmt
  16
  17#include <linux/module.h>
  18#include <linux/errno.h>
  19#include <linux/panic_notifier.h>
  20#include <linux/poll.h>
  21#include <linux/sched.h>
  22#include <linux/seq_file.h>
  23#include <linux/spinlock.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/ipmi.h>
  27#include <linux/ipmi_smi.h>
  28#include <linux/notifier.h>
  29#include <linux/init.h>
  30#include <linux/proc_fs.h>
  31#include <linux/rcupdate.h>
  32#include <linux/interrupt.h>
  33#include <linux/moduleparam.h>
  34#include <linux/workqueue.h>
  35#include <linux/uuid.h>
  36#include <linux/nospec.h>
  37#include <linux/vmalloc.h>
  38#include <linux/delay.h>
  39
  40#define IPMI_DRIVER_VERSION "39.2"
  41
  42static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
  43static int ipmi_init_msghandler(void);
  44static void smi_recv_tasklet(struct tasklet_struct *t);
  45static void handle_new_recv_msgs(struct ipmi_smi *intf);
  46static void need_waiter(struct ipmi_smi *intf);
  47static int handle_one_recv_msg(struct ipmi_smi *intf,
  48                               struct ipmi_smi_msg *msg);
  49
  50static bool initialized;
  51static bool drvregistered;
  52
  53/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
  54enum ipmi_panic_event_op {
  55        IPMI_SEND_PANIC_EVENT_NONE,
  56        IPMI_SEND_PANIC_EVENT,
  57        IPMI_SEND_PANIC_EVENT_STRING,
  58        IPMI_SEND_PANIC_EVENT_MAX
  59};
  60
  61/* Indices in this array should be mapped to enum ipmi_panic_event_op */
  62static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
  63
  64#ifdef CONFIG_IPMI_PANIC_STRING
  65#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
  66#elif defined(CONFIG_IPMI_PANIC_EVENT)
  67#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
  68#else
  69#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
  70#endif
  71
  72static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
  73
  74static int panic_op_write_handler(const char *val,
  75                                  const struct kernel_param *kp)
  76{
  77        char valcp[16];
  78        int e;
  79
  80        strscpy(valcp, val, sizeof(valcp));
  81        e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
  82        if (e < 0)
  83                return e;
  84
  85        ipmi_send_panic_event = e;
  86        return 0;
  87}
  88
  89static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
  90{
  91        const char *event_str;
  92
  93        if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
  94                event_str = "???";
  95        else
  96                event_str = ipmi_panic_event_str[ipmi_send_panic_event];
  97
  98        return sprintf(buffer, "%s\n", event_str);
  99}
 100
 101static const struct kernel_param_ops panic_op_ops = {
 102        .set = panic_op_write_handler,
 103        .get = panic_op_read_handler
 104};
 105module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
 106MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
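
/*
 * With the 0600 permissions above, panic_op can normally be changed at
 * runtime through sysfs (path assumed from the usual module_param_cb()
 * layout), e.g.:
 *
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * or set at load time with "modprobe ipmi_msghandler panic_op=event".
 */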
 107
 108
 109#define MAX_EVENTS_IN_QUEUE     25
 110
 111/* Remain in auto-maintenance mode for this amount of time (in ms). */
 112static unsigned long maintenance_mode_timeout_ms = 30000;
 113module_param(maintenance_mode_timeout_ms, ulong, 0644);
 114MODULE_PARM_DESC(maintenance_mode_timeout_ms,
 115                 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
 116
 117/*
 118 * Don't let a message sit in a queue forever, always time it with at least
 119 * the max message timer.  This is in milliseconds.
 120 */
 121#define MAX_MSG_TIMEOUT         60000
 122
 123/*
 124 * Timeout times below are in milliseconds, and are done off a 1
 125 * second timer.  So setting the value to 1000 would mean anything
 126 * between 0 and 1000ms.  So really the only reasonable minimum
 127 * setting is 2000ms, which is between 1 and 2 seconds.
 128 */
 129
 130/* The default timeout for message retries. */
 131static unsigned long default_retry_ms = 2000;
 132module_param(default_retry_ms, ulong, 0644);
 133MODULE_PARM_DESC(default_retry_ms,
 134                 "The time (milliseconds) between retry sends");
 135
 136/* The default timeout for maintenance mode message retries. */
 137static unsigned long default_maintenance_retry_ms = 3000;
 138module_param(default_maintenance_retry_ms, ulong, 0644);
 139MODULE_PARM_DESC(default_maintenance_retry_ms,
 140                 "The time (milliseconds) between retry sends in maintenance mode");
 141
 142/* The default maximum number of retries */
 143static unsigned int default_max_retries = 4;
 144module_param(default_max_retries, uint, 0644);
 145MODULE_PARM_DESC(default_max_retries,
 146                 "The maximum number of times to retry sends before giving up");
 147
 148/* Call every ~1000 ms. */
 149#define IPMI_TIMEOUT_TIME       1000
 150
 151/* How many jiffies does it take to get to the timeout time. */
 152#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
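
/*
 * A worked example of the conversion above: with HZ = 250,
 * IPMI_TIMEOUT_JIFFIES = (1000 * 250) / 1000 = 250 jiffies, so the
 * periodic timer still fires roughly once per second regardless of
 * the configured tick rate.
 */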
 153
 154/*
 155 * Request events from the queue every second (this is the number of
 156 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 157 * future, IPMI will add a way to know immediately if an event is in
 158 * the queue and this silliness can go away.
 159 */
 160#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
 161
 162/* How long should we cache dynamic device IDs? */
 163#define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
 164
 165/*
 166 * The main "user" data structure.
 167 */
 168struct ipmi_user {
 169        struct list_head link;
 170
 171        /*
 172         * Set to NULL when the user is destroyed; this is a pointer to
 173         * the structure itself so srcu_dereference() can be used on it.
 174         */
 175        struct ipmi_user *self;
 176        struct srcu_struct release_barrier;
 177
 178        struct kref refcount;
 179
 180        /* The upper layer that handles receive messages. */
 181        const struct ipmi_user_hndl *handler;
 182        void             *handler_data;
 183
 184        /* The interface this user is bound to. */
 185        struct ipmi_smi *intf;
 186
 187        /* Does this interface receive IPMI events? */
 188        bool gets_events;
 189
 190        /* Free must run in process context for RCU cleanup. */
 191        struct work_struct remove_work;
 192};
 193
 194static struct workqueue_struct *remove_work_wq;
 195
 196static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
 197        __acquires(user->release_barrier)
 198{
 199        struct ipmi_user *ruser;
 200
 201        *index = srcu_read_lock(&user->release_barrier);
 202        ruser = srcu_dereference(user->self, &user->release_barrier);
 203        if (!ruser)
 204                srcu_read_unlock(&user->release_barrier, *index);
 205        return ruser;
 206}
 207
 208static void release_ipmi_user(struct ipmi_user *user, int index)
 209{
 210        srcu_read_unlock(&user->release_barrier, index);
 211}
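
/*
 * The expected calling pattern, mirroring the exported helpers later
 * in this file (an illustrative sketch, not additional driver code):
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;
 *	... use user and user->intf safely ...
 *	release_ipmi_user(user, index);
 *
 * release_ipmi_user() must only be called when the acquire returned a
 * non-NULL user; on failure the SRCU read lock has already been
 * dropped inside acquire_ipmi_user().
 */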
 212
 213struct cmd_rcvr {
 214        struct list_head link;
 215
 216        struct ipmi_user *user;
 217        unsigned char netfn;
 218        unsigned char cmd;
 219        unsigned int  chans;
 220
 221        /*
 222         * This is used to form a linked list during mass deletion.
 223         * Since this is in an RCU list, we cannot use the link above
 224         * or change any data until the RCU period completes.  So we
 225         * use this next variable during mass deletion so we can have
 226         * a list and don't have to wait and restart the search on
 227         * every individual deletion of a command.
 228         */
 229        struct cmd_rcvr *next;
 230};
 231
 232struct seq_table {
 233        unsigned int         inuse : 1;
 234        unsigned int         broadcast : 1;
 235
 236        unsigned long        timeout;
 237        unsigned long        orig_timeout;
 238        unsigned int         retries_left;
 239
 240        /*
 241         * To verify on an incoming send message response that this is
 242         * the message that the response is for, we keep a sequence id
 243         * and increment it every time we send a message.
 244         */
 245        long                 seqid;
 246
 247        /*
 248         * This is held so we can properly respond to the message on a
 249         * timeout, and it is used to hold the temporary data for
 250         * retransmission, too.
 251         */
 252        struct ipmi_recv_msg *recv_msg;
 253};
 254
 255/*
 256 * Store the information in a msgid (long) to allow us to find a
 257 * sequence table entry from the msgid.
 258 */
 259#define STORE_SEQ_IN_MSGID(seq, seqid) \
 260        ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
 261
 262#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
 263        do {                                                            \
 264                seq = (((msgid) >> 26) & 0x3f);                         \
 265                seqid = ((msgid) & 0x3ffffff);                          \
 266        } while (0)
 267
 268#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
 269
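/*
 * A worked example of the packing above: with seq = 5 and
 * seqid = 0x123, STORE_SEQ_IN_MSGID() yields
 * (5 << 26) | 0x123 = 0x14000123, and GET_SEQ_FROM_MSGID() on that
 * value recovers seq = 5 and seqid = 0x123.  Only the low 6 bits of
 * the sequence number and the low 26 bits of the sequence id survive
 * the round trip.
 */
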
 270#define IPMI_MAX_CHANNELS       16
 271struct ipmi_channel {
 272        unsigned char medium;
 273        unsigned char protocol;
 274};
 275
 276struct ipmi_channel_set {
 277        struct ipmi_channel c[IPMI_MAX_CHANNELS];
 278};
 279
 280struct ipmi_my_addrinfo {
 281        /*
 282         * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
 283         * but may be changed by the user.
 284         */
 285        unsigned char address;
 286
 287        /*
 288         * My LUN.  This should generally stay the SMS LUN, but just in
 289         * case...
 290         */
 291        unsigned char lun;
 292};
 293
 294/*
 295 * Note that the product id, manufacturer id, guid, and device id are
 296 * immutable in this structure, so dyn_mutex is not required for
 297 * accessing those.  If those change on a BMC, a new BMC is allocated.
 298 */
 299struct bmc_device {
 300        struct platform_device pdev;
 301        struct list_head       intfs; /* Interfaces on this BMC. */
 302        struct ipmi_device_id  id;
 303        struct ipmi_device_id  fetch_id;
 304        int                    dyn_id_set;
 305        unsigned long          dyn_id_expiry;
 306        struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
 307        guid_t                 guid;
 308        guid_t                 fetch_guid;
 309        int                    dyn_guid_set;
 310        struct kref            usecount;
 311        struct work_struct     remove_work;
 312        unsigned char          cc; /* completion code */
 313};
 314#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 315
 316static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
 317                             struct ipmi_device_id *id,
 318                             bool *guid_set, guid_t *guid);
 319
 320/*
 321 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 322 * structure.
 323 */
 324enum ipmi_stat_indexes {
 325        /* Commands we got from the user that were invalid. */
 326        IPMI_STAT_sent_invalid_commands = 0,
 327
 328        /* Commands we sent to the MC. */
 329        IPMI_STAT_sent_local_commands,
 330
 331        /* Responses from the MC that were delivered to a user. */
 332        IPMI_STAT_handled_local_responses,
 333
 334        /* Responses from the MC that were not delivered to a user. */
 335        IPMI_STAT_unhandled_local_responses,
 336
 337        /* Commands we sent out to the IPMB bus. */
 338        IPMI_STAT_sent_ipmb_commands,
 339
 340        /* Commands sent on the IPMB that had errors on the SEND CMD */
 341        IPMI_STAT_sent_ipmb_command_errs,
 342
 343        /* Each retransmit increments this count. */
 344        IPMI_STAT_retransmitted_ipmb_commands,
 345
 346        /*
 347         * When a message times out (runs out of retransmits) this is
 348         * incremented.
 349         */
 350        IPMI_STAT_timed_out_ipmb_commands,
 351
 352        /*
 353         * This is like above, but for broadcasts.  Broadcasts are
 354         * *not* included in the above count (they are expected to
 355         * time out).
 356         */
 357        IPMI_STAT_timed_out_ipmb_broadcasts,
 358
 359        /* Responses I have sent to the IPMB bus. */
 360        IPMI_STAT_sent_ipmb_responses,
 361
 362        /* The response was delivered to the user. */
 363        IPMI_STAT_handled_ipmb_responses,
 364
 365        /* The response had invalid data in it. */
 366        IPMI_STAT_invalid_ipmb_responses,
 367
 368        /* The response didn't have anyone waiting for it. */
 369        IPMI_STAT_unhandled_ipmb_responses,
 370
 371        /* Commands we sent out to the IPMB bus. */
 372        IPMI_STAT_sent_lan_commands,
 373
 374        /* Commands sent on the IPMB that had errors on the SEND CMD */
 375        IPMI_STAT_sent_lan_command_errs,
 376
 377        /* Each retransmit increments this count. */
 378        IPMI_STAT_retransmitted_lan_commands,
 379
 380        /*
 381         * When a message times out (runs out of retransmits) this is
 382         * incremented.
 383         */
 384        IPMI_STAT_timed_out_lan_commands,
 385
 386        /* Responses I have sent to the IPMB bus. */
 387        IPMI_STAT_sent_lan_responses,
 388
 389        /* The response was delivered to the user. */
 390        IPMI_STAT_handled_lan_responses,
 391
 392        /* The response had invalid data in it. */
 393        IPMI_STAT_invalid_lan_responses,
 394
 395        /* The response didn't have anyone waiting for it. */
 396        IPMI_STAT_unhandled_lan_responses,
 397
 398        /* The command was delivered to the user. */
 399        IPMI_STAT_handled_commands,
 400
 401        /* The command had invalid data in it. */
 402        IPMI_STAT_invalid_commands,
 403
 404        /* The command didn't have anyone waiting for it. */
 405        IPMI_STAT_unhandled_commands,
 406
 407        /* Invalid data in an event. */
 408        IPMI_STAT_invalid_events,
 409
 410        /* Events that were received with the proper format. */
 411        IPMI_STAT_events,
 412
 413        /* Retransmissions on IPMB that failed. */
 414        IPMI_STAT_dropped_rexmit_ipmb_commands,
 415
 416        /* Retransmissions on LAN that failed. */
 417        IPMI_STAT_dropped_rexmit_lan_commands,
 418
 419        /* This *must* remain last, add new values above this. */
 420        IPMI_NUM_STATS
 421};
 422
 423
 424#define IPMI_IPMB_NUM_SEQ       64
 425struct ipmi_smi {
 426        struct module *owner;
 427
 428        /* What interface number are we? */
 429        int intf_num;
 430
 431        struct kref refcount;
 432
 433        /* Set when the interface is being unregistered. */
 434        bool in_shutdown;
 435
 436        /* Used for a list of interfaces. */
 437        struct list_head link;
 438
 439        /*
 440         * The list of upper layers that are using me.  seq_lock write
 441         * protects this.  Read protection is with srcu.
 442         */
 443        struct list_head users;
 444        struct srcu_struct users_srcu;
 445
 446        /* Used for wake ups at startup. */
 447        wait_queue_head_t waitq;
 448
 449        /*
 450         * Prevents the interface from being unregistered when the
 451         * interface is used by being looked up through the BMC
 452         * structure.
 453         */
 454        struct mutex bmc_reg_mutex;
 455
 456        struct bmc_device tmp_bmc;
 457        struct bmc_device *bmc;
 458        bool bmc_registered;
 459        struct list_head bmc_link;
 460        char *my_dev_name;
 461        bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
 462        struct work_struct bmc_reg_work;
 463
 464        const struct ipmi_smi_handlers *handlers;
 465        void                     *send_info;
 466
 467        /* Driver-model device for the system interface. */
 468        struct device          *si_dev;
 469
 470        /*
 471         * A table of sequence numbers for this interface.  We use the
 472         * sequence numbers for IPMB messages that go out of the
 473         * interface to match them up with their responses.  A routine
 474         * is called periodically to time the items in this list.
 475         */
 476        spinlock_t       seq_lock;
 477        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
 478        int curr_seq;
 479
 480        /*
 481         * Messages queued for delivery.  If delivery fails (out of memory
 482         * for instance), they will stay in here to be processed later in a
 483         * periodic timer interrupt.  The tasklet is for handling received
 484         * messages directly from the handler.
 485         */
 486        spinlock_t       waiting_rcv_msgs_lock;
 487        struct list_head waiting_rcv_msgs;
 488        atomic_t         watchdog_pretimeouts_to_deliver;
 489        struct tasklet_struct recv_tasklet;
 490
 491        spinlock_t             xmit_msgs_lock;
 492        struct list_head       xmit_msgs;
 493        struct ipmi_smi_msg    *curr_msg;
 494        struct list_head       hp_xmit_msgs;
 495
 496        /*
 497         * The list of command receivers that are registered for commands
 498         * on this interface.
 499         */
 500        struct mutex     cmd_rcvrs_mutex;
 501        struct list_head cmd_rcvrs;
 502
 503        /*
 504         * Events that were queued because no one was there to receive
 505         * them.
 506         */
 507        spinlock_t       events_lock; /* For dealing with event stuff. */
 508        struct list_head waiting_events;
 509        unsigned int     waiting_events_count; /* How many events in queue? */
 510        char             delivering_events;
 511        char             event_msg_printed;
 512
 513        /* How many users are waiting for events? */
 514        atomic_t         event_waiters;
 515        unsigned int     ticks_to_req_ev;
 516
 517        spinlock_t       watch_lock; /* For dealing with watch stuff below. */
 518
 519        /* How many users are waiting for commands? */
 520        unsigned int     command_waiters;
 521
 522        /* How many users are waiting for watchdogs? */
 523        unsigned int     watchdog_waiters;
 524
 525        /* How many users are waiting for message responses? */
 526        unsigned int     response_waiters;
 527
 528        /*
 529         * Tells what the lower layer has last been asked to watch for,
 530         * messages and/or watchdogs.  Protected by watch_lock.
 531         */
 532        unsigned int     last_watch_mask;
 533
 534        /*
 535         * The event receiver for my BMC, only really used at panic
 536         * shutdown as a place to store this.
 537         */
 538        unsigned char event_receiver;
 539        unsigned char event_receiver_lun;
 540        unsigned char local_sel_device;
 541        unsigned char local_event_generator;
 542
 543        /* For handling of maintenance mode. */
 544        int maintenance_mode;
 545        bool maintenance_mode_enable;
 546        int auto_maintenance_timeout;
 547        spinlock_t maintenance_mode_lock; /* Used in a timer... */
 548
 549        /*
 550         * If we are doing maintenance on something on IPMB, extend
 551         * the timeout time to avoid timeouts writing firmware and
 552         * such.
 553         */
 554        int ipmb_maintenance_mode_timeout;
 555
 556        /*
 557         * A cheap hack, if this is non-null and a message to an
 558         * interface comes in with a NULL user, call this routine with
 559         * it.  Note that the message will still be freed by the
 560         * caller.  This only works on the system interface.
 561         *
 562         * Protected by bmc_reg_mutex.
 563         */
 564        void (*null_user_handler)(struct ipmi_smi *intf,
 565                                  struct ipmi_recv_msg *msg);
 566
 567        /*
 568         * When we are scanning the channels for an SMI, this will
 569         * tell which channel we are scanning.
 570         */
 571        int curr_channel;
 572
 573        /* Channel information */
 574        struct ipmi_channel_set *channel_list;
 575        unsigned int curr_working_cset; /* First index into the following. */
 576        struct ipmi_channel_set wchannels[2];
 577        struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
 578        bool channels_ready;
 579
 580        atomic_t stats[IPMI_NUM_STATS];
 581
 582        /*
 583         * run_to_completion duplicate of smb_info, smi_info
 584         * and ipmi_serial_info structures.  Used to decrease the number of
 585         * parameters passed by "low" level IPMI code.
 586         */
 587        int run_to_completion;
 588};
 589#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 590
 591static void __get_guid(struct ipmi_smi *intf);
 592static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
 593static int __ipmi_bmc_register(struct ipmi_smi *intf,
 594                               struct ipmi_device_id *id,
 595                               bool guid_set, guid_t *guid, int intf_num);
 596static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
 597
 598
 599/*
 600 * The driver model view of the IPMI messaging driver.
 601 */
 602static struct platform_driver ipmidriver = {
 603        .driver = {
 604                .name = "ipmi",
 605                .bus = &platform_bus_type
 606        }
 607};
 608/*
 609 * This mutex keeps us from adding the same BMC twice.
 610 */
 611static DEFINE_MUTEX(ipmidriver_mutex);
 612
 613static LIST_HEAD(ipmi_interfaces);
 614static DEFINE_MUTEX(ipmi_interfaces_mutex);
 615#define ipmi_interfaces_mutex_held() \
 616        lockdep_is_held(&ipmi_interfaces_mutex)
 617static struct srcu_struct ipmi_interfaces_srcu;
 618
 619/*
 620 * List of watchers that want to know when smi's are added and deleted.
 621 */
 622static LIST_HEAD(smi_watchers);
 623static DEFINE_MUTEX(smi_watchers_mutex);
 624
 625#define ipmi_inc_stat(intf, stat) \
 626        atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 627#define ipmi_get_stat(intf, stat) \
 628        ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
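
/*
 * These macros simply paste the short name onto the IPMI_STAT_ prefix,
 * so for example
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to
 *
 *	atomic_inc(&(intf)->stats[IPMI_STAT_sent_ipmb_commands]);
 */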
 629
 630static const char * const addr_src_to_str[] = {
 631        "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
 632        "device-tree", "platform"
 633};
 634
 635const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
 636{
 637        if (src >= SI_LAST)
 638                src = 0; /* Invalid */
 639        return addr_src_to_str[src];
 640}
 641EXPORT_SYMBOL(ipmi_addr_src_to_str);
 642
 643static int is_lan_addr(struct ipmi_addr *addr)
 644{
 645        return addr->addr_type == IPMI_LAN_ADDR_TYPE;
 646}
 647
 648static int is_ipmb_addr(struct ipmi_addr *addr)
 649{
 650        return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
 651}
 652
 653static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
 654{
 655        return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
 656}
 657
 658static int is_ipmb_direct_addr(struct ipmi_addr *addr)
 659{
 660        return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
 661}
 662
 663static void free_recv_msg_list(struct list_head *q)
 664{
 665        struct ipmi_recv_msg *msg, *msg2;
 666
 667        list_for_each_entry_safe(msg, msg2, q, link) {
 668                list_del(&msg->link);
 669                ipmi_free_recv_msg(msg);
 670        }
 671}
 672
 673static void free_smi_msg_list(struct list_head *q)
 674{
 675        struct ipmi_smi_msg *msg, *msg2;
 676
 677        list_for_each_entry_safe(msg, msg2, q, link) {
 678                list_del(&msg->link);
 679                ipmi_free_smi_msg(msg);
 680        }
 681}
 682
 683static void clean_up_interface_data(struct ipmi_smi *intf)
 684{
 685        int              i;
 686        struct cmd_rcvr  *rcvr, *rcvr2;
 687        struct list_head list;
 688
 689        tasklet_kill(&intf->recv_tasklet);
 690
 691        free_smi_msg_list(&intf->waiting_rcv_msgs);
 692        free_recv_msg_list(&intf->waiting_events);
 693
 694        /*
 695         * Wholesale remove all the entries from the list in the
 696         * interface and wait for RCU to know that none are in use.
 697         */
 698        mutex_lock(&intf->cmd_rcvrs_mutex);
 699        INIT_LIST_HEAD(&list);
 700        list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
 701        mutex_unlock(&intf->cmd_rcvrs_mutex);
 702
 703        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
 704                kfree(rcvr);
 705
 706        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 707                if ((intf->seq_table[i].inuse)
 708                                        && (intf->seq_table[i].recv_msg))
 709                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 710        }
 711}
 712
 713static void intf_free(struct kref *ref)
 714{
 715        struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
 716
 717        clean_up_interface_data(intf);
 718        kfree(intf);
 719}
 720
 721struct watcher_entry {
 722        int              intf_num;
 723        struct ipmi_smi  *intf;
 724        struct list_head link;
 725};
 726
 727int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 728{
 729        struct ipmi_smi *intf;
 730        int index, rv;
 731
 732        /*
 733         * Make sure the driver is actually initialized, this handles
 734         * problems with initialization order.
 735         */
 736        rv = ipmi_init_msghandler();
 737        if (rv)
 738                return rv;
 739
 740        mutex_lock(&smi_watchers_mutex);
 741
 742        list_add(&watcher->link, &smi_watchers);
 743
 744        index = srcu_read_lock(&ipmi_interfaces_srcu);
 745        list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
 746                        lockdep_is_held(&smi_watchers_mutex)) {
 747                int intf_num = READ_ONCE(intf->intf_num);
 748
 749                if (intf_num == -1)
 750                        continue;
 751                watcher->new_smi(intf_num, intf->si_dev);
 752        }
 753        srcu_read_unlock(&ipmi_interfaces_srcu, index);
 754
 755        mutex_unlock(&smi_watchers_mutex);
 756
 757        return 0;
 758}
 759EXPORT_SYMBOL(ipmi_smi_watcher_register);
 760
 761int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 762{
 763        mutex_lock(&smi_watchers_mutex);
 764        list_del(&watcher->link);
 765        mutex_unlock(&smi_watchers_mutex);
 766        return 0;
 767}
 768EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
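
/*
 * A minimal watcher sketch (hypothetical client code; field names as
 * declared for struct ipmi_smi_watcher in <linux/ipmi.h>):
 *
 *	static void my_new_smi(int if_num, struct device *dev) { ... }
 *	static void my_smi_gone(int if_num) { ... }
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 *
 * new_smi() is called immediately for every interface that already
 * exists, as the loop in ipmi_smi_watcher_register() above shows.
 */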
 769
 770/*
 771 * Takes and releases smi_watchers_mutex itself; do not call with it held.
 772 */
 773static void
 774call_smi_watchers(int i, struct device *dev)
 775{
 776        struct ipmi_smi_watcher *w;
 777
 778        mutex_lock(&smi_watchers_mutex);
 779        list_for_each_entry(w, &smi_watchers, link) {
 780                if (try_module_get(w->owner)) {
 781                        w->new_smi(i, dev);
 782                        module_put(w->owner);
 783                }
 784        }
 785        mutex_unlock(&smi_watchers_mutex);
 786}
 787
 788static int
 789ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
 790{
 791        if (addr1->addr_type != addr2->addr_type)
 792                return 0;
 793
 794        if (addr1->channel != addr2->channel)
 795                return 0;
 796
 797        if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 798                struct ipmi_system_interface_addr *smi_addr1
 799                    = (struct ipmi_system_interface_addr *) addr1;
 800                struct ipmi_system_interface_addr *smi_addr2
 801                    = (struct ipmi_system_interface_addr *) addr2;
 802                return (smi_addr1->lun == smi_addr2->lun);
 803        }
 804
 805        if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
 806                struct ipmi_ipmb_addr *ipmb_addr1
 807                    = (struct ipmi_ipmb_addr *) addr1;
 808                struct ipmi_ipmb_addr *ipmb_addr2
 809                    = (struct ipmi_ipmb_addr *) addr2;
 810
 811                return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
 812                        && (ipmb_addr1->lun == ipmb_addr2->lun));
 813        }
 814
 815        if (is_ipmb_direct_addr(addr1)) {
 816                struct ipmi_ipmb_direct_addr *daddr1
 817                        = (struct ipmi_ipmb_direct_addr *) addr1;
 818                struct ipmi_ipmb_direct_addr *daddr2
 819                        = (struct ipmi_ipmb_direct_addr *) addr2;
 820
 821                return daddr1->slave_addr == daddr2->slave_addr &&
 822                        daddr1->rq_lun == daddr2->rq_lun &&
 823                        daddr1->rs_lun == daddr2->rs_lun;
 824        }
 825
 826        if (is_lan_addr(addr1)) {
 827                struct ipmi_lan_addr *lan_addr1
 828                        = (struct ipmi_lan_addr *) addr1;
 829                struct ipmi_lan_addr *lan_addr2
 830                    = (struct ipmi_lan_addr *) addr2;
 831
 832                return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
 833                        && (lan_addr1->local_SWID == lan_addr2->local_SWID)
 834                        && (lan_addr1->session_handle
 835                            == lan_addr2->session_handle)
 836                        && (lan_addr1->lun == lan_addr2->lun));
 837        }
 838
 839        return 1;
 840}
 841
 842int ipmi_validate_addr(struct ipmi_addr *addr, int len)
 843{
 844        if (len < sizeof(struct ipmi_system_interface_addr))
 845                return -EINVAL;
 846
 847        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 848                if (addr->channel != IPMI_BMC_CHANNEL)
 849                        return -EINVAL;
 850                return 0;
 851        }
 852
 853        if ((addr->channel == IPMI_BMC_CHANNEL)
 854            || (addr->channel >= IPMI_MAX_CHANNELS)
 855            || (addr->channel < 0))
 856                return -EINVAL;
 857
 858        if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
 859                if (len < sizeof(struct ipmi_ipmb_addr))
 860                        return -EINVAL;
 861                return 0;
 862        }
 863
 864        if (is_ipmb_direct_addr(addr)) {
 865                struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
 866
 867                if (addr->channel != 0)
 868                        return -EINVAL;
 869                if (len < sizeof(struct ipmi_ipmb_direct_addr))
 870                        return -EINVAL;
 871
 872                if (daddr->slave_addr & 0x01)
 873                        return -EINVAL;
 874                if (daddr->rq_lun >= 4)
 875                        return -EINVAL;
 876                if (daddr->rs_lun >= 4)
 877                        return -EINVAL;
 878                return 0;
 879        }
 880
 881        if (is_lan_addr(addr)) {
 882                if (len < sizeof(struct ipmi_lan_addr))
 883                        return -EINVAL;
 884                return 0;
 885        }
 886
 887        return -EINVAL;
 888}
 889EXPORT_SYMBOL(ipmi_validate_addr);
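
/*
 * An example of a well-formed system-interface address that passes the
 * checks above (an illustrative sketch using the uapi address types
 * from <linux/ipmi.h>):
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *
 *	rv = ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
 *
 * Any other channel value for this address type is rejected with
 * -EINVAL.
 */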
 890
 891unsigned int ipmi_addr_length(int addr_type)
 892{
 893        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 894                return sizeof(struct ipmi_system_interface_addr);
 895
 896        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
 897                        || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
 898                return sizeof(struct ipmi_ipmb_addr);
 899
 900        if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
 901                return sizeof(struct ipmi_ipmb_direct_addr);
 902
 903        if (addr_type == IPMI_LAN_ADDR_TYPE)
 904                return sizeof(struct ipmi_lan_addr);
 905
 906        return 0;
 907}
 908EXPORT_SYMBOL(ipmi_addr_length);
 909
 910static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 911{
 912        int rv = 0;
 913
 914        if (!msg->user) {
 915                /* Special handling for NULL users. */
 916                if (intf->null_user_handler) {
 917                        intf->null_user_handler(intf, msg);
 918                } else {
 919                        /* No handler, so give up. */
 920                        rv = -EINVAL;
 921                }
 922                ipmi_free_recv_msg(msg);
 923        } else if (oops_in_progress) {
 924                /*
 925                 * If we are running in the panic context, calling the
 926                 * receive handler doesn't have much meaning and has a
 927                 * deadlock risk, so simply skip it in that case.
 928                 */
 929                ipmi_free_recv_msg(msg);
 930        } else {
 931                int index;
 932                struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
 933
 934                if (user) {
 935                        user->handler->ipmi_recv_hndl(msg, user->handler_data);
 936                        release_ipmi_user(user, index);
 937                } else {
 938                        /* User went away, give up. */
 939                        ipmi_free_recv_msg(msg);
 940                        rv = -EINVAL;
 941                }
 942        }
 943
 944        return rv;
 945}
 946
 947static void deliver_local_response(struct ipmi_smi *intf,
 948                                   struct ipmi_recv_msg *msg)
 949{
 950        if (deliver_response(intf, msg))
 951                ipmi_inc_stat(intf, unhandled_local_responses);
 952        else
 953                ipmi_inc_stat(intf, handled_local_responses);
 954}
 955
 956static void deliver_err_response(struct ipmi_smi *intf,
 957                                 struct ipmi_recv_msg *msg, int err)
 958{
 959        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
 960        msg->msg_data[0] = err;
 961        msg->msg.netfn |= 1; /* Convert to a response. */
 962        msg->msg.data_len = 1;
 963        msg->msg.data = msg->msg_data;
 964        deliver_local_response(intf, msg);
 965}
 966
 967static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
 968{
 969        unsigned long iflags;
 970
 971        if (!intf->handlers->set_need_watch)
 972                return;
 973
 974        spin_lock_irqsave(&intf->watch_lock, iflags);
 975        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
 976                intf->response_waiters++;
 977
 978        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
 979                intf->watchdog_waiters++;
 980
 981        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
 982                intf->command_waiters++;
 983
 984        if ((intf->last_watch_mask & flags) != flags) {
 985                intf->last_watch_mask |= flags;
 986                intf->handlers->set_need_watch(intf->send_info,
 987                                               intf->last_watch_mask);
 988        }
 989        spin_unlock_irqrestore(&intf->watch_lock, iflags);
 990}
 991
 992static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
 993{
 994        unsigned long iflags;
 995
 996        if (!intf->handlers->set_need_watch)
 997                return;
 998
 999        spin_lock_irqsave(&intf->watch_lock, iflags);
1000        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
1001                intf->response_waiters--;
1002
1003        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
1004                intf->watchdog_waiters--;
1005
1006        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
1007                intf->command_waiters--;
1008
1009        flags = 0;
1010        if (intf->response_waiters)
1011                flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
1012        if (intf->watchdog_waiters)
1013                flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
1014        if (intf->command_waiters)
1015                flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
1016
1017        if (intf->last_watch_mask != flags) {
1018                intf->last_watch_mask = flags;
1019                intf->handlers->set_need_watch(intf->send_info,
1020                                               intf->last_watch_mask);
1021        }
1022        spin_unlock_irqrestore(&intf->watch_lock, iflags);
1023}
1024
1025/*
1026 * Find the next sequence number not being used and add the given
1027 * message with the given timeout to the sequence table.  This must be
1028 * called with the interface's seq_lock held.
1029 */
1030static int intf_next_seq(struct ipmi_smi      *intf,
1031                         struct ipmi_recv_msg *recv_msg,
1032                         unsigned long        timeout,
1033                         int                  retries,
1034                         int                  broadcast,
1035                         unsigned char        *seq,
1036                         long                 *seqid)
1037{
1038        int          rv = 0;
1039        unsigned int i;
1040
1041        if (timeout == 0)
1042                timeout = default_retry_ms;
1043        if (retries < 0)
1044                retries = default_max_retries;
1045
1046        for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1047                                        i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1048                if (!intf->seq_table[i].inuse)
1049                        break;
1050        }
1051
1052        if (!intf->seq_table[i].inuse) {
1053                intf->seq_table[i].recv_msg = recv_msg;
1054
1055                /*
1056                 * Start with the maximum timeout, when the send response
1057                 * comes in we will start the real timer.
1058                 */
1059                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1060                intf->seq_table[i].orig_timeout = timeout;
1061                intf->seq_table[i].retries_left = retries;
1062                intf->seq_table[i].broadcast = broadcast;
1063                intf->seq_table[i].inuse = 1;
1064                intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1065                *seq = i;
1066                *seqid = intf->seq_table[i].seqid;
1067                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1068                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1069                need_waiter(intf);
1070        } else {
1071                rv = -EAGAIN;
1072        }
1073
1074        return rv;
1075}
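
/*
 * A sketch of how a caller is expected to combine the outputs of
 * intf_next_seq() with the msgid macros defined earlier (illustrative
 * only; seq_lock must be held as noted above):
 *
 *	unsigned char seq;
 *	long seqid;
 *
 *	rv = intf_next_seq(intf, recv_msg, 0, -1, 0, &seq, &seqid);
 *	if (!rv)
 *		msgid = STORE_SEQ_IN_MSGID(seq, seqid);
 *
 * Passing timeout == 0 and retries < 0 selects default_retry_ms and
 * default_max_retries, per the checks at the top of the function.
 */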
1076
1077/*
1078 * Return the receive message for the given sequence number and
1079 * release the sequence number so it can be reused.  Some other data
1080 * is passed in to be sure the message matches up correctly (to help
1081 * guard against messages coming in after their timeout and the
1082 * sequence number being reused).
1083 */
1084static int intf_find_seq(struct ipmi_smi      *intf,
1085                         unsigned char        seq,
1086                         short                channel,
1087                         unsigned char        cmd,
1088                         unsigned char        netfn,
1089                         struct ipmi_addr     *addr,
1090                         struct ipmi_recv_msg **recv_msg)
1091{
1092        int           rv = -ENODEV;
1093        unsigned long flags;
1094
1095        if (seq >= IPMI_IPMB_NUM_SEQ)
1096                return -EINVAL;
1097
1098        spin_lock_irqsave(&intf->seq_lock, flags);
1099        if (intf->seq_table[seq].inuse) {
1100                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1101
1102                if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1103                                && (msg->msg.netfn == netfn)
1104                                && (ipmi_addr_equal(addr, &msg->addr))) {
1105                        *recv_msg = msg;
1106                        intf->seq_table[seq].inuse = 0;
1107                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1108                        rv = 0;
1109                }
1110        }
1111        spin_unlock_irqrestore(&intf->seq_lock, flags);
1112
1113        return rv;
1114}
1115
1116
1117/* Start the timer for a specific sequence table entry. */
1118static int intf_start_seq_timer(struct ipmi_smi *intf,
1119                                long       msgid)
1120{
1121        int           rv = -ENODEV;
1122        unsigned long flags;
1123        unsigned char seq;
1124        unsigned long seqid;
1125
1126
1127        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1128
1129        spin_lock_irqsave(&intf->seq_lock, flags);
1130        /*
1131         * We do this verification because the user can be deleted
1132         * while a message is outstanding.
1133         */
1134        if ((intf->seq_table[seq].inuse)
1135                                && (intf->seq_table[seq].seqid == seqid)) {
1136                struct seq_table *ent = &intf->seq_table[seq];
1137                ent->timeout = ent->orig_timeout;
1138                rv = 0;
1139        }
1140        spin_unlock_irqrestore(&intf->seq_lock, flags);
1141
1142        return rv;
1143}
1144
1145/* Got an error for the send message for a specific sequence number. */
1146static int intf_err_seq(struct ipmi_smi *intf,
1147                        long         msgid,
1148                        unsigned int err)
1149{
1150        int                  rv = -ENODEV;
1151        unsigned long        flags;
1152        unsigned char        seq;
1153        unsigned long        seqid;
1154        struct ipmi_recv_msg *msg = NULL;
1155
1156
1157        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1158
1159        spin_lock_irqsave(&intf->seq_lock, flags);
1160        /*
1161         * We do this verification because the user can be deleted
1162         * while a message is outstanding.
1163         */
1164        if ((intf->seq_table[seq].inuse)
1165                                && (intf->seq_table[seq].seqid == seqid)) {
1166                struct seq_table *ent = &intf->seq_table[seq];
1167
1168                ent->inuse = 0;
1169                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1170                msg = ent->recv_msg;
1171                rv = 0;
1172        }
1173        spin_unlock_irqrestore(&intf->seq_lock, flags);
1174
1175        if (msg)
1176                deliver_err_response(intf, msg, err);
1177
1178        return rv;
1179}
1180
1181static void free_user_work(struct work_struct *work)
1182{
1183        struct ipmi_user *user = container_of(work, struct ipmi_user,
1184                                              remove_work);
1185
1186        cleanup_srcu_struct(&user->release_barrier);
1187        vfree(user);
1188}
1189
1190int ipmi_create_user(unsigned int          if_num,
1191                     const struct ipmi_user_hndl *handler,
1192                     void                  *handler_data,
1193                     struct ipmi_user      **user)
1194{
1195        unsigned long flags;
1196        struct ipmi_user *new_user;
1197        int           rv, index;
1198        struct ipmi_smi *intf;
1199
1200        /*
1201         * There is no module usecount here, because it's not
1202         * required.  Since this can only be used by and called from
1203         * other modules, they will implicitly use this module, and
1204         * thus this can't be removed unless the other modules are
1205         * removed.
1206         */
1207
1208        if (handler == NULL)
1209                return -EINVAL;
1210
1211        /*
1212         * Make sure the driver is actually initialized; this handles
1213         * problems with initialization order.
1214         */
1215        rv = ipmi_init_msghandler();
1216        if (rv)
1217                return rv;
1218
1219        new_user = vzalloc(sizeof(*new_user));
1220        if (!new_user)
1221                return -ENOMEM;
1222
1223        index = srcu_read_lock(&ipmi_interfaces_srcu);
1224        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1225                if (intf->intf_num == if_num)
1226                        goto found;
1227        }
1228        /* Not found, return an error */
1229        rv = -EINVAL;
1230        goto out_kfree;
1231
1232 found:
1233        INIT_WORK(&new_user->remove_work, free_user_work);
1234
1235        rv = init_srcu_struct(&new_user->release_barrier);
1236        if (rv)
1237                goto out_kfree;
1238
1239        if (!try_module_get(intf->owner)) {
1240                rv = -ENODEV;
1241                goto out_kfree;
1242        }
1243
1244        /* Note that each existing user holds a refcount to the interface. */
1245        kref_get(&intf->refcount);
1246
1247        kref_init(&new_user->refcount);
1248        new_user->handler = handler;
1249        new_user->handler_data = handler_data;
1250        new_user->intf = intf;
1251        new_user->gets_events = false;
1252
1253        rcu_assign_pointer(new_user->self, new_user);
1254        spin_lock_irqsave(&intf->seq_lock, flags);
1255        list_add_rcu(&new_user->link, &intf->users);
1256        spin_unlock_irqrestore(&intf->seq_lock, flags);
1257        if (handler->ipmi_watchdog_pretimeout)
1258                /* User wants pretimeouts, so make sure to watch for them. */
1259                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1260        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1261        *user = new_user;
1262        return 0;
1263
1264out_kfree:
1265        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1266        vfree(new_user);
1267        return rv;
1268}
1269EXPORT_SYMBOL(ipmi_create_user);
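
/*
 * A minimal client-side usage sketch (hypothetical module code, not
 * part of this driver).  The handler layout follows the callbacks used
 * elsewhere in this file:
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... inspect msg->msg and msg->msg_data ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *
 * The user must eventually be released with ipmi_destroy_user().
 */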
1270
1271int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1272{
1273        int rv, index;
1274        struct ipmi_smi *intf;
1275
1276        index = srcu_read_lock(&ipmi_interfaces_srcu);
1277        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1278                if (intf->intf_num == if_num)
1279                        goto found;
1280        }
1281        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1282
1283        /* Not found, return an error */
1284        return -EINVAL;
1285
1286found:
1287        if (!intf->handlers->get_smi_info)
1288                rv = -ENOTTY;
1289        else
1290                rv = intf->handlers->get_smi_info(intf->send_info, data);
1291        srcu_read_unlock(&ipmi_interfaces_srcu, index);
1292
1293        return rv;
1294}
1295EXPORT_SYMBOL(ipmi_get_smi_info);
1296
1297static void free_user(struct kref *ref)
1298{
1299        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1300
1301        /* SRCU cleanup must happen in task context. */
1302        queue_work(remove_work_wq, &user->remove_work);
1303}
1304
1305static void _ipmi_destroy_user(struct ipmi_user *user)
1306{
1307        struct ipmi_smi  *intf = user->intf;
1308        int              i;
1309        unsigned long    flags;
1310        struct cmd_rcvr  *rcvr;
1311        struct cmd_rcvr  *rcvrs = NULL;
1312
1313        if (!acquire_ipmi_user(user, &i)) {
1314                /*
1315                 * The user has already been cleaned up, just make sure
1316                 * nothing is using it and return.
1317                 */
1318                synchronize_srcu(&user->release_barrier);
1319                return;
1320        }
1321
1322        rcu_assign_pointer(user->self, NULL);
1323        release_ipmi_user(user, i);
1324
1325        synchronize_srcu(&user->release_barrier);
1326
1327        if (user->handler->shutdown)
1328                user->handler->shutdown(user->handler_data);
1329
1330        if (user->handler->ipmi_watchdog_pretimeout)
1331                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1332
1333        if (user->gets_events)
1334                atomic_dec(&intf->event_waiters);
1335
1336        /* Remove the user from the interface's sequence table. */
1337        spin_lock_irqsave(&intf->seq_lock, flags);
1338        list_del_rcu(&user->link);
1339
1340        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1341                if (intf->seq_table[i].inuse
1342                    && (intf->seq_table[i].recv_msg->user == user)) {
1343                        intf->seq_table[i].inuse = 0;
1344                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1345                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1346                }
1347        }
1348        spin_unlock_irqrestore(&intf->seq_lock, flags);
1349
1350        /*
1351         * Remove the user from the command receiver's table.  First
1352         * we build a list of everything (not using the standard link,
1353         * since other things may be using it until we do
1354         * synchronize_rcu()), then free everything in that list.
1355         */
1356        mutex_lock(&intf->cmd_rcvrs_mutex);
1357        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1358                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1359                if (rcvr->user == user) {
1360                        list_del_rcu(&rcvr->link);
1361                        rcvr->next = rcvrs;
1362                        rcvrs = rcvr;
1363                }
1364        }
1365        mutex_unlock(&intf->cmd_rcvrs_mutex);
1366        synchronize_rcu();
1367        while (rcvrs) {
1368                rcvr = rcvrs;
1369                rcvrs = rcvr->next;
1370                kfree(rcvr);
1371        }
1372
1373        kref_put(&intf->refcount, intf_free);
1374        module_put(intf->owner);
1375}
1376
1377int ipmi_destroy_user(struct ipmi_user *user)
1378{
1379        _ipmi_destroy_user(user);
1380
1381        kref_put(&user->refcount, free_user);
1382
1383        return 0;
1384}
1385EXPORT_SYMBOL(ipmi_destroy_user);
1386
1387int ipmi_get_version(struct ipmi_user *user,
1388                     unsigned char *major,
1389                     unsigned char *minor)
1390{
1391        struct ipmi_device_id id;
1392        int rv, index;
1393
1394        user = acquire_ipmi_user(user, &index);
1395        if (!user)
1396                return -ENODEV;
1397
1398        rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1399        if (!rv) {
1400                *major = ipmi_version_major(&id);
1401                *minor = ipmi_version_minor(&id);
1402        }
1403        release_ipmi_user(user, index);
1404
1405        return rv;
1406}
1407EXPORT_SYMBOL(ipmi_get_version);
1408
1409int ipmi_set_my_address(struct ipmi_user *user,
1410                        unsigned int  channel,
1411                        unsigned char address)
1412{
1413        int index, rv = 0;
1414
1415        user = acquire_ipmi_user(user, &index);
1416        if (!user)
1417                return -ENODEV;
1418
1419        if (channel >= IPMI_MAX_CHANNELS) {
1420                rv = -EINVAL;
1421        } else {
1422                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1423                user->intf->addrinfo[channel].address = address;
1424        }
1425        release_ipmi_user(user, index);
1426
1427        return rv;
1428}
1429EXPORT_SYMBOL(ipmi_set_my_address);
1430
1431int ipmi_get_my_address(struct ipmi_user *user,
1432                        unsigned int  channel,
1433                        unsigned char *address)
1434{
1435        int index, rv = 0;
1436
1437        user = acquire_ipmi_user(user, &index);
1438        if (!user)
1439                return -ENODEV;
1440
1441        if (channel >= IPMI_MAX_CHANNELS) {
1442                rv = -EINVAL;
1443        } else {
1444                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1445                *address = user->intf->addrinfo[channel].address;
1446        }
1447        release_ipmi_user(user, index);
1448
1449        return rv;
1450}
1451EXPORT_SYMBOL(ipmi_get_my_address);
1452
1453int ipmi_set_my_LUN(struct ipmi_user *user,
1454                    unsigned int  channel,
1455                    unsigned char LUN)
1456{
1457        int index, rv = 0;
1458
1459        user = acquire_ipmi_user(user, &index);
1460        if (!user)
1461                return -ENODEV;
1462
1463        if (channel >= IPMI_MAX_CHANNELS) {
1464                rv = -EINVAL;
1465        } else {
1466                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1467                user->intf->addrinfo[channel].lun = LUN & 0x3;
1468        }
1469        release_ipmi_user(user, index);
1470
1471        return rv;
1472}
1473EXPORT_SYMBOL(ipmi_set_my_LUN);
1474
1475int ipmi_get_my_LUN(struct ipmi_user *user,
1476                    unsigned int  channel,
1477                    unsigned char *address)
1478{
1479        int index, rv = 0;
1480
1481        user = acquire_ipmi_user(user, &index);
1482        if (!user)
1483                return -ENODEV;
1484
1485        if (channel >= IPMI_MAX_CHANNELS) {
1486                rv = -EINVAL;
1487        } else {
1488                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1489                *address = user->intf->addrinfo[channel].lun;
1490        }
1491        release_ipmi_user(user, index);
1492
1493        return rv;
1494}
1495EXPORT_SYMBOL(ipmi_get_my_LUN);
1496
1497int ipmi_get_maintenance_mode(struct ipmi_user *user)
1498{
1499        int mode, index;
1500        unsigned long flags;
1501
1502        user = acquire_ipmi_user(user, &index);
1503        if (!user)
1504                return -ENODEV;
1505
1506        spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1507        mode = user->intf->maintenance_mode;
1508        spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1509        release_ipmi_user(user, index);
1510
1511        return mode;
1512}
1513EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1514
1515static void maintenance_mode_update(struct ipmi_smi *intf)
1516{
1517        if (intf->handlers->set_maintenance_mode)
1518                intf->handlers->set_maintenance_mode(
1519                        intf->send_info, intf->maintenance_mode_enable);
1520}
1521
1522int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1523{
1524        int rv = 0, index;
1525        unsigned long flags;
1526        struct ipmi_smi *intf = user->intf;
1527
1528        user = acquire_ipmi_user(user, &index);
1529        if (!user)
1530                return -ENODEV;
1531
1532        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1533        if (intf->maintenance_mode != mode) {
1534                switch (mode) {
1535                case IPMI_MAINTENANCE_MODE_AUTO:
1536                        intf->maintenance_mode_enable
1537                                = (intf->auto_maintenance_timeout > 0);
1538                        break;
1539
1540                case IPMI_MAINTENANCE_MODE_OFF:
1541                        intf->maintenance_mode_enable = false;
1542                        break;
1543
1544                case IPMI_MAINTENANCE_MODE_ON:
1545                        intf->maintenance_mode_enable = true;
1546                        break;
1547
1548                default:
1549                        rv = -EINVAL;
1550                        goto out_unlock;
1551                }
1552                intf->maintenance_mode = mode;
1553
1554                maintenance_mode_update(intf);
1555        }
1556 out_unlock:
1557        spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1558        release_ipmi_user(user, index);
1559
1560        return rv;
1561}
1562EXPORT_SYMBOL(ipmi_set_maintenance_mode);
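
/*
 * A short usage sketch: forcing maintenance mode on (for example around
 * a firmware update) and restoring the default automatic behavior
 * afterwards might look like:
 *
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	... perform maintenance traffic ...
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 *
 * Any mode other than the three handled above is rejected with -EINVAL.
 */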
1563
1564int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1565{
1566        unsigned long        flags;
1567        struct ipmi_smi      *intf = user->intf;
1568        struct ipmi_recv_msg *msg, *msg2;
1569        struct list_head     msgs;
1570        int index;
1571
1572        user = acquire_ipmi_user(user, &index);
1573        if (!user)
1574                return -ENODEV;
1575
1576        INIT_LIST_HEAD(&msgs);
1577
1578        spin_lock_irqsave(&intf->events_lock, flags);
1579        if (user->gets_events == val)
1580                goto out;
1581
1582        user->gets_events = val;
1583
1584        if (val) {
1585                if (atomic_inc_return(&intf->event_waiters) == 1)
1586                        need_waiter(intf);
1587        } else {
1588                atomic_dec(&intf->event_waiters);
1589        }
1590
1591        if (intf->delivering_events)
1592                /*
1593                 * Another thread is delivering events for this
1594                 * interface, so let it handle any new events.
1595                 */
1596                goto out;
1597
1598        /* Deliver any queued events. */
1599        while (user->gets_events && !list_empty(&intf->waiting_events)) {
1600                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1601                        list_move_tail(&msg->link, &msgs);
1602                intf->waiting_events_count = 0;
1603                if (intf->event_msg_printed) {
1604                        dev_warn(intf->si_dev, "Event queue no longer full\n");
1605                        intf->event_msg_printed = 0;
1606                }
1607
1608                intf->delivering_events = 1;
1609                spin_unlock_irqrestore(&intf->events_lock, flags);
1610
1611                list_for_each_entry_safe(msg, msg2, &msgs, link) {
1612                        msg->user = user;
1613                        kref_get(&user->refcount);
1614                        deliver_local_response(intf, msg);
1615                }
1616
1617                spin_lock_irqsave(&intf->events_lock, flags);
1618                intf->delivering_events = 0;
1619        }
1620
1621 out:
1622        spin_unlock_irqrestore(&intf->events_lock, flags);
1623        release_ipmi_user(user, index);
1624
1625        return 0;
1626}
1627EXPORT_SYMBOL(ipmi_set_gets_events);
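/*
 * Illustrative sketch only: a kernel client that wants asynchronous
 * events enables delivery once after registering; any events already
 * queued on the interface are handed to its receive handler at that
 * point, as the loop above shows.  'user' is assumed to come from
 * ipmi_create_user().
 *
 *	rv = ipmi_set_gets_events(user, true);
 *	if (rv)
 *		pr_err("could not enable IPMI event delivery: %d\n", rv);
 */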
1628
1629static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1630                                      unsigned char netfn,
1631                                      unsigned char cmd,
1632                                      unsigned char chan)
1633{
1634        struct cmd_rcvr *rcvr;
1635
1636        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1637                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1638                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1639                                        && (rcvr->chans & (1 << chan)))
1640                        return rcvr;
1641        }
1642        return NULL;
1643}
1644
1645static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1646                                 unsigned char netfn,
1647                                 unsigned char cmd,
1648                                 unsigned int  chans)
1649{
1650        struct cmd_rcvr *rcvr;
1651
1652        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1653                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1654                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1655                                        && (rcvr->chans & chans))
1656                        return 0;
1657        }
1658        return 1;
1659}
1660
1661int ipmi_register_for_cmd(struct ipmi_user *user,
1662                          unsigned char netfn,
1663                          unsigned char cmd,
1664                          unsigned int  chans)
1665{
1666        struct ipmi_smi *intf = user->intf;
1667        struct cmd_rcvr *rcvr;
1668        int rv = 0, index;
1669
1670        user = acquire_ipmi_user(user, &index);
1671        if (!user)
1672                return -ENODEV;
1673
1674        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1675        if (!rcvr) {
1676                rv = -ENOMEM;
1677                goto out_release;
1678        }
1679        rcvr->cmd = cmd;
1680        rcvr->netfn = netfn;
1681        rcvr->chans = chans;
1682        rcvr->user = user;
1683
1684        mutex_lock(&intf->cmd_rcvrs_mutex);
1685        /* Make sure the command/netfn is not already registered. */
1686        if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1687                rv = -EBUSY;
1688                goto out_unlock;
1689        }
1690
1691        smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1692
1693        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1694
1695out_unlock:
1696        mutex_unlock(&intf->cmd_rcvrs_mutex);
1697        if (rv)
1698                kfree(rcvr);
1699out_release:
1700        release_ipmi_user(user, index);
1701
1702        return rv;
1703}
1704EXPORT_SYMBOL(ipmi_register_for_cmd);
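/*
 * Illustrative sketch only: registering to receive a command on
 * channel 0 and releasing the registration later.  The netfn/cmd
 * values (0x30/0x01, an OEM pairing) are placeholders, and 'user' is
 * assumed to come from ipmi_create_user().
 *
 *	rv = ipmi_register_for_cmd(user, 0x30, 0x01, 1 << 0);
 *	...
 *	ipmi_unregister_for_cmd(user, 0x30, 0x01, 1 << 0);
 */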
1705
1706int ipmi_unregister_for_cmd(struct ipmi_user *user,
1707                            unsigned char netfn,
1708                            unsigned char cmd,
1709                            unsigned int  chans)
1710{
1711        struct ipmi_smi *intf = user->intf;
1712        struct cmd_rcvr *rcvr;
1713        struct cmd_rcvr *rcvrs = NULL;
1714        int i, rv = -ENOENT, index;
1715
1716        user = acquire_ipmi_user(user, &index);
1717        if (!user)
1718                return -ENODEV;
1719
1720        mutex_lock(&intf->cmd_rcvrs_mutex);
1721        for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1722                if (((1 << i) & chans) == 0)
1723                        continue;
1724                rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1725                if (rcvr == NULL)
1726                        continue;
1727                if (rcvr->user == user) {
1728                        rv = 0;
1729                        rcvr->chans &= ~chans;
1730                        if (rcvr->chans == 0) {
1731                                list_del_rcu(&rcvr->link);
1732                                rcvr->next = rcvrs;
1733                                rcvrs = rcvr;
1734                        }
1735                }
1736        }
1737        mutex_unlock(&intf->cmd_rcvrs_mutex);
1738        synchronize_rcu();
1739        release_ipmi_user(user, index);
1740        while (rcvrs) {
1741                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1742                rcvr = rcvrs;
1743                rcvrs = rcvr->next;
1744                kfree(rcvr);
1745        }
1746
1747        return rv;
1748}
1749EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1750
1751unsigned char
1752ipmb_checksum(unsigned char *data, int size)
1753{
1754        unsigned char csum = 0;
1755
1756        for (; size > 0; size--, data++)
1757                csum += *data;
1758
1759        return -csum;
1760}
1761EXPORT_SYMBOL(ipmb_checksum);
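/*
 * Note on use: ipmb_checksum() returns the two's complement of the
 * byte sum, so a received field verifies cleanly when the covered
 * bytes plus their trailing checksum byte sum to zero.  A sketch of a
 * check a caller might write (the helper name is only illustrative):
 *
 *	static bool ipmb_csum_ok(unsigned char *data, int size)
 *	{
 *		// 'size' includes the trailing checksum byte.
 *		return ipmb_checksum(data, size) == 0;
 *	}
 */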
1762
1763static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1764                                   struct kernel_ipmi_msg *msg,
1765                                   struct ipmi_ipmb_addr *ipmb_addr,
1766                                   long                  msgid,
1767                                   unsigned char         ipmb_seq,
1768                                   int                   broadcast,
1769                                   unsigned char         source_address,
1770                                   unsigned char         source_lun)
1771{
1772        int i = broadcast;
1773
1774        /* Format the IPMB header data. */
1775        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1776        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1777        smi_msg->data[2] = ipmb_addr->channel;
1778        if (broadcast)
1779                smi_msg->data[3] = 0;
1780        smi_msg->data[i+3] = ipmb_addr->slave_addr;
1781        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1782        smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1783        smi_msg->data[i+6] = source_address;
1784        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1785        smi_msg->data[i+8] = msg->cmd;
1786
1787        /* Now tack on the data to the message. */
1788        if (msg->data_len > 0)
1789                memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1790        smi_msg->data_size = msg->data_len + 9;
1791
1792        /* Now calculate the checksum and tack it on. */
1793        smi_msg->data[i+smi_msg->data_size]
1794                = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1795
1796        /*
1797         * Add on the checksum size and the offset from the
1798         * broadcast.
1799         */
1800        smi_msg->data_size += 1 + i;
1801
1802        smi_msg->msgid = msgid;
1803}
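/*
 * For reference, the buffer built above for a non-broadcast request
 * (i == 0) restated as a byte map; this is only a summary of the
 * assignments in format_ipmb_msg():
 *
 *	data[0]  Send Message netfn/LUN     data[5]  checksum of data[3..4]
 *	data[1]  Send Message command       data[6]  source (requester) address
 *	data[2]  channel                    data[7]  rqSeq / source LUN
 *	data[3]  responder slave address    data[8]  command
 *	data[4]  request netfn / rsLUN      data[9+] payload, then a trailing
 *	                                             checksum over data[6] onward
 */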
1804
1805static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1806                                  struct kernel_ipmi_msg *msg,
1807                                  struct ipmi_lan_addr  *lan_addr,
1808                                  long                  msgid,
1809                                  unsigned char         ipmb_seq,
1810                                  unsigned char         source_lun)
1811{
1812        /* Format the IPMB header data. */
1813        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1814        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1815        smi_msg->data[2] = lan_addr->channel;
1816        smi_msg->data[3] = lan_addr->session_handle;
1817        smi_msg->data[4] = lan_addr->remote_SWID;
1818        smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1819        smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1820        smi_msg->data[7] = lan_addr->local_SWID;
1821        smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1822        smi_msg->data[9] = msg->cmd;
1823
1824        /* Now tack on the data to the message. */
1825        if (msg->data_len > 0)
1826                memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1827        smi_msg->data_size = msg->data_len + 10;
1828
1829        /* Now calculate the checksum and tack it on. */
1830        smi_msg->data[smi_msg->data_size]
1831                = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1832
1833        /*
1834         * Add on the checksum size.  There is no broadcast
1835         * offset here, unlike the IPMB case above.
1836         */
1837        smi_msg->data_size += 1;
1838
1839        smi_msg->msgid = msgid;
1840}
1841
1842static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1843                                             struct ipmi_smi_msg *smi_msg,
1844                                             int priority)
1845{
1846        if (intf->curr_msg) {
1847                if (priority > 0)
1848                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1849                else
1850                        list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1851                smi_msg = NULL;
1852        } else {
1853                intf->curr_msg = smi_msg;
1854        }
1855
1856        return smi_msg;
1857}
1858
1859static void smi_send(struct ipmi_smi *intf,
1860                     const struct ipmi_smi_handlers *handlers,
1861                     struct ipmi_smi_msg *smi_msg, int priority)
1862{
1863        int run_to_completion = intf->run_to_completion;
1864        unsigned long flags = 0;
1865
1866        if (!run_to_completion)
1867                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1868        smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1869
1870        if (!run_to_completion)
1871                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1872
1873        if (smi_msg)
1874                handlers->sender(intf->send_info, smi_msg);
1875}
1876
1877static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1878{
1879        return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1880                 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1881                     || (msg->cmd == IPMI_WARM_RESET_CMD)))
1882                || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1883}
1884
1885static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1886                              struct ipmi_addr       *addr,
1887                              long                   msgid,
1888                              struct kernel_ipmi_msg *msg,
1889                              struct ipmi_smi_msg    *smi_msg,
1890                              struct ipmi_recv_msg   *recv_msg,
1891                              int                    retries,
1892                              unsigned int           retry_time_ms)
1893{
1894        struct ipmi_system_interface_addr *smi_addr;
1895
1896        if (msg->netfn & 1)
1897                /* Responses are not allowed to the SMI. */
1898                return -EINVAL;
1899
1900        smi_addr = (struct ipmi_system_interface_addr *) addr;
1901        if (smi_addr->lun > 3) {
1902                ipmi_inc_stat(intf, sent_invalid_commands);
1903                return -EINVAL;
1904        }
1905
1906        memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1907
1908        if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1909            && ((msg->cmd == IPMI_SEND_MSG_CMD)
1910                || (msg->cmd == IPMI_GET_MSG_CMD)
1911                || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1912                /*
1913                 * We don't let the user do these, since we manage
1914                 * the sequence numbers.
1915                 */
1916                ipmi_inc_stat(intf, sent_invalid_commands);
1917                return -EINVAL;
1918        }
1919
1920        if (is_maintenance_mode_cmd(msg)) {
1921                unsigned long flags;
1922
1923                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1924                intf->auto_maintenance_timeout
1925                        = maintenance_mode_timeout_ms;
1926                if (!intf->maintenance_mode
1927                    && !intf->maintenance_mode_enable) {
1928                        intf->maintenance_mode_enable = true;
1929                        maintenance_mode_update(intf);
1930                }
1931                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1932                                       flags);
1933        }
1934
1935        if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1936                ipmi_inc_stat(intf, sent_invalid_commands);
1937                return -EMSGSIZE;
1938        }
1939
1940        smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1941        smi_msg->data[1] = msg->cmd;
1942        smi_msg->msgid = msgid;
1943        smi_msg->user_data = recv_msg;
1944        if (msg->data_len > 0)
1945                memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1946        smi_msg->data_size = msg->data_len + 2;
1947        ipmi_inc_stat(intf, sent_local_commands);
1948
1949        return 0;
1950}
1951
1952static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1953                           struct ipmi_addr       *addr,
1954                           long                   msgid,
1955                           struct kernel_ipmi_msg *msg,
1956                           struct ipmi_smi_msg    *smi_msg,
1957                           struct ipmi_recv_msg   *recv_msg,
1958                           unsigned char          source_address,
1959                           unsigned char          source_lun,
1960                           int                    retries,
1961                           unsigned int           retry_time_ms)
1962{
1963        struct ipmi_ipmb_addr *ipmb_addr;
1964        unsigned char ipmb_seq;
1965        long seqid;
1966        int broadcast = 0;
1967        struct ipmi_channel *chans;
1968        int rv = 0;
1969
1970        if (addr->channel >= IPMI_MAX_CHANNELS) {
1971                ipmi_inc_stat(intf, sent_invalid_commands);
1972                return -EINVAL;
1973        }
1974
1975        chans = READ_ONCE(intf->channel_list)->c;
1976
1977        if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1978                ipmi_inc_stat(intf, sent_invalid_commands);
1979                return -EINVAL;
1980        }
1981
1982        if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1983                /*
1984                 * A broadcast adds a zero at the beginning of the
1985                 * message but is otherwise the same as an IPMB
1986                 * address.
1987                 */
1988                addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1989                broadcast = 1;
1990                retries = 0; /* Don't retry broadcasts. */
1991        }
1992
1993        /*
1994         * 9 for the header and 1 for the checksum, plus
1995         * possibly one for the broadcast.
1996         */
1997        if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1998                ipmi_inc_stat(intf, sent_invalid_commands);
1999                return -EMSGSIZE;
2000        }
2001
2002        ipmb_addr = (struct ipmi_ipmb_addr *) addr;
2003        if (ipmb_addr->lun > 3) {
2004                ipmi_inc_stat(intf, sent_invalid_commands);
2005                return -EINVAL;
2006        }
2007
2008        memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
2009
2010        if (recv_msg->msg.netfn & 0x1) {
2011                /*
2012                 * It's a response, so use the user's sequence
2013                 * from msgid.
2014                 */
2015                ipmi_inc_stat(intf, sent_ipmb_responses);
2016                format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
2017                                msgid, broadcast,
2018                                source_address, source_lun);
2019
2020                /*
2021                 * Save the receive message so we can use it
2022                 * to deliver the response.
2023                 */
2024                smi_msg->user_data = recv_msg;
2025        } else {
2026                /* It's a command, so get a sequence for it. */
2027                unsigned long flags;
2028
2029                spin_lock_irqsave(&intf->seq_lock, flags);
2030
2031                if (is_maintenance_mode_cmd(msg))
2032                        intf->ipmb_maintenance_mode_timeout =
2033                                maintenance_mode_timeout_ms;
2034
2035                if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2036                        /* Different default in maintenance mode */
2037                        retry_time_ms = default_maintenance_retry_ms;
2038
2039                /*
2040                 * Create a sequence number with a 1 second
2041                 * timeout and 4 retries.
2042                 */
2043                rv = intf_next_seq(intf,
2044                                   recv_msg,
2045                                   retry_time_ms,
2046                                   retries,
2047                                   broadcast,
2048                                   &ipmb_seq,
2049                                   &seqid);
2050                if (rv)
2051                        /*
2052                         * We have probably used up all the sequence
2053                         * numbers, so abort.
2054                         */
2055                        goto out_err;
2056
2057                ipmi_inc_stat(intf, sent_ipmb_commands);
2058
2059                /*
2060                 * Store the sequence number in the message,
2061                 * so that when the send message response
2062                 * comes back we can start the timer.
2063                 */
2064                format_ipmb_msg(smi_msg, msg, ipmb_addr,
2065                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2066                                ipmb_seq, broadcast,
2067                                source_address, source_lun);
2068
2069                /*
2070                 * Copy the message into the recv message data, so we
2071                 * can retransmit it later if necessary.
2072                 */
2073                memcpy(recv_msg->msg_data, smi_msg->data,
2074                       smi_msg->data_size);
2075                recv_msg->msg.data = recv_msg->msg_data;
2076                recv_msg->msg.data_len = smi_msg->data_size;
2077
2078                /*
2079                 * We don't unlock until here, because we need
2080                 * to copy the completed message into the
2081                 * recv_msg before we release the lock.
2082                 * Otherwise, race conditions may bite us.  I
2083                 * know that's pretty paranoid, but I prefer
2084                 * to be correct.
2085                 */
2086out_err:
2087                spin_unlock_irqrestore(&intf->seq_lock, flags);
2088        }
2089
2090        return rv;
2091}
2092
2093static int i_ipmi_req_ipmb_direct(struct ipmi_smi        *intf,
2094                                  struct ipmi_addr       *addr,
2095                                  long                   msgid,
2096                                  struct kernel_ipmi_msg *msg,
2097                                  struct ipmi_smi_msg    *smi_msg,
2098                                  struct ipmi_recv_msg   *recv_msg,
2099                                  unsigned char          source_lun)
2100{
2101        struct ipmi_ipmb_direct_addr *daddr;
2102        bool is_cmd = !(recv_msg->msg.netfn & 0x1);
2103
2104        if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
2105                return -EAFNOSUPPORT;
2106
2107        /* Responses must have a completion code. */
2108        if (!is_cmd && msg->data_len < 1) {
2109                ipmi_inc_stat(intf, sent_invalid_commands);
2110                return -EINVAL;
2111        }
2112
2113        if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
2114                ipmi_inc_stat(intf, sent_invalid_commands);
2115                return -EMSGSIZE;
2116        }
2117
2118        daddr = (struct ipmi_ipmb_direct_addr *) addr;
2119        if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
2120                ipmi_inc_stat(intf, sent_invalid_commands);
2121                return -EINVAL;
2122        }
2123
2124        smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
2125        smi_msg->msgid = msgid;
2126
2127        if (is_cmd) {
2128                smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
2129                smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
2130        } else {
2131                smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
2132                smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
2133        }
2134        smi_msg->data[1] = daddr->slave_addr;
2135        smi_msg->data[3] = msg->cmd;
2136
2137        memcpy(smi_msg->data + 4, msg->data, msg->data_len);
2138        smi_msg->data_size = msg->data_len + 4;
2139
2140        smi_msg->user_data = recv_msg;
2141
2142        return 0;
2143}
2144
2145static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2146                          struct ipmi_addr       *addr,
2147                          long                   msgid,
2148                          struct kernel_ipmi_msg *msg,
2149                          struct ipmi_smi_msg    *smi_msg,
2150                          struct ipmi_recv_msg   *recv_msg,
2151                          unsigned char          source_lun,
2152                          int                    retries,
2153                          unsigned int           retry_time_ms)
2154{
2155        struct ipmi_lan_addr  *lan_addr;
2156        unsigned char ipmb_seq;
2157        long seqid;
2158        struct ipmi_channel *chans;
2159        int rv = 0;
2160
2161        if (addr->channel >= IPMI_MAX_CHANNELS) {
2162                ipmi_inc_stat(intf, sent_invalid_commands);
2163                return -EINVAL;
2164        }
2165
2166        chans = READ_ONCE(intf->channel_list)->c;
2167
2168        if ((chans[addr->channel].medium
2169                                != IPMI_CHANNEL_MEDIUM_8023LAN)
2170                        && (chans[addr->channel].medium
2171                            != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2172                ipmi_inc_stat(intf, sent_invalid_commands);
2173                return -EINVAL;
2174        }
2175
2176        /* 11 for the header and 1 for the checksum. */
2177        if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2178                ipmi_inc_stat(intf, sent_invalid_commands);
2179                return -EMSGSIZE;
2180        }
2181
2182        lan_addr = (struct ipmi_lan_addr *) addr;
2183        if (lan_addr->lun > 3) {
2184                ipmi_inc_stat(intf, sent_invalid_commands);
2185                return -EINVAL;
2186        }
2187
2188        memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2189
2190        if (recv_msg->msg.netfn & 0x1) {
2191                /*
2192                 * It's a response, so use the user's sequence
2193                 * from msgid.
2194                 */
2195                ipmi_inc_stat(intf, sent_lan_responses);
2196                format_lan_msg(smi_msg, msg, lan_addr, msgid,
2197                               msgid, source_lun);
2198
2199                /*
2200                 * Save the receive message so we can use it
2201                 * to deliver the response.
2202                 */
2203                smi_msg->user_data = recv_msg;
2204        } else {
2205                /* It's a command, so get a sequence for it. */
2206                unsigned long flags;
2207
2208                spin_lock_irqsave(&intf->seq_lock, flags);
2209
2210                /*
2211                 * Create a sequence number with a 1 second
2212                 * timeout and 4 retries.
2213                 */
2214                rv = intf_next_seq(intf,
2215                                   recv_msg,
2216                                   retry_time_ms,
2217                                   retries,
2218                                   0,
2219                                   &ipmb_seq,
2220                                   &seqid);
2221                if (rv)
2222                        /*
2223                         * We have probably used up all the sequence
2224                         * numbers, so abort.
2225                         */
2226                        goto out_err;
2227
2228                ipmi_inc_stat(intf, sent_lan_commands);
2229
2230                /*
2231                 * Store the sequence number in the message,
2232                 * so that when the send message response
2233                 * comes back we can start the timer.
2234                 */
2235                format_lan_msg(smi_msg, msg, lan_addr,
2236                               STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2237                               ipmb_seq, source_lun);
2238
2239                /*
2240                 * Copy the message into the recv message data, so we
2241                 * can retransmit it later if necessary.
2242                 */
2243                memcpy(recv_msg->msg_data, smi_msg->data,
2244                       smi_msg->data_size);
2245                recv_msg->msg.data = recv_msg->msg_data;
2246                recv_msg->msg.data_len = smi_msg->data_size;
2247
2248                /*
2249                 * We don't unlock until here, because we need
2250                 * to copy the completed message into the
2251                 * recv_msg before we release the lock.
2252                 * Otherwise, race conditions may bite us.  I
2253                 * know that's pretty paranoid, but I prefer
2254                 * to be correct.
2255                 */
2256out_err:
2257                spin_unlock_irqrestore(&intf->seq_lock, flags);
2258        }
2259
2260        return rv;
2261}
2262
2263/*
2264 * Separate from ipmi_request so that the user does not have to be
2265 * supplied in certain circumstances (mainly at panic time).  If
2266 * messages are supplied, they will be freed, even if an error
2267 * occurs.
2268 */
2269static int i_ipmi_request(struct ipmi_user     *user,
2270                          struct ipmi_smi      *intf,
2271                          struct ipmi_addr     *addr,
2272                          long                 msgid,
2273                          struct kernel_ipmi_msg *msg,
2274                          void                 *user_msg_data,
2275                          void                 *supplied_smi,
2276                          struct ipmi_recv_msg *supplied_recv,
2277                          int                  priority,
2278                          unsigned char        source_address,
2279                          unsigned char        source_lun,
2280                          int                  retries,
2281                          unsigned int         retry_time_ms)
2282{
2283        struct ipmi_smi_msg *smi_msg;
2284        struct ipmi_recv_msg *recv_msg;
2285        int rv = 0;
2286
2287        if (supplied_recv)
2288                recv_msg = supplied_recv;
2289        else {
2290                recv_msg = ipmi_alloc_recv_msg();
2291                if (recv_msg == NULL) {
2292                        rv = -ENOMEM;
2293                        goto out;
2294                }
2295        }
2296        recv_msg->user_msg_data = user_msg_data;
2297
2298        if (supplied_smi)
2299                smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2300        else {
2301                smi_msg = ipmi_alloc_smi_msg();
2302                if (smi_msg == NULL) {
2303                        if (!supplied_recv)
2304                                ipmi_free_recv_msg(recv_msg);
2305                        rv = -ENOMEM;
2306                        goto out;
2307                }
2308        }
2309
2310        rcu_read_lock();
2311        if (intf->in_shutdown) {
2312                rv = -ENODEV;
2313                goto out_err;
2314        }
2315
2316        recv_msg->user = user;
2317        if (user)
2318                /* The put happens when the message is freed. */
2319                kref_get(&user->refcount);
2320        recv_msg->msgid = msgid;
2321        /*
2322         * Store the message to send in the receive message so timeout
2323         * responses can get the proper response data.
2324         */
2325        recv_msg->msg = *msg;
2326
2327        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2328                rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2329                                        recv_msg, retries, retry_time_ms);
2330        } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2331                rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2332                                     source_address, source_lun,
2333                                     retries, retry_time_ms);
2334        } else if (is_ipmb_direct_addr(addr)) {
2335                rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
2336                                            recv_msg, source_lun);
2337        } else if (is_lan_addr(addr)) {
2338                rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2339                                    source_lun, retries, retry_time_ms);
2340        } else {
2341                /* Unknown address type. */
2342                ipmi_inc_stat(intf, sent_invalid_commands);
2343                rv = -EINVAL;
2344        }
2345
2346        if (rv) {
2347out_err:
2348                ipmi_free_smi_msg(smi_msg);
2349                ipmi_free_recv_msg(recv_msg);
2350        } else {
2351                pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
2352
2353                smi_send(intf, intf->handlers, smi_msg, priority);
2354        }
2355        rcu_read_unlock();
2356
2357out:
2358        return rv;
2359}
2360
2361static int check_addr(struct ipmi_smi  *intf,
2362                      struct ipmi_addr *addr,
2363                      unsigned char    *saddr,
2364                      unsigned char    *lun)
2365{
2366        if (addr->channel >= IPMI_MAX_CHANNELS)
2367                return -EINVAL;
2368        addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2369        *lun = intf->addrinfo[addr->channel].lun;
2370        *saddr = intf->addrinfo[addr->channel].address;
2371        return 0;
2372}
2373
2374int ipmi_request_settime(struct ipmi_user *user,
2375                         struct ipmi_addr *addr,
2376                         long             msgid,
2377                         struct kernel_ipmi_msg  *msg,
2378                         void             *user_msg_data,
2379                         int              priority,
2380                         int              retries,
2381                         unsigned int     retry_time_ms)
2382{
2383        unsigned char saddr = 0, lun = 0;
2384        int rv, index;
2385
2386        if (!user)
2387                return -EINVAL;
2388
2389        user = acquire_ipmi_user(user, &index);
2390        if (!user)
2391                return -ENODEV;
2392
2393        rv = check_addr(user->intf, addr, &saddr, &lun);
2394        if (!rv)
2395                rv = i_ipmi_request(user,
2396                                    user->intf,
2397                                    addr,
2398                                    msgid,
2399                                    msg,
2400                                    user_msg_data,
2401                                    NULL, NULL,
2402                                    priority,
2403                                    saddr,
2404                                    lun,
2405                                    retries,
2406                                    retry_time_ms);
2407
2408        release_ipmi_user(user, index);
2409        return rv;
2410}
2411EXPORT_SYMBOL(ipmi_request_settime);
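/*
 * Illustrative sketch only, mirroring what send_get_device_id_cmd()
 * below does internally: a kernel client sending Get Device ID to the
 * local BMC through this exported entry point.  'user' is assumed to
 * come from ipmi_create_user(), and the response is delivered to that
 * user's receive handler.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd   = IPMI_GET_DEVICE_ID_CMD,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0, &msg,
 *				  NULL, 0, -1, 0);
 */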
2412
2413int ipmi_request_supply_msgs(struct ipmi_user     *user,
2414                             struct ipmi_addr     *addr,
2415                             long                 msgid,
2416                             struct kernel_ipmi_msg *msg,
2417                             void                 *user_msg_data,
2418                             void                 *supplied_smi,
2419                             struct ipmi_recv_msg *supplied_recv,
2420                             int                  priority)
2421{
2422        unsigned char saddr = 0, lun = 0;
2423        int rv, index;
2424
2425        if (!user)
2426                return -EINVAL;
2427
2428        user = acquire_ipmi_user(user, &index);
2429        if (!user)
2430                return -ENODEV;
2431
2432        rv = check_addr(user->intf, addr, &saddr, &lun);
2433        if (!rv)
2434                rv = i_ipmi_request(user,
2435                                    user->intf,
2436                                    addr,
2437                                    msgid,
2438                                    msg,
2439                                    user_msg_data,
2440                                    supplied_smi,
2441                                    supplied_recv,
2442                                    priority,
2443                                    saddr,
2444                                    lun,
2445                                    -1, 0);
2446
2447        release_ipmi_user(user, index);
2448        return rv;
2449}
2450EXPORT_SYMBOL(ipmi_request_supply_msgs);
2451
2452static void bmc_device_id_handler(struct ipmi_smi *intf,
2453                                  struct ipmi_recv_msg *msg)
2454{
2455        int rv;
2456
2457        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2458                        || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2459                        || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2460                dev_warn(intf->si_dev,
2461                         "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2462                         msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2463                return;
2464        }
2465
2466        if (msg->msg.data[0]) {
2467                dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
2468                         msg->msg.data[0]);
2469                intf->bmc->dyn_id_set = 0;
2470                goto out;
2471        }
2472
2473        rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2474                        msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2475        if (rv) {
2476                dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2477                /* Record the completion code on error. */
2478                intf->bmc->cc = msg->msg.data[0];
2479                intf->bmc->dyn_id_set = 0;
2480        } else {
2481                /*
2482                 * Make sure the id data is available before setting
2483                 * dyn_id_set.
2484                 */
2485                smp_wmb();
2486                intf->bmc->dyn_id_set = 1;
2487        }
2488out:
2489        wake_up(&intf->waitq);
2490}
2491
2492static int
2493send_get_device_id_cmd(struct ipmi_smi *intf)
2494{
2495        struct ipmi_system_interface_addr si;
2496        struct kernel_ipmi_msg msg;
2497
2498        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2499        si.channel = IPMI_BMC_CHANNEL;
2500        si.lun = 0;
2501
2502        msg.netfn = IPMI_NETFN_APP_REQUEST;
2503        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2504        msg.data = NULL;
2505        msg.data_len = 0;
2506
2507        return i_ipmi_request(NULL,
2508                              intf,
2509                              (struct ipmi_addr *) &si,
2510                              0,
2511                              &msg,
2512                              intf,
2513                              NULL,
2514                              NULL,
2515                              0,
2516                              intf->addrinfo[0].address,
2517                              intf->addrinfo[0].lun,
2518                              -1, 0);
2519}
2520
2521static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2522{
2523        int rv;
2524        unsigned int retry_count = 0;
2525
2526        intf->null_user_handler = bmc_device_id_handler;
2527
2528retry:
2529        bmc->cc = 0;
2530        bmc->dyn_id_set = 2;
2531
2532        rv = send_get_device_id_cmd(intf);
2533        if (rv)
2534                goto out_reset_handler;
2535
2536        wait_event(intf->waitq, bmc->dyn_id_set != 2);
2537
2538        if (!bmc->dyn_id_set) {
2539                if (bmc->cc != IPMI_CC_NO_ERROR &&
2540                    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2541                        msleep(500);
2542                        dev_warn(intf->si_dev,
2543                            "BMC returned 0x%2.2x, retry get bmc device id\n",
2544                            bmc->cc);
2545                        goto retry;
2546                }
2547
2548                rv = -EIO; /* Something went wrong in the fetch. */
2549        }
2550
2551        /* Pairs with the smp_wmb() in the handler; dyn_id_set makes the id data available. */
2552        smp_rmb();
2553
2554out_reset_handler:
2555        intf->null_user_handler = NULL;
2556
2557        return rv;
2558}
2559
2560/*
2561 * Fetch the device id for the bmc/interface.  You must pass in either
2562 * bmc or intf; this code will get the other one.  If the data has
2563 * been recently fetched, this will just use the cached data.  Otherwise
2564 * it will run a new fetch.
2565 *
2566 * Except for the first time this is called (in ipmi_add_smi()),
2567 * this will always return good data.
2568 */
2569static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2570                               struct ipmi_device_id *id,
2571                               bool *guid_set, guid_t *guid, int intf_num)
2572{
2573        int rv = 0;
2574        int prev_dyn_id_set, prev_guid_set;
2575        bool intf_set = intf != NULL;
2576
2577        if (!intf) {
2578                mutex_lock(&bmc->dyn_mutex);
2579retry_bmc_lock:
2580                if (list_empty(&bmc->intfs)) {
2581                        mutex_unlock(&bmc->dyn_mutex);
2582                        return -ENOENT;
2583                }
2584                intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2585                                        bmc_link);
2586                kref_get(&intf->refcount);
2587                mutex_unlock(&bmc->dyn_mutex);
2588                mutex_lock(&intf->bmc_reg_mutex);
2589                mutex_lock(&bmc->dyn_mutex);
2590                if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2591                                             bmc_link)) {
2592                        mutex_unlock(&intf->bmc_reg_mutex);
2593                        kref_put(&intf->refcount, intf_free);
2594                        goto retry_bmc_lock;
2595                }
2596        } else {
2597                mutex_lock(&intf->bmc_reg_mutex);
2598                bmc = intf->bmc;
2599                mutex_lock(&bmc->dyn_mutex);
2600                kref_get(&intf->refcount);
2601        }
2602
2603        /* If we have a valid and current ID, just return that. */
2604        if (intf->in_bmc_register ||
2605            (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2606                goto out_noprocessing;
2607
2608        prev_guid_set = bmc->dyn_guid_set;
2609        __get_guid(intf);
2610
2611        prev_dyn_id_set = bmc->dyn_id_set;
2612        rv = __get_device_id(intf, bmc);
2613        if (rv)
2614                goto out;
2615
2616        /*
2617         * The guid, device id, manufacturer id, and product id should
2618         * not change on a BMC.  If they do, we have to do some dancing.
2619         */
2620        if (!intf->bmc_registered
2621            || (!prev_guid_set && bmc->dyn_guid_set)
2622            || (!prev_dyn_id_set && bmc->dyn_id_set)
2623            || (prev_guid_set && bmc->dyn_guid_set
2624                && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2625            || bmc->id.device_id != bmc->fetch_id.device_id
2626            || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2627            || bmc->id.product_id != bmc->fetch_id.product_id) {
2628                struct ipmi_device_id id = bmc->fetch_id;
2629                int guid_set = bmc->dyn_guid_set;
2630                guid_t guid;
2631
2632                guid = bmc->fetch_guid;
2633                mutex_unlock(&bmc->dyn_mutex);
2634
2635                __ipmi_bmc_unregister(intf);
2636                /* Fill in the temporary BMC for good measure. */
2637                intf->bmc->id = id;
2638                intf->bmc->dyn_guid_set = guid_set;
2639                intf->bmc->guid = guid;
2640                if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2641                        need_waiter(intf); /* Retry later on an error. */
2642                else
2643                        __scan_channels(intf, &id);
2644
2645
2646                if (!intf_set) {
2647                        /*
2648                         * We weren't given the interface on the
2649                         * command line, so restart the operation on
2650                         * the next interface for the BMC.
2651                         */
2652                        mutex_unlock(&intf->bmc_reg_mutex);
2653                        mutex_lock(&bmc->dyn_mutex);
2654                        goto retry_bmc_lock;
2655                }
2656
2657                /* We have a new BMC, set it up. */
2658                bmc = intf->bmc;
2659                mutex_lock(&bmc->dyn_mutex);
2660                goto out_noprocessing;
2661        } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2662                /* Version info changed, scan the channels again. */
2663                __scan_channels(intf, &bmc->fetch_id);
2664
2665        bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2666
2667out:
2668        if (rv && prev_dyn_id_set) {
2669                rv = 0; /* Ignore failures if we have previous data. */
2670                bmc->dyn_id_set = prev_dyn_id_set;
2671        }
2672        if (!rv) {
2673                bmc->id = bmc->fetch_id;
2674                if (bmc->dyn_guid_set)
2675                        bmc->guid = bmc->fetch_guid;
2676                else if (prev_guid_set)
2677                        /*
2678                         * The guid used to be valid but this fetch failed,
2679                         * so just use the cached value.
2680                         */
2681                        bmc->dyn_guid_set = prev_guid_set;
2682        }
2683out_noprocessing:
2684        if (!rv) {
2685                if (id)
2686                        *id = bmc->id;
2687
2688                if (guid_set)
2689                        *guid_set = bmc->dyn_guid_set;
2690
2691                if (guid && bmc->dyn_guid_set)
2692                        *guid =  bmc->guid;
2693        }
2694
2695        mutex_unlock(&bmc->dyn_mutex);
2696        mutex_unlock(&intf->bmc_reg_mutex);
2697
2698        kref_put(&intf->refcount, intf_free);
2699        return rv;
2700}
2701
2702static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2703                             struct ipmi_device_id *id,
2704                             bool *guid_set, guid_t *guid)
2705{
2706        return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2707}
2708
2709static ssize_t device_id_show(struct device *dev,
2710                              struct device_attribute *attr,
2711                              char *buf)
2712{
2713        struct bmc_device *bmc = to_bmc_device(dev);
2714        struct ipmi_device_id id;
2715        int rv;
2716
2717        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2718        if (rv)
2719                return rv;
2720
2721        return sysfs_emit(buf, "%u\n", id.device_id);
2722}
2723static DEVICE_ATTR_RO(device_id);
2724
2725static ssize_t provides_device_sdrs_show(struct device *dev,
2726                                         struct device_attribute *attr,
2727                                         char *buf)
2728{
2729        struct bmc_device *bmc = to_bmc_device(dev);
2730        struct ipmi_device_id id;
2731        int rv;
2732
2733        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2734        if (rv)
2735                return rv;
2736
2737        return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
2738}
2739static DEVICE_ATTR_RO(provides_device_sdrs);
2740
2741static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2742                             char *buf)
2743{
2744        struct bmc_device *bmc = to_bmc_device(dev);
2745        struct ipmi_device_id id;
2746        int rv;
2747
2748        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2749        if (rv)
2750                return rv;
2751
2752        return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
2753}
2754static DEVICE_ATTR_RO(revision);
2755
2756static ssize_t firmware_revision_show(struct device *dev,
2757                                      struct device_attribute *attr,
2758                                      char *buf)
2759{
2760        struct bmc_device *bmc = to_bmc_device(dev);
2761        struct ipmi_device_id id;
2762        int rv;
2763
2764        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2765        if (rv)
2766                return rv;
2767
2768        return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
2769                        id.firmware_revision_2);
2770}
2771static DEVICE_ATTR_RO(firmware_revision);
2772
2773static ssize_t ipmi_version_show(struct device *dev,
2774                                 struct device_attribute *attr,
2775                                 char *buf)
2776{
2777        struct bmc_device *bmc = to_bmc_device(dev);
2778        struct ipmi_device_id id;
2779        int rv;
2780
2781        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2782        if (rv)
2783                return rv;
2784
2785        return sysfs_emit(buf, "%u.%u\n",
2786                        ipmi_version_major(&id),
2787                        ipmi_version_minor(&id));
2788}
2789static DEVICE_ATTR_RO(ipmi_version);
2790
2791static ssize_t add_dev_support_show(struct device *dev,
2792                                    struct device_attribute *attr,
2793                                    char *buf)
2794{
2795        struct bmc_device *bmc = to_bmc_device(dev);
2796        struct ipmi_device_id id;
2797        int rv;
2798
2799        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2800        if (rv)
2801                return rv;
2802
2803        return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
2804}
2805static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2806                   NULL);
2807
2808static ssize_t manufacturer_id_show(struct device *dev,
2809                                    struct device_attribute *attr,
2810                                    char *buf)
2811{
2812        struct bmc_device *bmc = to_bmc_device(dev);
2813        struct ipmi_device_id id;
2814        int rv;
2815
2816        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2817        if (rv)
2818                return rv;
2819
2820        return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
2821}
2822static DEVICE_ATTR_RO(manufacturer_id);
2823
2824static ssize_t product_id_show(struct device *dev,
2825                               struct device_attribute *attr,
2826                               char *buf)
2827{
2828        struct bmc_device *bmc = to_bmc_device(dev);
2829        struct ipmi_device_id id;
2830        int rv;
2831
2832        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2833        if (rv)
2834                return rv;
2835
2836        return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
2837}
2838static DEVICE_ATTR_RO(product_id);
2839
2840static ssize_t aux_firmware_rev_show(struct device *dev,
2841                                     struct device_attribute *attr,
2842                                     char *buf)
2843{
2844        struct bmc_device *bmc = to_bmc_device(dev);
2845        struct ipmi_device_id id;
2846        int rv;
2847
2848        rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2849        if (rv)
2850                return rv;
2851
2852        return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2853                        id.aux_firmware_revision[3],
2854                        id.aux_firmware_revision[2],
2855                        id.aux_firmware_revision[1],
2856                        id.aux_firmware_revision[0]);
2857}
2858static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2859
2860static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2861                         char *buf)
2862{
2863        struct bmc_device *bmc = to_bmc_device(dev);
2864        bool guid_set;
2865        guid_t guid;
2866        int rv;
2867
2868        rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2869        if (rv)
2870                return rv;
2871        if (!guid_set)
2872                return -ENOENT;
2873
2874        return sysfs_emit(buf, "%pUl\n", &guid);
2875}
2876static DEVICE_ATTR_RO(guid);
2877
2878static struct attribute *bmc_dev_attrs[] = {
2879        &dev_attr_device_id.attr,
2880        &dev_attr_provides_device_sdrs.attr,
2881        &dev_attr_revision.attr,
2882        &dev_attr_firmware_revision.attr,
2883        &dev_attr_ipmi_version.attr,
2884        &dev_attr_additional_device_support.attr,
2885        &dev_attr_manufacturer_id.attr,
2886        &dev_attr_product_id.attr,
2887        &dev_attr_aux_firmware_revision.attr,
2888        &dev_attr_guid.attr,
2889        NULL
2890};
2891
2892static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2893                                       struct attribute *attr, int idx)
2894{
2895        struct device *dev = kobj_to_dev(kobj);
2896        struct bmc_device *bmc = to_bmc_device(dev);
2897        umode_t mode = attr->mode;
2898        int rv;
2899
2900        if (attr == &dev_attr_aux_firmware_revision.attr) {
2901                struct ipmi_device_id id;
2902
2903                rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2904                return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2905        }
2906        if (attr == &dev_attr_guid.attr) {
2907                bool guid_set;
2908
2909                rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2910                return (!rv && guid_set) ? mode : 0;
2911        }
2912        return mode;
2913}
2914
2915static const struct attribute_group bmc_dev_attr_group = {
2916        .attrs          = bmc_dev_attrs,
2917        .is_visible     = bmc_dev_attr_is_visible,
2918};
2919
2920static const struct attribute_group *bmc_dev_attr_groups[] = {
2921        &bmc_dev_attr_group,
2922        NULL
2923};
2924
2925static const struct device_type bmc_device_type = {
2926        .groups         = bmc_dev_attr_groups,
2927};
2928
2929static int __find_bmc_guid(struct device *dev, const void *data)
2930{
2931        const guid_t *guid = data;
2932        struct bmc_device *bmc;
2933        int rv;
2934
2935        if (dev->type != &bmc_device_type)
2936                return 0;
2937
2938        bmc = to_bmc_device(dev);
2939        rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2940        if (rv)
2941                rv = kref_get_unless_zero(&bmc->usecount);
2942        return rv;
2943}
2944
2945/*
2946 * Returns with the bmc's usecount incremented, if it is non-NULL.
2947 */
2948static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2949                                             guid_t *guid)
2950{
2951        struct device *dev;
2952        struct bmc_device *bmc = NULL;
2953
2954        dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2955        if (dev) {
2956                bmc = to_bmc_device(dev);
2957                put_device(dev);
2958        }
2959        return bmc;
2960}
2961
2962struct prod_dev_id {
2963        unsigned int  product_id;
2964        unsigned char device_id;
2965};
2966
2967static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2968{
2969        const struct prod_dev_id *cid = data;
2970        struct bmc_device *bmc;
2971        int rv;
2972
2973        if (dev->type != &bmc_device_type)
2974                return 0;
2975
2976        bmc = to_bmc_device(dev);
2977        rv = (bmc->id.product_id == cid->product_id
2978              && bmc->id.device_id == cid->device_id);
2979        if (rv)
2980                rv = kref_get_unless_zero(&bmc->usecount);
2981        return rv;
2982}
2983
2984/*
2985 * Returns with the bmc's usecount incremented, if it is non-NULL.
2986 */
2987static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2988        struct device_driver *drv,
2989        unsigned int product_id, unsigned char device_id)
2990{
2991        struct prod_dev_id id = {
2992                .product_id = product_id,
2993                .device_id = device_id,
2994        };
2995        struct device *dev;
2996        struct bmc_device *bmc = NULL;
2997
2998        dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2999        if (dev) {
3000                bmc = to_bmc_device(dev);
3001                put_device(dev);
3002        }
3003        return bmc;
3004}
3005
3006static DEFINE_IDA(ipmi_bmc_ida);
3007
3008static void
3009release_bmc_device(struct device *dev)
3010{
3011        kfree(to_bmc_device(dev));
3012}
3013
3014static void cleanup_bmc_work(struct work_struct *work)
3015{
3016        struct bmc_device *bmc = container_of(work, struct bmc_device,
3017                                              remove_work);
3018        int id = bmc->pdev.id; /* Unregister overwrites id */
3019
3020        platform_device_unregister(&bmc->pdev);
3021        ida_simple_remove(&ipmi_bmc_ida, id);
3022}
3023
3024static void
3025cleanup_bmc_device(struct kref *ref)
3026{
3027        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
3028
3029        /*
3030         * Remove the platform device in a work queue to avoid issues
3031         * with removing the device attributes while reading a device
3032         * attribute.
3033         */
3034        queue_work(remove_work_wq, &bmc->remove_work);
3035}
3036
3037/*
3038 * Must be called with intf->bmc_reg_mutex held.
3039 */
3040static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3041{
3042        struct bmc_device *bmc = intf->bmc;
3043
3044        if (!intf->bmc_registered)
3045                return;
3046
3047        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3048        sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3049        kfree(intf->my_dev_name);
3050        intf->my_dev_name = NULL;
3051
3052        mutex_lock(&bmc->dyn_mutex);
3053        list_del(&intf->bmc_link);
3054        mutex_unlock(&bmc->dyn_mutex);
3055        intf->bmc = &intf->tmp_bmc;
3056        kref_put(&bmc->usecount, cleanup_bmc_device);
3057        intf->bmc_registered = false;
3058}
3059
3060static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3061{
3062        mutex_lock(&intf->bmc_reg_mutex);
3063        __ipmi_bmc_unregister(intf);
3064        mutex_unlock(&intf->bmc_reg_mutex);
3065}
3066
3067/*
3068 * Must be called with intf->bmc_reg_mutex held.
3069 */
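/*
 * Several interfaces may talk to the same physical BMC.  If a matching
 * bmc_device already exists (found by GUID or by product/device id below)
 * it is shared and its usecount is bumped; otherwise a new platform
 * device is allocated and registered.
 */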
3070static int __ipmi_bmc_register(struct ipmi_smi *intf,
3071                               struct ipmi_device_id *id,
3072                               bool guid_set, guid_t *guid, int intf_num)
3073{
3074        int               rv;
3075        struct bmc_device *bmc;
3076        struct bmc_device *old_bmc;
3077
3078        /*
3079         * platform_device_register() can cause bmc_reg_mutex to
3080         * be claimed because of the is_visible functions of
3081         * the attributes.  To avoid that recursion, drop the lock
3082         * here and retake it before returning.
3083         */
3084        intf->in_bmc_register = true;
3085        mutex_unlock(&intf->bmc_reg_mutex);
3086
3087        /*
3088         * Try to find an existing bmc_device struct that
3089         * already represents this BMC.
3090         */
3091        mutex_lock(&ipmidriver_mutex);
3092        if (guid_set)
3093                old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3094        else
3095                old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3096                                                    id->product_id,
3097                                                    id->device_id);
3098
3099        /*
3100         * If a bmc_device already exists, reuse it; otherwise
3101         * allocate and register a new BMC device.
3102         */
3103        if (old_bmc) {
3104                bmc = old_bmc;
3105                /*
3106                 * Note: old_bmc already has usecount incremented by
3107                 * the BMC find functions.
3108                 */
3109                intf->bmc = old_bmc;
3110                mutex_lock(&bmc->dyn_mutex);
3111                list_add_tail(&intf->bmc_link, &bmc->intfs);
3112                mutex_unlock(&bmc->dyn_mutex);
3113
3114                dev_info(intf->si_dev,
3115                         "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3116                         bmc->id.manufacturer_id,
3117                         bmc->id.product_id,
3118                         bmc->id.device_id);
3119        } else {
3120                bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3121                if (!bmc) {
3122                        rv = -ENOMEM;
3123                        goto out;
3124                }
3125                INIT_LIST_HEAD(&bmc->intfs);
3126                mutex_init(&bmc->dyn_mutex);
3127                INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3128
3129                bmc->id = *id;
3130                bmc->dyn_id_set = 1;
3131                bmc->dyn_guid_set = guid_set;
3132                bmc->guid = *guid;
3133                bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3134
3135                bmc->pdev.name = "ipmi_bmc";
3136
3137                rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3138                if (rv < 0) {
3139                        kfree(bmc);
3140                        goto out;
3141                }
3142
3143                bmc->pdev.dev.driver = &ipmidriver.driver;
3144                bmc->pdev.id = rv;
3145                bmc->pdev.dev.release = release_bmc_device;
3146                bmc->pdev.dev.type = &bmc_device_type;
3147                kref_init(&bmc->usecount);
3148
3149                intf->bmc = bmc;
3150                mutex_lock(&bmc->dyn_mutex);
3151                list_add_tail(&intf->bmc_link, &bmc->intfs);
3152                mutex_unlock(&bmc->dyn_mutex);
3153
3154                rv = platform_device_register(&bmc->pdev);
3155                if (rv) {
3156                        dev_err(intf->si_dev,
3157                                "Unable to register bmc device: %d\n",
3158                                rv);
3159                        goto out_list_del;
3160                }
3161
3162                dev_info(intf->si_dev,
3163                         "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3164                         bmc->id.manufacturer_id,
3165                         bmc->id.product_id,
3166                         bmc->id.device_id);
3167        }
3168
3169        /*
3170         * Create symlinks between the system interface device and
3171         * the BMC device, in both directions.
3172         */
3173        rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3174        if (rv) {
3175                dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3176                goto out_put_bmc;
3177        }
3178
3179        if (intf_num == -1)
3180                intf_num = intf->intf_num;
3181        intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3182        if (!intf->my_dev_name) {
3183                rv = -ENOMEM;
3184                dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3185                        rv);
3186                goto out_unlink1;
3187        }
3188
3189        rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3190                               intf->my_dev_name);
3191        if (rv) {
3192                dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3193                        rv);
3194                goto out_free_my_dev_name;
3195        }
3196
3197        intf->bmc_registered = true;
3198
3199out:
3200        mutex_unlock(&ipmidriver_mutex);
3201        mutex_lock(&intf->bmc_reg_mutex);
3202        intf->in_bmc_register = false;
3203        return rv;
3204
3205
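/*
 * Error unwind: each label below undoes the step that failed, falls
 * through the later labels, and ends with "goto out", dropping either
 * the bmc usecount reference or (if platform device registration itself
 * failed) the platform device reference.
 */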
3206out_free_my_dev_name:
3207        kfree(intf->my_dev_name);
3208        intf->my_dev_name = NULL;
3209
3210out_unlink1:
3211        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3212
3213out_put_bmc:
3214        mutex_lock(&bmc->dyn_mutex);
3215        list_del(&intf->bmc_link);
3216        mutex_unlock(&bmc->dyn_mutex);
3217        intf->bmc = &intf->tmp_bmc;
3218        kref_put(&bmc->usecount, cleanup_bmc_device);
3219        goto out;
3220
3221out_list_del:
3222        mutex_lock(&bmc->dyn_mutex);
3223        list_del(&intf->bmc_link);
3224        mutex_unlock(&bmc->dyn_mutex);
3225        intf->bmc = &intf->tmp_bmc;
3226        put_device(&bmc->pdev.dev);
3227        goto out;
3228}
3229
3230static int
3231send_guid_cmd(struct ipmi_smi *intf, int chan)
3232{
3233        struct kernel_ipmi_msg            msg;
3234        struct ipmi_system_interface_addr si;
3235
3236        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3237        si.channel = IPMI_BMC_CHANNEL;
3238        si.lun = 0;
3239
3240        msg.netfn = IPMI_NETFN_APP_REQUEST;
3241        msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3242        msg.data = NULL;
3243        msg.data_len = 0;
3244        return i_ipmi_request(NULL,
3245                              intf,
3246                              (struct ipmi_addr *) &si,
3247                              0,
3248                              &msg,
3249                              intf,
3250                              NULL,
3251                              NULL,
3252                              0,
3253                              intf->addrinfo[0].address,
3254                              intf->addrinfo[0].lun,
3255                              -1, 0);
3256}
3257
3258static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3259{
3260        struct bmc_device *bmc = intf->bmc;
3261
3262        if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3263            || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3264            || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3265                /* Not for me */
3266                return;
3267
3268        if (msg->msg.data[0] != 0) {
3269                /* Error from getting the GUID, the BMC doesn't have one. */
3270                bmc->dyn_guid_set = 0;
3271                goto out;
3272        }
3273
3274        if (msg->msg.data_len < UUID_SIZE + 1) {
3275                bmc->dyn_guid_set = 0;
3276                dev_warn(intf->si_dev,
3277                         "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3278                         msg->msg.data_len, UUID_SIZE + 1);
3279                goto out;
3280        }
3281
3282        import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3283        /*
3284         * Make sure the guid data is available before setting
3285         * dyn_guid_set.
3286         */
3287        smp_wmb();
3288        bmc->dyn_guid_set = 1;
3289 out:
3290        wake_up(&intf->waitq);
3291}
3292
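/*
 * dyn_guid_set encodes the fetch state: 2 means a fetch is in progress,
 * 1 means fetch_guid holds a valid GUID, and 0 means the BMC has no
 * usable GUID (or the fetch failed).
 */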
3293static void __get_guid(struct ipmi_smi *intf)
3294{
3295        int rv;
3296        struct bmc_device *bmc = intf->bmc;
3297
3298        bmc->dyn_guid_set = 2;
3299        intf->null_user_handler = guid_handler;
3300        rv = send_guid_cmd(intf, 0);
3301        if (rv)
3302                /* Send failed, no GUID available. */
3303                bmc->dyn_guid_set = 0;
3304        else
3305                wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3306
3307        /* Pairs with the smp_wmb() in guid_handler(); the GUID data is now visible. */
3308        smp_rmb();
3309
3310        intf->null_user_handler = NULL;
3311}
3312
3313static int
3314send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3315{
3316        struct kernel_ipmi_msg            msg;
3317        unsigned char                     data[1];
3318        struct ipmi_system_interface_addr si;
3319
3320        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3321        si.channel = IPMI_BMC_CHANNEL;
3322        si.lun = 0;
3323
3324        msg.netfn = IPMI_NETFN_APP_REQUEST;
3325        msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3326        msg.data = data;
3327        msg.data_len = 1;
3328        data[0] = chan;
3329        return i_ipmi_request(NULL,
3330                              intf,
3331                              (struct ipmi_addr *) &si,
3332                              0,
3333                              &msg,
3334                              intf,
3335                              NULL,
3336                              NULL,
3337                              0,
3338                              intf->addrinfo[0].address,
3339                              intf->addrinfo[0].lun,
3340                              -1, 0);
3341}
3342
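/*
 * Response handler for the Get Channel Info requests issued from
 * __scan_channels().  It records the medium and protocol for the current
 * channel, then asks for the next channel, and marks the channel list
 * ready (waking any waiter) once all channels are scanned or on error.
 */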
3343static void
3344channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3345{
3346        int rv = 0;
3347        int ch;
3348        unsigned int set = intf->curr_working_cset;
3349        struct ipmi_channel *chans;
3350
3351        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3352            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3353            && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3354                /* It's the one we want */
3355                if (msg->msg.data[0] != 0) {
3356                        /* Got an error from the channel, just go on. */
3357                        if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3358                                /*
3359                                 * If the MC does not support this
3360                                 * command, that is legal.  We just
3361                                 * assume it has one IPMB at channel
3362                                 * zero.
3363                                 */
3364                                intf->wchannels[set].c[0].medium
3365                                        = IPMI_CHANNEL_MEDIUM_IPMB;
3366                                intf->wchannels[set].c[0].protocol
3367                                        = IPMI_CHANNEL_PROTOCOL_IPMB;
3368
3369                                intf->channel_list = intf->wchannels + set;
3370                                intf->channels_ready = true;
3371                                wake_up(&intf->waitq);
3372                                goto out;
3373                        }
3374                        goto next_channel;
3375                }
3376                if (msg->msg.data_len < 4) {
3377                        /* Message not big enough, just go on. */
3378                        goto next_channel;
3379                }
3380                ch = intf->curr_channel;
3381                chans = intf->wchannels[set].c;
3382                chans[ch].medium = msg->msg.data[2] & 0x7f;
3383                chans[ch].protocol = msg->msg.data[3] & 0x1f;
3384
3385 next_channel:
3386                intf->curr_channel++;
3387                if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3388                        intf->channel_list = intf->wchannels + set;
3389                        intf->channels_ready = true;
3390                        wake_up(&intf->waitq);
3391                } else {
3392                        intf->channel_list = intf->wchannels + set;
3393                        intf->channels_ready = true;
3394                        rv = send_channel_info_cmd(intf, intf->curr_channel);
3395                }
3396
3397                if (rv) {
3398                        /* Got an error somehow, just give up. */
3399                        dev_warn(intf->si_dev,
3400                                 "Error sending channel information for channel %d: %d\n",
3401                                 intf->curr_channel, rv);
3402
3403                        intf->channel_list = intf->wchannels + set;
3404                        intf->channels_ready = true;
3405                        wake_up(&intf->waitq);
3406                }
3407        }
3408 out:
3409        return;
3410}
3411
3412/*
3413 * Must be holding intf->bmc_reg_mutex to call this.
3414 */
3415static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3416{
3417        int rv;
3418
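        /*
         * Get Channel Info only exists on IPMI 1.5 and later (hence the
         * version check); older BMCs fall through to the single-IPMB
         * default in the else branch.
         */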
3419        if (ipmi_version_major(id) > 1
3420                        || (ipmi_version_major(id) == 1
3421                            && ipmi_version_minor(id) >= 5)) {
3422                unsigned int set;
3423
3424                /*
3425                 * Start scanning the channels to see what is
3426                 * available.
3427                 */
3428                set = !intf->curr_working_cset;
3429                intf->curr_working_cset = set;
3430                memset(&intf->wchannels[set], 0,
3431                       sizeof(struct ipmi_channel_set));
3432
3433                intf->null_user_handler = channel_handler;
3434                intf->curr_channel = 0;
3435                rv = send_channel_info_cmd(intf, 0);
3436                if (rv) {
3437                        dev_warn(intf->si_dev,
3438                                 "Error sending channel information for channel 0, %d\n",
3439                                 rv);
3440                        intf->null_user_handler = NULL;
3441                        return -EIO;
3442                }
3443
3444                /* Wait for the channel info to be read. */
3445                wait_event(intf->waitq, intf->channels_ready);
3446                intf->null_user_handler = NULL;
3447        } else {
3448                unsigned int set = intf->curr_working_cset;
3449
3450                /* Assume a single IPMB channel at zero. */
3451                intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3452                intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3453                intf->channel_list = intf->wchannels + set;
3454                intf->channels_ready = true;
3455        }
3456
3457        return 0;
3458}
3459
3460static void ipmi_poll(struct ipmi_smi *intf)
3461{
3462        if (intf->handlers->poll)
3463                intf->handlers->poll(intf->send_info);
3464        /* In case something came in */
3465        handle_new_recv_msgs(intf);
3466}
3467
3468void ipmi_poll_interface(struct ipmi_user *user)
3469{
3470        ipmi_poll(user->intf);
3471}
3472EXPORT_SYMBOL(ipmi_poll_interface);
3473
3474static void redo_bmc_reg(struct work_struct *work)
3475{
3476        struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3477                                             bmc_reg_work);
3478
3479        if (!intf->in_shutdown)
3480                bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3481
3482        kref_put(&intf->refcount, intf_free);
3483}
3484
3485int ipmi_add_smi(struct module         *owner,
3486                 const struct ipmi_smi_handlers *handlers,
3487                 void                  *send_info,
3488                 struct device         *si_dev,
3489                 unsigned char         slave_addr)
3490{
3491        int              i, j;
3492        int              rv;
3493        struct ipmi_smi *intf, *tintf;
3494        struct list_head *link;
3495        struct ipmi_device_id id;
3496
3497        /*
3498         * Make sure the driver is actually initialized; this handles
3499         * problems with initialization order.
3500         */
3501        rv = ipmi_init_msghandler();
3502        if (rv)
3503                return rv;
3504
3505        intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3506        if (!intf)
3507                return -ENOMEM;
3508
3509        rv = init_srcu_struct(&intf->users_srcu);
3510        if (rv) {
3511                kfree(intf);
3512                return rv;
3513        }
3514
3515        intf->owner = owner;
3516        intf->bmc = &intf->tmp_bmc;
3517        INIT_LIST_HEAD(&intf->bmc->intfs);
3518        mutex_init(&intf->bmc->dyn_mutex);
3519        INIT_LIST_HEAD(&intf->bmc_link);
3520        mutex_init(&intf->bmc_reg_mutex);
3521        intf->intf_num = -1; /* Mark it invalid for now. */
3522        kref_init(&intf->refcount);
3523        INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3524        intf->si_dev = si_dev;
3525        for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3526                intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
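                /* LUN 2 is the SMS message LUN defined by the IPMI spec. */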
3527                intf->addrinfo[j].lun = 2;
3528        }
3529        if (slave_addr != 0)
3530                intf->addrinfo[0].address = slave_addr;
3531        INIT_LIST_HEAD(&intf->users);
3532        intf->handlers = handlers;
3533        intf->send_info = send_info;
3534        spin_lock_init(&intf->seq_lock);
3535        for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3536                intf->seq_table[j].inuse = 0;
3537                intf->seq_table[j].seqid = 0;
3538        }
3539        intf->curr_seq = 0;
3540        spin_lock_init(&intf->waiting_rcv_msgs_lock);
3541        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3542        tasklet_setup(&intf->recv_tasklet,
3543                     smi_recv_tasklet);
3544        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3545        spin_lock_init(&intf->xmit_msgs_lock);
3546        INIT_LIST_HEAD(&intf->xmit_msgs);
3547        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3548        spin_lock_init(&intf->events_lock);
3549        spin_lock_init(&intf->watch_lock);
3550        atomic_set(&intf->event_waiters, 0);
3551        intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3552        INIT_LIST_HEAD(&intf->waiting_events);
3553        intf->waiting_events_count = 0;
3554        mutex_init(&intf->cmd_rcvrs_mutex);
3555        spin_lock_init(&intf->maintenance_mode_lock);
3556        INIT_LIST_HEAD(&intf->cmd_rcvrs);
3557        init_waitqueue_head(&intf->waitq);
3558        for (i = 0; i < IPMI_NUM_STATS; i++)
3559                atomic_set(&intf->stats[i], 0);
3560
3561        mutex_lock(&ipmi_interfaces_mutex);
3562        /* Look for a hole in the numbers. */
3563        i = 0;
3564        link = &ipmi_interfaces;
3565        list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3566                                ipmi_interfaces_mutex_held()) {
3567                if (tintf->intf_num != i) {
3568                        link = &tintf->link;
3569                        break;
3570                }
3571                i++;
3572        }
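        /*
         * For example, with existing interfaces numbered 0 and 2, the loop
         * above stops at i == 1 and the new interface is inserted there.
         */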
3573        /* Add the new interface in numeric order. */
3574        if (i == 0)
3575                list_add_rcu(&intf->link, &ipmi_interfaces);
3576        else
3577                list_add_tail_rcu(&intf->link, link);
3578
3579        rv = handlers->start_processing(send_info, intf);
3580        if (rv)
3581                goto out_err;
3582
3583        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3584        if (rv) {
3585                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3586                goto out_err_started;
3587        }
3588
3589        mutex_lock(&intf->bmc_reg_mutex);
3590        rv = __scan_channels(intf, &id);
3591        mutex_unlock(&intf->bmc_reg_mutex);
3592        if (rv)
3593                goto out_err_bmc_reg;
3594
3595        /*
3596         * Keep memory order straight for RCU readers.  Make
3597         * sure everything else is committed to memory before
3598         * setting intf_num to mark the interface valid.
3599         */
3600        smp_wmb();
3601        intf->intf_num = i;
3602        mutex_unlock(&ipmi_interfaces_mutex);
3603
3604        /* After this point the interface is legal to use. */
3605        call_smi_watchers(i, intf->si_dev);
3606
3607        return 0;
3608
3609 out_err_bmc_reg:
3610        ipmi_bmc_unregister(intf);
3611 out_err_started:
3612        if (intf->handlers->shutdown)
3613                intf->handlers->shutdown(intf->send_info);
3614 out_err:
3615        list_del_rcu(&intf->link);
3616        mutex_unlock(&ipmi_interfaces_mutex);
3617        synchronize_srcu(&ipmi_interfaces_srcu);
3618        cleanup_srcu_struct(&intf->users_srcu);
3619        kref_put(&intf->refcount, intf_free);
3620
3621        return rv;
3622}
3623EXPORT_SYMBOL(ipmi_add_smi);
3624
3625static void deliver_smi_err_response(struct ipmi_smi *intf,
3626                                     struct ipmi_smi_msg *msg,
3627                                     unsigned char err)
3628{
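        /* ORing in 4 turns the request netfn in data[0] into the matching response netfn. */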
3629        msg->rsp[0] = msg->data[0] | 4;
3630        msg->rsp[1] = msg->data[1];
3631        msg->rsp[2] = err;
3632        msg->rsp_size = 3;
3633        /* It's an error, so it will never requeue, no need to check return. */
3634        handle_one_recv_msg(intf, msg);
3635}
3636
3637static void cleanup_smi_msgs(struct ipmi_smi *intf)
3638{
3639        int              i;
3640        struct seq_table *ent;
3641        struct ipmi_smi_msg *msg;
3642        struct list_head *entry;
3643        struct list_head tmplist;
3644
3645        /* Clear out our transmit queues and hold the messages. */
3646        INIT_LIST_HEAD(&tmplist);
3647        list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3648        list_splice_tail(&intf->xmit_msgs, &tmplist);
3649
3650        /* Let the current message finish first, to preserve ordering. */
3651        while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3652                /* Wait for the message to clear out. */
3653                schedule_timeout(1);
3654        }
3655
3656        /* No need for locks, the interface is down. */
3657
3658        /*
3659         * Return errors for all pending messages in the queue and in
3660         * the sequence table entries waiting for remote responses.
3661         */
3662        while (!list_empty(&tmplist)) {
3663                entry = tmplist.next;
3664                list_del(entry);
3665                msg = list_entry(entry, struct ipmi_smi_msg, link);
3666                deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3667        }
3668
3669        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3670                ent = &intf->seq_table[i];
3671                if (!ent->inuse)
3672                        continue;
3673                deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3674        }
3675}
3676
3677void ipmi_unregister_smi(struct ipmi_smi *intf)
3678{
3679        struct ipmi_smi_watcher *w;
3680        int intf_num = intf->intf_num, index;
3681
3682        mutex_lock(&ipmi_interfaces_mutex);
3683        intf->intf_num = -1;
3684        intf->in_shutdown = true;
3685        list_del_rcu(&intf->link);
3686        mutex_unlock(&ipmi_interfaces_mutex);
3687        synchronize_srcu(&ipmi_interfaces_srcu);
3688
3689        /* At this point no users can be added to the interface. */
3690
3691        /*
3692         * Call all the watcher interfaces to tell them that
3693         * an interface is going away.
3694         */
3695        mutex_lock(&smi_watchers_mutex);
3696        list_for_each_entry(w, &smi_watchers, link)
3697                w->smi_gone(intf_num);
3698        mutex_unlock(&smi_watchers_mutex);
3699
3700        index = srcu_read_lock(&intf->users_srcu);
3701        while (!list_empty(&intf->users)) {
3702                struct ipmi_user *user =
3703                        container_of(list_next_rcu(&intf->users),
3704                                     struct ipmi_user, link);
3705
3706                _ipmi_destroy_user(user);
3707        }
3708        srcu_read_unlock(&intf->users_srcu, index);
3709
3710        if (intf->handlers->shutdown)
3711                intf->handlers->shutdown(intf->send_info);
3712
3713        cleanup_smi_msgs(intf);
3714
3715        ipmi_bmc_unregister(intf);
3716
3717        cleanup_srcu_struct(&intf->users_srcu);
3718        kref_put(&intf->refcount, intf_free);
3719}
3720EXPORT_SYMBOL(ipmi_unregister_smi);
3721
3722static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3723                                   struct ipmi_smi_msg *msg)
3724{
3725        struct ipmi_ipmb_addr ipmb_addr;
3726        struct ipmi_recv_msg  *recv_msg;
3727
3728        /*
3729         * This is 11, not 10, because the response must contain a
3730         * completion code.
3731         */
3732        if (msg->rsp_size < 11) {
3733                /* Message not big enough, just ignore it. */
3734                ipmi_inc_stat(intf, invalid_ipmb_responses);
3735                return 0;
3736        }
3737
3738        if (msg->rsp[2] != 0) {
3739                /* An error getting the response, just ignore it. */
3740                return 0;
3741        }
3742
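        /*
         * Byte offsets used below (as this function indexes rsp[]; see the
         * IPMI spec for the authoritative Get Message response format):
         * rsp[2] completion code, rsp[3] channel, rsp[4] netfn/rsLUN of the
         * embedded response, rsp[6] responder slave address, rsp[7]
         * rqSeq/LUN, rsp[8] command, rsp[9].. data followed by a checksum.
         */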
3743        ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3744        ipmb_addr.slave_addr = msg->rsp[6];
3745        ipmb_addr.channel = msg->rsp[3] & 0x0f;
3746        ipmb_addr.lun = msg->rsp[7] & 3;
3747
3748        /*
3749         * It's a response from a remote entity.  Look up the sequence
3750         * number and handle the response.
3751         */
3752        if (intf_find_seq(intf,
3753                          msg->rsp[7] >> 2,
3754                          msg->rsp[3] & 0x0f,
3755                          msg->rsp[8],
3756                          (msg->rsp[4] >> 2) & (~1),
3757                          (struct ipmi_addr *) &ipmb_addr,
3758                          &recv_msg)) {
3759                /*
3760                 * We were unable to find the sequence number,
3761                 * so just nuke the message.
3762                 */
3763                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3764                return 0;
3765        }
3766
3767        memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3768        /*
3769         * The other fields matched, so no need to set them, except
3770         * for netfn, which needs to be the response that was
3771         * returned, not the request value.
3772         */
3773        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3774        recv_msg->msg.data = recv_msg->msg_data;
3775        recv_msg->msg.data_len = msg->rsp_size - 10;
3776        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3777        if (deliver_response(intf, recv_msg))
3778                ipmi_inc_stat(intf, unhandled_ipmb_responses);
3779        else
3780                ipmi_inc_stat(intf, handled_ipmb_responses);
3781
3782        return 0;
3783}
3784
3785static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3786                                   struct ipmi_smi_msg *msg)
3787{
3788        struct cmd_rcvr          *rcvr;
3789        int                      rv = 0;
3790        unsigned char            netfn;
3791        unsigned char            cmd;
3792        unsigned char            chan;
3793        struct ipmi_user         *user = NULL;
3794        struct ipmi_ipmb_addr    *ipmb_addr;
3795        struct ipmi_recv_msg     *recv_msg;
3796
3797        if (msg->rsp_size < 10) {
3798                /* Message not big enough, just ignore it. */
3799                ipmi_inc_stat(intf, invalid_commands);
3800                return 0;
3801        }
3802
3803        if (msg->rsp[2] != 0) {
3804                /* An error getting the response, just ignore it. */
3805                return 0;
3806        }
3807
3808        netfn = msg->rsp[4] >> 2;
3809        cmd = msg->rsp[8];
3810        chan = msg->rsp[3] & 0xf;
3811
3812        rcu_read_lock();
3813        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3814        if (rcvr) {
3815                user = rcvr->user;
3816                kref_get(&user->refcount);
3817        } else
3818                user = NULL;
3819        rcu_read_unlock();
3820
3821        if (user == NULL) {
3822                /* We didn't find a user, deliver an error response. */
3823                ipmi_inc_stat(intf, unhandled_commands);
3824
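                /*
                 * Build a Send Message request that returns an "invalid
                 * command" completion code to the originator over IPMB;
                 * the per-byte comments below describe the layout.
                 */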
3825                msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3826                msg->data[1] = IPMI_SEND_MSG_CMD;
3827                msg->data[2] = msg->rsp[3];
3828                msg->data[3] = msg->rsp[6];
3829                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3830                msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3831                msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3832                /* rqseq/lun */
3833                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3834                msg->data[8] = msg->rsp[8]; /* cmd */
3835                msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3836                msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3837                msg->data_size = 11;
3838
3839                pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
3840
3841                rcu_read_lock();
3842                if (!intf->in_shutdown) {
3843                        smi_send(intf, intf->handlers, msg, 0);
3844                        /*
3845                         * We used the message, so return the value
3846                         * that causes it to not be freed or
3847                         * queued.
3848                         */
3849                        rv = -1;
3850                }
3851                rcu_read_unlock();
3852        } else {
3853                recv_msg = ipmi_alloc_recv_msg();
3854                if (!recv_msg) {
3855                        /*
3856                         * We couldn't allocate memory for the
3857                         * message, so requeue it for handling
3858                         * later.
3859                         */
3860                        rv = 1;
3861                        kref_put(&user->refcount, free_user);
3862                } else {
3863                        /* Extract the source address from the data. */
3864                        ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3865                        ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3866                        ipmb_addr->slave_addr = msg->rsp[6];
3867                        ipmb_addr->lun = msg->rsp[7] & 3;
3868                        ipmb_addr->channel = msg->rsp[3] & 0xf;
3869
3870                        /*
3871                         * Extract the rest of the message information
3872                         * from the IPMB header.
3873                         */
3874                        recv_msg->user = user;
3875                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3876                        recv_msg->msgid = msg->rsp[7] >> 2;
3877                        recv_msg->msg.netfn = msg->rsp[4] >> 2;
3878                        recv_msg->msg.cmd = msg->rsp[8];
3879                        recv_msg->msg.data = recv_msg->msg_data;
3880
3881                        /*
3882                         * We chop off 10, not 9 bytes because the checksum
3883                         * at the end also needs to be removed.
3884                         */
3885                        recv_msg->msg.data_len = msg->rsp_size - 10;
3886                        memcpy(recv_msg->msg_data, &msg->rsp[9],
3887                               msg->rsp_size - 10);
3888                        if (deliver_response(intf, recv_msg))
3889                                ipmi_inc_stat(intf, unhandled_commands);
3890                        else
3891                                ipmi_inc_stat(intf, handled_commands);
3892                }
3893        }
3894
3895        return rv;
3896}
3897
3898static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
3899                                      struct ipmi_smi_msg *msg)
3900{
3901        struct cmd_rcvr          *rcvr;
3902        int                      rv = 0;
3903        struct ipmi_user         *user = NULL;
3904        struct ipmi_ipmb_direct_addr *daddr;
3905        struct ipmi_recv_msg     *recv_msg;
3906        unsigned char netfn = msg->rsp[0] >> 2;
3907        unsigned char cmd = msg->rsp[3];
3908
3909        rcu_read_lock();
3910        /* We always use channel 0 for direct messages. */
3911        rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
3912        if (rcvr) {
3913                user = rcvr->user;
3914                kref_get(&user->refcount);
3915        } else
3916                user = NULL;
3917        rcu_read_unlock();
3918
3919        if (user == NULL) {
3920                /* We didn't find a user, deliver an error response. */
3921                ipmi_inc_stat(intf, unhandled_commands);
3922
3923                msg->data[0] = (netfn + 1) << 2;
3924                msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
3925                msg->data[1] = msg->rsp[1]; /* Addr */
3926                msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
3927                msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
3928                msg->data[3] = cmd;
3929                msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
3930                msg->data_size = 5;
3931
3932                rcu_read_lock();
3933                if (!intf->in_shutdown) {
3934                        smi_send(intf, intf->handlers, msg, 0);
3935                        /*
3936                         * We used the message, so return the value
3937                         * that causes it to not be freed or
3938                         * queued.
3939                         */
3940                        rv = -1;
3941                }
3942                rcu_read_unlock();
3943        } else {
3944                recv_msg = ipmi_alloc_recv_msg();
3945                if (!recv_msg) {
3946                        /*
3947                         * We couldn't allocate memory for the
3948                         * message, so requeue it for handling
3949                         * later.
3950                         */
3951                        rv = 1;
3952                        kref_put(&user->refcount, free_user);
3953                } else {
3954                        /* Extract the source address from the data. */
3955                        daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
3956                        daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
3957                        daddr->channel = 0;
3958                        daddr->slave_addr = msg->rsp[1];
3959                        daddr->rs_lun = msg->rsp[0] & 3;
3960                        daddr->rq_lun = msg->rsp[2] & 3;
3961
3962                        /*
3963                         * Extract the rest of the message information
3964                         * from the IPMB header.
3965                         */
3966                        recv_msg->user = user;
3967                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3968                        recv_msg->msgid = (msg->rsp[2] >> 2);
3969                        recv_msg->msg.netfn = msg->rsp[0] >> 2;
3970                        recv_msg->msg.cmd = msg->rsp[3];
3971                        recv_msg->msg.data = recv_msg->msg_data;
3972
3973                        recv_msg->msg.data_len = msg->rsp_size - 4;
3974                        memcpy(recv_msg->msg_data, msg->rsp + 4,
3975                               msg->rsp_size - 4);
3976                        if (deliver_response(intf, recv_msg))
3977                                ipmi_inc_stat(intf, unhandled_commands);
3978                        else
3979                                ipmi_inc_stat(intf, handled_commands);
3980                }
3981        }
3982
3983        return rv;
3984}
3985
3986static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
3987                                      struct ipmi_smi_msg *msg)
3988{
3989        struct ipmi_recv_msg *recv_msg;
3990        struct ipmi_ipmb_direct_addr *daddr;
3991
3992        recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3993        if (recv_msg == NULL) {
3994                dev_warn(intf->si_dev,
3995                         "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
3996                return 0;
3997        }
3998
3999        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4000        recv_msg->msgid = msg->msgid;
4001        daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
4002        daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4003        daddr->channel = 0;
4004        daddr->slave_addr = msg->rsp[1];
4005        daddr->rq_lun = msg->rsp[0] & 3;
4006        daddr->rs_lun = msg->rsp[2] & 3;
4007        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4008        recv_msg->msg.cmd = msg->rsp[3];
4009        memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
4010        recv_msg->msg.data = recv_msg->msg_data;
4011        recv_msg->msg.data_len = msg->rsp_size - 4;
4012        deliver_local_response(intf, recv_msg);
4013
4014        return 0;
4015}
4016
4017static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
4018                                  struct ipmi_smi_msg *msg)
4019{
4020        struct ipmi_lan_addr  lan_addr;
4021        struct ipmi_recv_msg  *recv_msg;
4022
4023
4024        /*
4025         * This is 13, not 12, because the response must contain a
4026         * completion code.
4027         */
4028        if (msg->rsp_size < 13) {
4029                /* Message not big enough, just ignore it. */
4030                ipmi_inc_stat(intf, invalid_lan_responses);
4031                return 0;
4032        }
4033
4034        if (msg->rsp[2] != 0) {
4035                /* An error getting the response, just ignore it. */
4036                return 0;
4037        }
4038
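        /*
         * Offsets below follow how this function indexes rsp[] (see the IPMI
         * spec for the authoritative format): rsp[3] channel/privilege,
         * rsp[4] session handle, rsp[5] local SWID, rsp[6] netfn/LUN of the
         * embedded response, rsp[8] remote SWID, rsp[9] rqSeq/LUN, rsp[10]
         * command, rsp[11].. data followed by a checksum.
         */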
4039        lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
4040        lan_addr.session_handle = msg->rsp[4];
4041        lan_addr.remote_SWID = msg->rsp[8];
4042        lan_addr.local_SWID = msg->rsp[5];
4043        lan_addr.channel = msg->rsp[3] & 0x0f;
4044        lan_addr.privilege = msg->rsp[3] >> 4;
4045        lan_addr.lun = msg->rsp[9] & 3;
4046
4047        /*
4048         * It's a response from a remote entity.  Look up the sequence
4049         * number and handle the response.
4050         */
4051        if (intf_find_seq(intf,
4052                          msg->rsp[9] >> 2,
4053                          msg->rsp[3] & 0x0f,
4054                          msg->rsp[10],
4055                          (msg->rsp[6] >> 2) & (~1),
4056                          (struct ipmi_addr *) &lan_addr,
4057                          &recv_msg)) {
4058                /*
4059                 * We were unable to find the sequence number,
4060                 * so just nuke the message.
4061                 */
4062                ipmi_inc_stat(intf, unhandled_lan_responses);
4063                return 0;
4064        }
4065
4066        memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
4067        /*
4068         * The other fields matched, so no need to set them, except
4069         * for netfn, which needs to be the response that was
4070         * returned, not the request value.
4071         */
4072        recv_msg->msg.netfn = msg->rsp[6] >> 2;
4073        recv_msg->msg.data = recv_msg->msg_data;
4074        recv_msg->msg.data_len = msg->rsp_size - 12;
4075        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4076        if (deliver_response(intf, recv_msg))
4077                ipmi_inc_stat(intf, unhandled_lan_responses);
4078        else
4079                ipmi_inc_stat(intf, handled_lan_responses);
4080
4081        return 0;
4082}
4083
4084static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
4085                                  struct ipmi_smi_msg *msg)
4086{
4087        struct cmd_rcvr          *rcvr;
4088        int                      rv = 0;
4089        unsigned char            netfn;
4090        unsigned char            cmd;
4091        unsigned char            chan;
4092        struct ipmi_user         *user = NULL;
4093        struct ipmi_lan_addr     *lan_addr;
4094        struct ipmi_recv_msg     *recv_msg;
4095
4096        if (msg->rsp_size < 12) {
4097                /* Message not big enough, just ignore it. */
4098                ipmi_inc_stat(intf, invalid_commands);
4099                return 0;
4100        }
4101
4102        if (msg->rsp[2] != 0) {
4103                /* An error getting the response, just ignore it. */
4104                return 0;
4105        }
4106
4107        netfn = msg->rsp[6] >> 2;
4108        cmd = msg->rsp[10];
4109        chan = msg->rsp[3] & 0xf;
4110
4111        rcu_read_lock();
4112        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4113        if (rcvr) {
4114                user = rcvr->user;
4115                kref_get(&user->refcount);
4116        } else
4117                user = NULL;
4118        rcu_read_unlock();
4119
4120        if (user == NULL) {
4121                /* We didn't find a user, just give up. */
4122                ipmi_inc_stat(intf, unhandled_commands);
4123
4124                /*
4125                 * Don't do anything with these messages, just allow
4126                 * them to be freed.
4127                 */
4128                rv = 0;
4129        } else {
4130                recv_msg = ipmi_alloc_recv_msg();
4131                if (!recv_msg) {
4132                        /*
4133                         * We couldn't allocate memory for the
4134                         * message, so requeue it for handling later.
4135                         */
4136                        rv = 1;
4137                        kref_put(&user->refcount, free_user);
4138                } else {
4139                        /* Extract the source address from the data. */
4140                        lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
4141                        lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
4142                        lan_addr->session_handle = msg->rsp[4];
4143                        lan_addr->remote_SWID = msg->rsp[8];
4144                        lan_addr->local_SWID = msg->rsp[5];
4145                        lan_addr->lun = msg->rsp[9] & 3;
4146                        lan_addr->channel = msg->rsp[3] & 0xf;
4147                        lan_addr->privilege = msg->rsp[3] >> 4;
4148
4149                        /*
4150                         * Extract the rest of the message information
4151                         * from the IPMB header.
4152                         */
4153                        recv_msg->user = user;
4154                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4155                        recv_msg->msgid = msg->rsp[9] >> 2;
4156                        recv_msg->msg.netfn = msg->rsp[6] >> 2;
4157                        recv_msg->msg.cmd = msg->rsp[10];
4158                        recv_msg->msg.data = recv_msg->msg_data;
4159
4160                        /*
4161                         * We chop off 12, not 11 bytes because the checksum
4162                         * at the end also needs to be removed.
4163                         */
4164                        recv_msg->msg.data_len = msg->rsp_size - 12;
4165                        memcpy(recv_msg->msg_data, &msg->rsp[11],
4166                               msg->rsp_size - 12);
4167                        if (deliver_response(intf, recv_msg))
4168                                ipmi_inc_stat(intf, unhandled_commands);
4169                        else
4170                                ipmi_inc_stat(intf, handled_commands);
4171                }
4172        }
4173
4174        return rv;
4175}
4176
4177/*
4178 * This routine will handle "Get Message" command responses with
4179 * channels that use an OEM Medium. The message format belongs to
4180 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
4181 * Chapter 22, sections 22.6 and 22.24 for more details.
4182 */
4183static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4184                                  struct ipmi_smi_msg *msg)
4185{
4186        struct cmd_rcvr       *rcvr;
4187        int                   rv = 0;
4188        unsigned char         netfn;
4189        unsigned char         cmd;
4190        unsigned char         chan;
4191        struct ipmi_user *user = NULL;
4192        struct ipmi_system_interface_addr *smi_addr;
4193        struct ipmi_recv_msg  *recv_msg;
4194
4195        /*
4196         * We expect the OEM SW to perform error checking,
4197         * so we just do some basic sanity checks here.
4198         */
4199        if (msg->rsp_size < 4) {
4200                /* Message not big enough, just ignore it. */
4201                ipmi_inc_stat(intf, invalid_commands);
4202                return 0;
4203        }
4204
4205        if (msg->rsp[2] != 0) {
4206                /* An error getting the response, just ignore it. */
4207                return 0;
4208        }
4209
4210        /*
4211         * This is an OEM message, so the OEM needs to know how
4212         * to handle it.  We do no interpretation.
4213         */
4214        netfn = msg->rsp[0] >> 2;
4215        cmd = msg->rsp[1];
4216        chan = msg->rsp[3] & 0xf;
4217
4218        rcu_read_lock();
4219        rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4220        if (rcvr) {
4221                user = rcvr->user;
4222                kref_get(&user->refcount);
4223        } else
4224                user = NULL;
4225        rcu_read_unlock();
4226
4227        if (user == NULL) {
4228                /* We didn't find a user, just give up. */
4229                ipmi_inc_stat(intf, unhandled_commands);
4230
4231                /*
4232                 * Don't do anything with these messages, just allow
4233                 * them to be freed.
4234                 */
4235
4236                rv = 0;
4237        } else {
4238                recv_msg = ipmi_alloc_recv_msg();
4239                if (!recv_msg) {
4240                        /*
4241                         * We couldn't allocate memory for the
4242                         * message, so requeue it for handling
4243                         * later.
4244                         */
4245                        rv = 1;
4246                        kref_put(&user->refcount, free_user);
4247                } else {
4248                        /*
4249                         * OEM Messages are expected to be delivered via
4250                         * the system interface to SMS software.  We might
4251                         * need to visit this again depending on OEM
4252                         * requirements
4253                         */
4254                        smi_addr = ((struct ipmi_system_interface_addr *)
4255                                    &recv_msg->addr);
4256                        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4257                        smi_addr->channel = IPMI_BMC_CHANNEL;
4258                        smi_addr->lun = msg->rsp[0] & 3;
4259
4260                        recv_msg->user = user;
4261                        recv_msg->user_msg_data = NULL;
4262                        recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4263                        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4264                        recv_msg->msg.cmd = msg->rsp[1];
4265                        recv_msg->msg.data = recv_msg->msg_data;
4266
4267                        /*
4268                         * The message starts at byte 4, which follows the
4269                         * Channel byte in the "GET MESSAGE" command.
4270                         */
4271                        recv_msg->msg.data_len = msg->rsp_size - 4;
4272                        memcpy(recv_msg->msg_data, &msg->rsp[4],
4273                               msg->rsp_size - 4);
4274                        if (deliver_response(intf, recv_msg))
4275                                ipmi_inc_stat(intf, unhandled_commands);
4276                        else
4277                                ipmi_inc_stat(intf, handled_commands);
4278                }
4279        }
4280
4281        return rv;
4282}
4283
4284static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4285                                     struct ipmi_smi_msg  *msg)
4286{
4287        struct ipmi_system_interface_addr *smi_addr;
4288
4289        recv_msg->msgid = 0;
4290        smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4291        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4292        smi_addr->channel = IPMI_BMC_CHANNEL;
4293        smi_addr->lun = msg->rsp[0] & 3;
4294        recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4295        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4296        recv_msg->msg.cmd = msg->rsp[1];
4297        memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4298        recv_msg->msg.data = recv_msg->msg_data;
4299        recv_msg->msg.data_len = msg->rsp_size - 3;
4300}
4301
4302static int handle_read_event_rsp(struct ipmi_smi *intf,
4303                                 struct ipmi_smi_msg *msg)
4304{
4305        struct ipmi_recv_msg *recv_msg, *recv_msg2;
4306        struct list_head     msgs;
4307        struct ipmi_user     *user;
4308        int rv = 0, deliver_count = 0, index;
4309        unsigned long        flags;
4310
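        /*
         * A full event is 16 bytes of event data after the three header
         * bytes (netfn/LUN, command, completion code), so anything shorter
         * than 19 bytes cannot be a complete IPMB event.
         */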
4311        if (msg->rsp_size < 19) {
4312                /* Message is too small to be an IPMB event. */
4313                ipmi_inc_stat(intf, invalid_events);
4314                return 0;
4315        }
4316
4317        if (msg->rsp[2] != 0) {
4318                /* An error getting the event, just ignore it. */
4319                return 0;
4320        }
4321
4322        INIT_LIST_HEAD(&msgs);
4323
4324        spin_lock_irqsave(&intf->events_lock, flags);
4325
4326        ipmi_inc_stat(intf, events);
4327
4328        /*
4329         * Allocate and fill in one message for every user that is
4330         * getting events.
4331         */
4332        index = srcu_read_lock(&intf->users_srcu);
4333        list_for_each_entry_rcu(user, &intf->users, link) {
4334                if (!user->gets_events)
4335                        continue;
4336
4337                recv_msg = ipmi_alloc_recv_msg();
4338                if (!recv_msg) {
4339                        srcu_read_unlock(&intf->users_srcu, index);
4340                        list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4341                                                 link) {
4342                                list_del(&recv_msg->link);
4343                                ipmi_free_recv_msg(recv_msg);
4344                        }
4345                        /*
4346                         * We couldn't allocate memory for the
4347                         * message, so requeue it for handling
4348                         * later.
4349                         */
4350                        rv = 1;
4351                        goto out;
4352                }
4353
4354                deliver_count++;
4355
4356                copy_event_into_recv_msg(recv_msg, msg);
4357                recv_msg->user = user;
4358                kref_get(&user->refcount);
4359                list_add_tail(&recv_msg->link, &msgs);
4360        }
4361        srcu_read_unlock(&intf->users_srcu, index);
4362
4363        if (deliver_count) {
4364                /* Now deliver all the messages. */
4365                list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4366                        list_del(&recv_msg->link);
4367                        deliver_local_response(intf, recv_msg);
4368                }
4369        } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4370                /*
4371                 * No one is waiting for the message; queue it if there
4372                 * aren't already too many things in the queue.
4373                 */
4374                recv_msg = ipmi_alloc_recv_msg();
4375                if (!recv_msg) {
4376                        /*
4377                         * We couldn't allocate memory for the
4378                         * message, so requeue it for handling
4379                         * later.
4380                         */
4381                        rv = 1;
4382                        goto out;
4383                }
4384
4385                copy_event_into_recv_msg(recv_msg, msg);
4386                list_add_tail(&recv_msg->link, &intf->waiting_events);
4387                intf->waiting_events_count++;
4388        } else if (!intf->event_msg_printed) {
4389                /*
4390                 * There are too many things in the queue, discard this
4391                 * message.
4392                 */
4393                dev_warn(intf->si_dev,
4394                         "Event queue full, discarding incoming events\n");
4395                intf->event_msg_printed = 1;
4396        }
4397
4398 out:
4399        spin_unlock_irqrestore(&intf->events_lock, flags);
4400
4401        return rv;
4402}
4403
4404static int handle_bmc_rsp(struct ipmi_smi *intf,
4405                          struct ipmi_smi_msg *msg)
4406{
4407        struct ipmi_recv_msg *recv_msg;
4408        struct ipmi_system_interface_addr *smi_addr;
4409
4410        recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4411        if (recv_msg == NULL) {
4412                dev_warn(intf->si_dev,
4413                         "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4414                return 0;
4415        }
4416
4417        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4418        recv_msg->msgid = msg->msgid;
4419        smi_addr = ((struct ipmi_system_interface_addr *)
4420                    &recv_msg->addr);
4421        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4422        smi_addr->channel = IPMI_BMC_CHANNEL;
4423        smi_addr->lun = msg->rsp[0] & 3;
4424        recv_msg->msg.netfn = msg->rsp[0] >> 2;
4425        recv_msg->msg.cmd = msg->rsp[1];
4426        memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4427        recv_msg->msg.data = recv_msg->msg_data;
4428        recv_msg->msg.data_len = msg->rsp_size - 2;
4429        deliver_local_response(intf, recv_msg);
4430
4431        return 0;
4432}
4433
4434/*
4435 * Handle a received message.  Return 1 if the message should be requeued,
4436 * 0 if the message should be freed, or -1 if the message should not
4437 * be freed or requeued.
4438 */
4439static int handle_one_recv_msg(struct ipmi_smi *intf,
4440                               struct ipmi_smi_msg *msg)
4441{
4442        int requeue = 0;
4443        int chan;
4444        unsigned char cc;
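        /* Even netfn values are requests (commands); odd values are responses. */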
4445        bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4446
4447        pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
4448
4449        if (msg->rsp_size < 2) {
4450                /* Message is too small to be correct. */
4451                dev_warn(intf->si_dev,
4452                         "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4453                         (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4454
4455return_unspecified:
4456                /* Generate an error response for the message. */
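                /* data[0] | (1 << 2) turns the request netfn into the matching response netfn. */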
4457                msg->rsp[0] = msg->data[0] | (1 << 2);
4458                msg->rsp[1] = msg->data[1];
4459                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4460                msg->rsp_size = 3;
4461        } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4462                /* commands must have at least 4 bytes, responses 5. */
4463                if (is_cmd && (msg->rsp_size < 4)) {
4464                        ipmi_inc_stat(intf, invalid_commands);
4465                        goto out;
4466                }
4467                if (!is_cmd && (msg->rsp_size < 5)) {
4468                        ipmi_inc_stat(intf, invalid_ipmb_responses);
4469                        /* Construct a valid error response. */
4470                        msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4471                        msg->rsp[0] |= (1 << 2); /* Make it a response */
4472                        msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4473                        msg->rsp[1] = msg->data[1]; /* Addr */
4474                        msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4475                        msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4476                        msg->rsp[3] = msg->data[3]; /* Cmd */
4477                        msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4478                        msg->rsp_size = 5;
4479                }
4480        } else if ((msg->data_size >= 2)
4481            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4482            && (msg->data[1] == IPMI_SEND_MSG_CMD)
4483            && (msg->user_data == NULL)) {
4484
4485                if (intf->in_shutdown)
4486                        goto out;
4487
4488                /*
4489                 * This is the local response to a command send, start
4490                 * the timer for these.  The user_data will not be
4491                 * NULL if this is a response send, and we will let
4492                 * response sends just go through.
4493                 */
4494
4495                /*
4496                 * Check for errors.  If we get certain errors (ones
4497                 * that basically mean we can try again later), we
4498                 * ignore them and start the timer.  Otherwise we
4499                 * report the error immediately.
4500                 */
4501                if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4502                    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4503                    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4504                    && (msg->rsp[2] != IPMI_BUS_ERR)
4505                    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4506                        int ch = msg->rsp[3] & 0xf;
4507                        struct ipmi_channel *chans;
4508
4509                        /* Got an error sending the message, handle it. */
4510
4511                        chans = READ_ONCE(intf->channel_list)->c;
4512                        if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4513                            || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4514                                ipmi_inc_stat(intf, sent_lan_command_errs);
4515                        else
4516                                ipmi_inc_stat(intf, sent_ipmb_command_errs);
4517                        intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4518                } else
4519                        /* The message was sent, start the timer. */
4520                        intf_start_seq_timer(intf, msg->msgid);
4521        } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4522                   || (msg->rsp[1] != msg->data[1])) {
4523                /*
4524                 * The NetFN and Command in the response are not even
4525                 * marginally correct.
4526                 */
4527                dev_warn(intf->si_dev,
4528                         "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4529                         (msg->data[0] >> 2) | 1, msg->data[1],
4530                         msg->rsp[0] >> 2, msg->rsp[1]);
4531
4532                goto return_unspecified;
4533        }
4534
4535        if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4536                if ((msg->data[0] >> 2) & 1) {
4537                        /* It's a response to a sent response. */
4538                        chan = 0;
4539                        cc = msg->rsp[4];
4540                        goto process_response_response;
4541                }
4542                if (is_cmd)
4543                        requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4544                else
4545                        requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4546        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4547                   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4548                   && (msg->user_data != NULL)) {
4549                /*
4550                 * It's a response to a response we sent.  For this we
4551                 * deliver a send message response to the user.
4552                 */
4553                struct ipmi_recv_msg *recv_msg;
4554
4555                chan = msg->data[2] & 0x0f;
4556                if (chan >= IPMI_MAX_CHANNELS)
4557                        /* Invalid channel number */
4558                        goto out;
4559                cc = msg->rsp[2];
4560
4561process_response_response:
4562                recv_msg = msg->user_data;
4563
4564                requeue = 0;
4565                if (!recv_msg)
4566                        goto out;
4567
4568                recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4569                recv_msg->msg.data = recv_msg->msg_data;
4570                recv_msg->msg_data[0] = cc;
4571                recv_msg->msg.data_len = 1;
4572                deliver_local_response(intf, recv_msg);
4573        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4574                   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4575                struct ipmi_channel   *chans;
4576
4577                /* It's from the receive queue. */
4578                chan = msg->rsp[3] & 0xf;
4579                if (chan >= IPMI_MAX_CHANNELS) {
4580                        /* Invalid channel number */
4581                        requeue = 0;
4582                        goto out;
4583                }
4584
4585                /*
4586                 * We need to make sure the channels have been initialized.
4587                 * channels_ready is set once channel scanning for this
4588                 * interface has completed, so until then just throw the
4589                 * message away.
4590                 */
4591                if (!intf->channels_ready) {
4592                        requeue = 0; /* Throw the message away */
4593                        goto out;
4594                }
4595
4596                chans = READ_ONCE(intf->channel_list)->c;
4597
4598                switch (chans[chan].medium) {
4599                case IPMI_CHANNEL_MEDIUM_IPMB:
4600                        if (msg->rsp[4] & 0x04) {
4601                                /*
4602                                 * It's a response, so find the
4603                                 * requesting message and send it up.
4604                                 */
4605                                requeue = handle_ipmb_get_msg_rsp(intf, msg);
4606                        } else {
4607                                /*
4608                                 * It's a command to the SMS from some other
4609                                 * entity.  Handle that.
4610                                 */
4611                                requeue = handle_ipmb_get_msg_cmd(intf, msg);
4612                        }
4613                        break;
4614
4615                case IPMI_CHANNEL_MEDIUM_8023LAN:
4616                case IPMI_CHANNEL_MEDIUM_ASYNC:
4617                        if (msg->rsp[6] & 0x04) {
4618                                /*
4619                                 * It's a response, so find the
4620                                 * requesting message and send it up.
4621                                 */
4622                                requeue = handle_lan_get_msg_rsp(intf, msg);
4623                        } else {
4624                                /*
4625                                 * It's a command to the SMS from some other
4626                                 * entity.  Handle that.
4627                                 */
4628                                requeue = handle_lan_get_msg_cmd(intf, msg);
4629                        }
4630                        break;
4631
4632                default:
4633                        /* Check for OEM Channels.  Clients had better
4634                           register for these commands. */
4635                        if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4636                            && (chans[chan].medium
4637                                <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4638                                requeue = handle_oem_get_msg_cmd(intf, msg);
4639                        } else {
4640                                /*
4641                                 * We don't handle the channel type, so just
4642                                 * free the message.
4643                                 */
4644                                requeue = 0;
4645                        }
4646                }
4647
4648        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4649                   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4650                /* It's an asynchronous event. */
4651                requeue = handle_read_event_rsp(intf, msg);
4652        } else {
4653                /* It's a response from the local BMC. */
4654                requeue = handle_bmc_rsp(intf, msg);
4655        }
4656
4657 out:
4658        return requeue;
4659}
4660
4661/*
4662 * If there are messages in the queue or pretimeouts, handle them.
4663 */
4664static void handle_new_recv_msgs(struct ipmi_smi *intf)
4665{
4666        struct ipmi_smi_msg  *smi_msg;
4667        unsigned long        flags = 0;
4668        int                  rv;
4669        int                  run_to_completion = intf->run_to_completion;
4670
4671        /* See if any waiting messages need to be processed. */
4672        if (!run_to_completion)
4673                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4674        while (!list_empty(&intf->waiting_rcv_msgs)) {
4675                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4676                                     struct ipmi_smi_msg, link);
4677                list_del(&smi_msg->link);
4678                if (!run_to_completion)
4679                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4680                                               flags);
4681                rv = handle_one_recv_msg(intf, smi_msg);
4682                if (!run_to_completion)
4683                        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4684                if (rv > 0) {
4685                        /*
4686                         * To preserve message order, quit if we
4687                         * can't handle a message.  Add the message
4688                         * back at the head; this is safe because this
4689                         * tasklet is the only thing that pulls the
4690                         * messages.
4691                         */
4692                        list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4693                        break;
4694                } else {
4695                        if (rv == 0)
4696                                /* Message handled */
4697                                ipmi_free_smi_msg(smi_msg);
4698                        /* If rv < 0, fatal error; leave it removed but don't free it. */
4699                }
4700        }
4701        if (!run_to_completion)
4702                spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4703
4704        /*
4705         * If the pretimeout count is non-zero, decrement it by one and
4706         * deliver a pretimeout to all the users.
4707         */
4708        if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4709                struct ipmi_user *user;
4710                int index;
4711
4712                index = srcu_read_lock(&intf->users_srcu);
4713                list_for_each_entry_rcu(user, &intf->users, link) {
4714                        if (user->handler->ipmi_watchdog_pretimeout)
4715                                user->handler->ipmi_watchdog_pretimeout(
4716                                        user->handler_data);
4717                }
4718                srcu_read_unlock(&intf->users_srcu, index);
4719        }
4720}
4721
4722static void smi_recv_tasklet(struct tasklet_struct *t)
4723{
4724        unsigned long flags = 0; /* keep us warning-free. */
4725        struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4726        int run_to_completion = intf->run_to_completion;
4727        struct ipmi_smi_msg *newmsg = NULL;
4728
4729        /*
4730         * Start the next message if available.
4731         *
4732         * Do this here, not in the actual receiver, because we could
4733         * deadlock: the lower layer is allowed to hold locks while
4734         * calling message delivery.
4735         */
4736
4737        rcu_read_lock();
4738
4739        if (!run_to_completion)
4740                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4741        if (intf->curr_msg == NULL && !intf->in_shutdown) {
4742                struct list_head *entry = NULL;
4743
4744                /* Pick the high priority queue first. */
4745                if (!list_empty(&intf->hp_xmit_msgs))
4746                        entry = intf->hp_xmit_msgs.next;
4747                else if (!list_empty(&intf->xmit_msgs))
4748                        entry = intf->xmit_msgs.next;
4749
4750                if (entry) {
4751                        list_del(entry);
4752                        newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4753                        intf->curr_msg = newmsg;
4754                }
4755        }
4756
4757        if (!run_to_completion)
4758                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4759        if (newmsg)
4760                intf->handlers->sender(intf->send_info, newmsg);
4761
4762        rcu_read_unlock();
4763
4764        handle_new_recv_msgs(intf);
4765}
4766
4767/* Handle a new message from the lower layer. */
4768void ipmi_smi_msg_received(struct ipmi_smi *intf,
4769                           struct ipmi_smi_msg *msg)
4770{
4771        unsigned long flags = 0; /* keep us warning-free. */
4772        int run_to_completion = intf->run_to_completion;
4773
4774        /*
4775         * To preserve message order, we keep a queue and deliver from
4776         * a tasklet.
4777         */
4778        if (!run_to_completion)
4779                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4780        list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4781        if (!run_to_completion)
4782                spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4783                                       flags);
4784
4785        if (!run_to_completion)
4786                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4787        /*
4788         * We can get an asynchronous event or receive message in addition
4789         * to commands we send.
4790         */
4791        if (msg == intf->curr_msg)
4792                intf->curr_msg = NULL;
4793        if (!run_to_completion)
4794                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4795
4796        if (run_to_completion)
4797                smi_recv_tasklet(&intf->recv_tasklet);
4798        else
4799                tasklet_schedule(&intf->recv_tasklet);
4800}
4801EXPORT_SYMBOL(ipmi_smi_msg_received);
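
/*
 * Illustrative sketch (an addition, not part of the original file): one way
 * a lower-layer SMI driver might hand a completed response back up through
 * ipmi_smi_msg_received().  The usual pattern is to reuse the very
 * ipmi_smi_msg that was passed to the driver's ->sender() hook, so that
 * msg->data, msg->msgid and msg->user_data still describe the original
 * request when handle_one_recv_msg() looks at them.  The function name and
 * the IPMI_MAX_MSG_LENGTH bound are assumptions for the sketch; error
 * handling is elided.
 */
static void example_complete_request(struct ipmi_smi *intf,
                                     struct ipmi_smi_msg *msg,
                                     const unsigned char *rsp,
                                     unsigned int len)
{
        if (len > IPMI_MAX_MSG_LENGTH)
                len = IPMI_MAX_MSG_LENGTH; /* Truncate rather than overrun msg->rsp. */

        memcpy(msg->rsp, rsp, len);
        msg->rsp_size = len;

        /*
         * Queues the message and kicks the receive tasklet (or runs it
         * directly when run_to_completion is set).
         */
        ipmi_smi_msg_received(intf, msg);
}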
4802
4803void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4804{
4805        if (intf->in_shutdown)
4806                return;
4807
4808        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4809        tasklet_schedule(&intf->recv_tasklet);
4810}
4811EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
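
/*
 * Note (illustrative addition): a lower-layer driver calls
 * ipmi_smi_watchdog_pretimeout() when its hardware signals a watchdog
 * pre-timeout, typically from interrupt context.  The counter set here is
 * consumed in handle_new_recv_msgs() above, which fans the pre-timeout out
 * to every registered user's ipmi_watchdog_pretimeout handler.
 */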
4812
4813static struct ipmi_smi_msg *
4814smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4815                  unsigned char seq, long seqid)
4816{
4817        struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4818        if (!smi_msg)
4819                /*
4820                 * If we can't allocate the message, then just return; we
4821                 * get 4 retries, so this should be OK.
4822                 */
4823                return NULL;
4824
4825        memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4826        smi_msg->data_size = recv_msg->msg.data_len;
4827        smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4828
4829        pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
4830
4831        return smi_msg;
4832}
4833
4834static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4835                              struct list_head *timeouts,
4836                              unsigned long timeout_period,
4837                              int slot, unsigned long *flags,
4838                              bool *need_timer)
4839{
4840        struct ipmi_recv_msg *msg;
4841
4842        if (intf->in_shutdown)
4843                return;
4844
4845        if (!ent->inuse)
4846                return;
4847
4848        if (timeout_period < ent->timeout) {
4849                ent->timeout -= timeout_period;
4850                *need_timer = true;
4851                return;
4852        }
4853
4854        if (ent->retries_left == 0) {
4855                /* The message has used all its retries. */
4856                ent->inuse = 0;
4857                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4858                msg = ent->recv_msg;
4859                list_add_tail(&msg->link, timeouts);
4860                if (ent->broadcast)
4861                        ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4862                else if (is_lan_addr(&ent->recv_msg->addr))
4863                        ipmi_inc_stat(intf, timed_out_lan_commands);
4864                else
4865                        ipmi_inc_stat(intf, timed_out_ipmb_commands);
4866        } else {
4867                struct ipmi_smi_msg *smi_msg;
4868                /* More retries, send again. */
4869
4870                *need_timer = true;
4871
4872                /*
4873                 * Start with the max timeout; it is set back to the
4874                 * normal timeout after the message is sent.
4875                 */
4876                ent->timeout = MAX_MSG_TIMEOUT;
4877                ent->retries_left--;
4878                smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4879                                            ent->seqid);
4880                if (!smi_msg) {
4881                        if (is_lan_addr(&ent->recv_msg->addr))
4882                                ipmi_inc_stat(intf,
4883                                              dropped_rexmit_lan_commands);
4884                        else
4885                                ipmi_inc_stat(intf,
4886                                              dropped_rexmit_ipmb_commands);
4887                        return;
4888                }
4889
4890                spin_unlock_irqrestore(&intf->seq_lock, *flags);
4891
4892                /*
4893                 * Send the new message.  We send with a zero
4894                 * priority.  It timed out, so I doubt timing is that
4895                 * critical now, and high-priority messages are really
4896                 * only for messages to the local MC, which don't get
4897                 * resent.
4898                 */
4899                if (intf->handlers) {
4900                        if (is_lan_addr(&ent->recv_msg->addr))
4901                                ipmi_inc_stat(intf,
4902                                              retransmitted_lan_commands);
4903                        else
4904                                ipmi_inc_stat(intf,
4905                                              retransmitted_ipmb_commands);
4906
4907                        smi_send(intf, intf->handlers, smi_msg, 0);
4908                } else
4909                        ipmi_free_smi_msg(smi_msg);
4910
4911                spin_lock_irqsave(&intf->seq_lock, *flags);
4912        }
4913}
4914
4915static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4916                                 unsigned long timeout_period)
4917{
4918        struct list_head     timeouts;
4919        struct ipmi_recv_msg *msg, *msg2;
4920        unsigned long        flags;
4921        int                  i;
4922        bool                 need_timer = false;
4923
4924        if (!intf->bmc_registered) {
4925                kref_get(&intf->refcount);
4926                if (!schedule_work(&intf->bmc_reg_work)) {
4927                        kref_put(&intf->refcount, intf_free);
4928                        need_timer = true;
4929                }
4930        }
4931
4932        /*
4933         * Go through the seq table and find any messages that
4934         * have timed out, putting them in the timeouts
4935         * list.
4936         */
4937        INIT_LIST_HEAD(&timeouts);
4938        spin_lock_irqsave(&intf->seq_lock, flags);
4939        if (intf->ipmb_maintenance_mode_timeout) {
4940                if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4941                        intf->ipmb_maintenance_mode_timeout = 0;
4942                else
4943                        intf->ipmb_maintenance_mode_timeout -= timeout_period;
4944        }
4945        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4946                check_msg_timeout(intf, &intf->seq_table[i],
4947                                  &timeouts, timeout_period, i,
4948                                  &flags, &need_timer);
4949        spin_unlock_irqrestore(&intf->seq_lock, flags);
4950
4951        list_for_each_entry_safe(msg, msg2, &timeouts, link)
4952                deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4953
4954        /*
4955         * Maintenance mode handling.  Check the timeout
4956         * optimistically before we claim the lock.  A timeout
4957         * may occasionally be missed, but that only means it
4958         * gets extended by one period in that case.  No big
4959         * deal, and it avoids taking the lock most of the
4960         * time.
4961         */
4962        if (intf->auto_maintenance_timeout > 0) {
4963                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4964                if (intf->auto_maintenance_timeout > 0) {
4965                        intf->auto_maintenance_timeout
4966                                -= timeout_period;
4967                        if (!intf->maintenance_mode
4968                            && (intf->auto_maintenance_timeout <= 0)) {
4969                                intf->maintenance_mode_enable = false;
4970                                maintenance_mode_update(intf);
4971                        }
4972                }
4973                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4974                                       flags);
4975        }
4976
4977        tasklet_schedule(&intf->recv_tasklet);
4978
4979        return need_timer;
4980}
4981
4982static void ipmi_request_event(struct ipmi_smi *intf)
4983{
4984        /* No event requests when in maintenance mode. */
4985        if (intf->maintenance_mode_enable)
4986                return;
4987
4988        if (!intf->in_shutdown)
4989                intf->handlers->request_events(intf->send_info);
4990}
4991
4992static struct timer_list ipmi_timer;
4993
4994static atomic_t stop_operation;
4995
4996static void ipmi_timeout(struct timer_list *unused)
4997{
4998        struct ipmi_smi *intf;
4999        bool need_timer = false;
5000        int index;
5001
5002        if (atomic_read(&stop_operation))
5003                return;
5004
5005        index = srcu_read_lock(&ipmi_interfaces_srcu);
5006        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5007                if (atomic_read(&intf->event_waiters)) {
5008                        intf->ticks_to_req_ev--;
5009                        if (intf->ticks_to_req_ev == 0) {
5010                                ipmi_request_event(intf);
5011                                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
5012                        }
5013                        need_timer = true;
5014                }
5015
5016                need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
5017        }
5018        srcu_read_unlock(&ipmi_interfaces_srcu, index);
5019
5020        if (need_timer)
5021                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5022}
5023
5024static void need_waiter(struct ipmi_smi *intf)
5025{
5026        /* Racy, but worst case we start the timer twice. */
5027        if (!timer_pending(&ipmi_timer))
5028                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5029}
5030
5031static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
5032static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
5033
5034static void free_smi_msg(struct ipmi_smi_msg *msg)
5035{
5036        atomic_dec(&smi_msg_inuse_count);
5037        /* Try to keep as much stuff out of the panic path as possible. */
5038        if (!oops_in_progress)
5039                kfree(msg);
5040}
5041
5042struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
5043{
5044        struct ipmi_smi_msg *rv;
5045        rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
5046        if (rv) {
5047                rv->done = free_smi_msg;
5048                rv->user_data = NULL;
5049                rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
5050                atomic_inc(&smi_msg_inuse_count);
5051        }
5052        return rv;
5053}
5054EXPORT_SYMBOL(ipmi_alloc_smi_msg);
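
/*
 * Note (added for illustration, not from the original source): lower-layer
 * drivers typically use ipmi_alloc_smi_msg() when they need to issue a
 * request of their own, for example to read the event message buffer when
 * the BMC flags pending events.  They fill in ->data/->data_size for the
 * request, copy the reply into ->rsp/->rsp_size, and hand the message to
 * ipmi_smi_msg_received(); once the handler is finished with it, the
 * message is released through its ->done callback, free_smi_msg() above,
 * keeping smi_msg_inuse_count balanced.
 */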
5055
5056static void free_recv_msg(struct ipmi_recv_msg *msg)
5057{
5058        atomic_dec(&recv_msg_inuse_count);
5059        /* Try to keep as much stuff out of the panic path as possible. */
5060        if (!oops_in_progress)
5061                kfree(msg);
5062}
5063
5064static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
5065{
5066        struct ipmi_recv_msg *rv;
5067
5068        rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
5069        if (rv) {
5070                rv->user = NULL;
5071                rv->done = free_recv_msg;
5072                atomic_inc(&recv_msg_inuse_count);
5073        }
5074        return rv;
5075}
5076
5077void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
5078{
5079        if (msg->user && !oops_in_progress)
5080                kref_put(&msg->user->refcount, free_user);
5081        msg->done(msg);
5082}
5083EXPORT_SYMBOL(ipmi_free_recv_msg);
5084
5085static atomic_t panic_done_count = ATOMIC_INIT(0);
5086
5087static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
5088{
5089        atomic_dec(&panic_done_count);
5090}
5091
5092static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
5093{
5094        atomic_dec(&panic_done_count);
5095}
5096
5097/*
5098 * Inside a panic, send a message and wait for a response.
5099 */
5100static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
5101                                        struct ipmi_addr *addr,
5102                                        struct kernel_ipmi_msg *msg)
5103{
5104        struct ipmi_smi_msg  smi_msg;
5105        struct ipmi_recv_msg recv_msg;
5106        int rv;
5107
5108        smi_msg.done = dummy_smi_done_handler;
5109        recv_msg.done = dummy_recv_done_handler;
5110        atomic_add(2, &panic_done_count);
5111        rv = i_ipmi_request(NULL,
5112                            intf,
5113                            addr,
5114                            0,
5115                            msg,
5116                            intf,
5117                            &smi_msg,
5118                            &recv_msg,
5119                            0,
5120                            intf->addrinfo[0].address,
5121                            intf->addrinfo[0].lun,
5122                            0, 1); /* Don't retry, and don't wait. */
5123        if (rv)
5124                atomic_sub(2, &panic_done_count);
5125        else if (intf->handlers->flush_messages)
5126                intf->handlers->flush_messages(intf->send_info);
5127
5128        while (atomic_read(&panic_done_count) != 0)
5129                ipmi_poll(intf);
5130}
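
/*
 * Note (illustrative addition): the function above busy-polls with
 * ipmi_poll() rather than sleeping because at panic time interrupts and
 * the scheduler cannot be relied on; panic_event() below has already
 * switched every interface to run-to-completion mode, so polling is the
 * only way to make progress.
 */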
5131
5132static void event_receiver_fetcher(struct ipmi_smi *intf,
5133                                   struct ipmi_recv_msg *msg)
5134{
5135        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5136            && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
5137            && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
5138            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5139                /* A get event receiver command, save it. */
5140                intf->event_receiver = msg->msg.data[1];
5141                intf->event_receiver_lun = msg->msg.data[2] & 0x3;
5142        }
5143}
5144
5145static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
5146{
5147        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5148            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
5149            && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
5150            && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5151                /*
5152                 * A get device id command, save if we are an event
5153                 * receiver or generator.
5154                 */
5155                intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
5156                intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
5157        }
5158}
5159
5160static void send_panic_events(struct ipmi_smi *intf, char *str)
5161{
5162        struct kernel_ipmi_msg msg;
5163        unsigned char data[16];
5164        struct ipmi_system_interface_addr *si;
5165        struct ipmi_addr addr;
5166        char *p = str;
5167        struct ipmi_ipmb_addr *ipmb;
5168        int j;
5169
5170        if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
5171                return;
5172
5173        si = (struct ipmi_system_interface_addr *) &addr;
5174        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5175        si->channel = IPMI_BMC_CHANNEL;
5176        si->lun = 0;
5177
5178        /* Fill in an event telling that we have failed. */
5179        msg.netfn = 0x04; /* Sensor or Event. */
5180        msg.cmd = 2; /* Platform event command. */
5181        msg.data = data;
5182        msg.data_len = 8;
5183        data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
5184        data[1] = 0x03; /* This is for IPMI 1.0. */
5185        data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
5186        data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
5187        data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
5188
5189        /*
5190         * Put a few breadcrumbs in.  Hopefully later we can add more things
5191         * to make the panic events more useful.
5192         */
5193        if (str) {
5194                data[3] = str[0];
5195                data[6] = str[1];
5196                data[7] = str[2];
5197        }
5198
5199        /* Send the event announcing the panic. */
5200        ipmi_panic_request_and_wait(intf, &addr, &msg);
5201
5202        /*
5203         * On every interface, dump a bunch of OEM events holding the
5204         * string.
5205         */
5206        if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
5207                return;
5208
5209        /*
5210         * intf_num is used as a marker to tell if the
5211         * interface is valid.  Thus we need a read barrier to
5212         * make sure data fetched before checking intf_num
5213         * won't be used.
5214         */
5215        smp_rmb();
5216
5217        /*
5218         * First job here is to figure out where to send the
5219         * OEM events.  There's no way in IPMI to send OEM
5220         * events using an event send command, so we have to
5221         * find the SEL to put them in and stick them in
5222         * there.
5223         */
5224
5225        /* Get capabilities from the get device id. */
5226        intf->local_sel_device = 0;
5227        intf->local_event_generator = 0;
5228        intf->event_receiver = 0;
5229
5230        /* Request the device info from the local MC. */
5231        msg.netfn = IPMI_NETFN_APP_REQUEST;
5232        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5233        msg.data = NULL;
5234        msg.data_len = 0;
5235        intf->null_user_handler = device_id_fetcher;
5236        ipmi_panic_request_and_wait(intf, &addr, &msg);
5237
5238        if (intf->local_event_generator) {
5239                /* Request the event receiver from the local MC. */
5240                msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5241                msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5242                msg.data = NULL;
5243                msg.data_len = 0;
5244                intf->null_user_handler = event_receiver_fetcher;
5245                ipmi_panic_request_and_wait(intf, &addr, &msg);
5246        }
5247        intf->null_user_handler = NULL;
5248
5249        /*
5250         * Validate the event receiver.  The low bit must not
5251         * be 1 (it must be a valid IPMB address), it cannot
5252         * be zero, and it must not be my address.
5253         */
5254        if (((intf->event_receiver & 1) == 0)
5255            && (intf->event_receiver != 0)
5256            && (intf->event_receiver != intf->addrinfo[0].address)) {
5257                /*
5258                 * The event receiver is valid, send an IPMB
5259                 * message.
5260                 */
5261                ipmb = (struct ipmi_ipmb_addr *) &addr;
5262                ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5263                ipmb->channel = 0; /* FIXME - is this right? */
5264                ipmb->lun = intf->event_receiver_lun;
5265                ipmb->slave_addr = intf->event_receiver;
5266        } else if (intf->local_sel_device) {
5267                /*
5268                 * The event receiver was not valid (or was
5269                 * me), but I am an SEL device, so just dump it
5270                 * in my SEL.
5271                 */
5272                si = (struct ipmi_system_interface_addr *) &addr;
5273                si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5274                si->channel = IPMI_BMC_CHANNEL;
5275                si->lun = 0;
5276        } else
5277                return; /* Nowhere to send the event. */
5278
5279        msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5280        msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5281        msg.data = data;
5282        msg.data_len = 16;
5283
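        /*
         * Layout of each 16-byte SEL record built below (summary added for
         * clarity, derived from the assignments in the loop):
         *   data[0..1]  = 0x00 0x00  record ID (zeroed here)
         *   data[2]     = 0xf0       OEM record without timestamp
         *   data[3]     = our slave address
         *   data[4]     = sequence number of this chunk
         *   data[5..15] = next (up to) 11 bytes of the panic string,
         *                 zero-padded by strncpy()
         */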
5284        j = 0;
5285        while (*p) {
5286                int size = strlen(p);
5287
5288                if (size > 11)
5289                        size = 11;
5290                data[0] = 0;
5291                data[1] = 0;
5292                data[2] = 0xf0; /* OEM event without timestamp. */
5293                data[3] = intf->addrinfo[0].address;
5294                data[4] = j++; /* sequence # */
5295                /*
5296                 * Always give 11 bytes, so strncpy will fill
5297                 * it with zeroes for me.
5298                 */
5299                strncpy(data+5, p, 11);
5300                p += size;
5301
5302                ipmi_panic_request_and_wait(intf, &addr, &msg);
5303        }
5304}
5305
5306static int has_panicked;
5307
5308static int panic_event(struct notifier_block *this,
5309                       unsigned long         event,
5310                       void                  *ptr)
5311{
5312        struct ipmi_smi *intf;
5313        struct ipmi_user *user;
5314
5315        if (has_panicked)
5316                return NOTIFY_DONE;
5317        has_panicked = 1;
5318
5319        /* For every registered interface, set it to run to completion. */
5320        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5321                if (!intf->handlers || intf->intf_num == -1)
5322                        /* Interface is not ready. */
5323                        continue;
5324
5325                if (!intf->handlers->poll)
5326                        continue;
5327
5328                /*
5329                 * If we were interrupted while locking xmit_msgs_lock or
5330                 * waiting_rcv_msgs_lock, the corresponding list may be
5331                 * corrupted.  In this case, drop the items on the
5332                 * list for safety.
5333                 */
5334                if (!spin_trylock(&intf->xmit_msgs_lock)) {
5335                        INIT_LIST_HEAD(&intf->xmit_msgs);
5336                        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5337                } else
5338                        spin_unlock(&intf->xmit_msgs_lock);
5339
5340                if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5341                        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5342                else
5343                        spin_unlock(&intf->waiting_rcv_msgs_lock);
5344
5345                intf->run_to_completion = 1;
5346                if (intf->handlers->set_run_to_completion)
5347                        intf->handlers->set_run_to_completion(intf->send_info,
5348                                                              1);
5349
5350                list_for_each_entry_rcu(user, &intf->users, link) {
5351                        if (user->handler->ipmi_panic_handler)
5352                                user->handler->ipmi_panic_handler(
5353                                        user->handler_data);
5354                }
5355
5356                send_panic_events(intf, ptr);
5357        }
5358
5359        return NOTIFY_DONE;
5360}
5361
5362/* Must be called with ipmi_interfaces_mutex held. */
5363static int ipmi_register_driver(void)
5364{
5365        int rv;
5366
5367        if (drvregistered)
5368                return 0;
5369
5370        rv = driver_register(&ipmidriver.driver);
5371        if (rv)
5372                pr_err("Could not register IPMI driver\n");
5373        else
5374                drvregistered = true;
5375        return rv;
5376}
5377
5378static struct notifier_block panic_block = {
5379        .notifier_call  = panic_event,
5380        .next           = NULL,
5381        .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5382};
5383
5384static int ipmi_init_msghandler(void)
5385{
5386        int rv;
5387
5388        mutex_lock(&ipmi_interfaces_mutex);
5389        rv = ipmi_register_driver();
5390        if (rv)
5391                goto out;
5392        if (initialized)
5393                goto out;
5394
5395        rv = init_srcu_struct(&ipmi_interfaces_srcu);
5396        if (rv)
5397                goto out;
5398
5399        remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5400        if (!remove_work_wq) {
5401                pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
5402                rv = -ENOMEM;
5403                goto out_wq;
5404        }
5405
5406        timer_setup(&ipmi_timer, ipmi_timeout, 0);
5407        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5408
5409        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5410
5411        initialized = true;
5412
5413out_wq:
5414        if (rv)
5415                cleanup_srcu_struct(&ipmi_interfaces_srcu);
5416out:
5417        mutex_unlock(&ipmi_interfaces_mutex);
5418        return rv;
5419}
5420
5421static int __init ipmi_init_msghandler_mod(void)
5422{
5423        int rv;
5424
5425        pr_info("version " IPMI_DRIVER_VERSION "\n");
5426
5427        mutex_lock(&ipmi_interfaces_mutex);
5428        rv = ipmi_register_driver();
5429        mutex_unlock(&ipmi_interfaces_mutex);
5430
5431        return rv;
5432}
5433
5434static void __exit cleanup_ipmi(void)
5435{
5436        int count;
5437
5438        if (initialized) {
5439                destroy_workqueue(remove_work_wq);
5440
5441                atomic_notifier_chain_unregister(&panic_notifier_list,
5442                                                 &panic_block);
5443
5444                /*
5445                 * This can't be called if any interfaces exist, so no worry
5446                 * about shutting down the interfaces.
5447                 */
5448
5449                /*
5450                 * Tell the timer to stop, then wait for it to stop.  This
5451                 * avoids problems with race conditions removing the timer
5452                 * here.
5453                 */
5454                atomic_set(&stop_operation, 1);
5455                del_timer_sync(&ipmi_timer);
5456
5457                initialized = false;
5458
5459                /* Check for buffer leaks. */
5460                count = atomic_read(&smi_msg_inuse_count);
5461                if (count != 0)
5462                        pr_warn("SMI message count %d at exit\n", count);
5463                count = atomic_read(&recv_msg_inuse_count);
5464                if (count != 0)
5465                        pr_warn("recv message count %d at exit\n", count);
5466
5467                cleanup_srcu_struct(&ipmi_interfaces_srcu);
5468        }
5469        if (drvregistered)
5470                driver_unregister(&ipmidriver.driver);
5471}
5472module_exit(cleanup_ipmi);
5473
5474module_init(ipmi_init_msghandler_mod);
5475MODULE_LICENSE("GPL");
5476MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5477MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
5478MODULE_VERSION(IPMI_DRIVER_VERSION);
5479MODULE_SOFTDEP("post: ipmi_devintf");
5480