linux/drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
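
/*
 * For illustration: the three constants above are just the ASCII bytes
 * of "UnisysSpar64" in little-endian order, as reported by cpuid leaf
 * UNISYS_SPAR_LEAF_ID on an s-Par guest: "Unis" = 0x73696e55,
 * "ysSp" = 0x70537379, "ar64" = 0x34367261.
 */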

/*
 * Module parameters
 */
static int visorchipset_major;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned int minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
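
/*
 * A minimal sketch (an assumption, not copied from the actual
 * controlvm_periodic_work() implementation) of how these two rates
 * are meant to interact:
 *
 *      if (time_after(jiffies, most_recent_message_jiffies +
 *                              MIN_IDLE_SECONDS * HZ))
 *              poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
 *      else
 *              poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
 */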

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[];
};
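
/*
 * The context is allocated with its payload appended in a single
 * block; a hypothetical allocation for "bytes" bytes of parameter
 * data would look like:
 *
 *      ctx = kzalloc(sizeof(struct parser_context) + bytes,
 *                    GFP_KERNEL | __GFP_NORETRY);
 */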

static struct delayed_work periodic_controlvm_work;

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 *ptr;                /* pointer to base address of payload pool */
        u64 offset;             /*
                                 * offset from beginning of controlvm
                                 * channel to beginning of payload pool
                                 */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;
static unsigned long controlvm_payload_bytes_buffered;

/*
 * The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/*
 * This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* bytes of putfile data in parser_ctx not yet consumed */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/*
 * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /*
         * head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data
         */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /*
         * <0 = failed, 0 = in-progress, >0 = successful;
         * note that this must be set while holding req_list_lock, and if
         * you set it to <0, it is your responsibility to also free up all
         * of the other objects in this struct (like input_buffer_list,
         * active_buf.parser_ctx) before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */

/* show/store routines for the sysfs attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          tool_action),
                 &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(toolaction);
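
/*
 * Illustrative only: through the "install" attribute group defined
 * below, this attribute is visible to userspace as
 * /sys/devices/platform/visorchipset/install/toolaction, so e.g.
 *
 *      cat /sys/devices/platform/visorchipset/install/toolaction
 *
 * reads the tool_action byte back out of the controlvm channel.
 */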

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        /*
         * Fetch the current indication first so the write below only
         * changes boot_to_tool; writing the struct without doing so
         * would push uninitialized stack bytes into the other fields.
         */
        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          efi_spar_ind), &efi_spar_indication,
                 sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_error),
                 &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id = 0;

        visorchannel_read
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_text_id),
                 &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_text_id),
                 &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps = 0;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write
                (controlvm_channel,
                 offsetof(struct spar_controlvm_channel_protocol,
                          installation_remaining_steps),
                 &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(remaining_steps);

static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

/*
 * Selects which string field within the controlvm parameters payload a
 * parser_param_start() call should walk.
 */
enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME appears to be used */
};

static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return;

        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }
}

static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)(value))[value_length] = '\0';
        return value;
}
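
/*
 * Typical usage (as in bus_configure() below): select a string with
 * parser_param_start(ctx, PARSERSTRING_NAME), then copy it out with
 * parser_string_get(ctx), which returns a kmalloc'd, NUL-terminated
 * buffer that the caller owns and must eventually kfree().
 */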

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if ((vdev->chipset_bus_no == bus_no) &&
            (vdev->chipset_dev_no == dev_no))
                return 1;

        return 0;
}
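
/**
 * visorbus_get_device_by_id() - find the visor_device with the given
 *                               bus and device numbers
 * @bus_no: bus number to match
 * @dev_no: device number to match (BUS_ROOT_DEVICE denotes the bus itself)
 * @from:   if non-NULL, the search resumes after this device
 *
 * Return: the matching visor_device, or NULL if none was found.
 */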
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}

static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (visorchannel_signalinsert(controlvm_channel,
                                      CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto out_respond;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /*
         * Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /*
         * Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (visorchannel_signalinsert(controlvm_channel,
                                      CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (visorchannel_signalinsert(controlvm_channel,
                                      CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};
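
/*
 * The crash area within the controlvm channel holds
 * CONTROLVM_CRASHMSG_MAX saved messages: the bus-create message first,
 * immediately followed by the device-create message, which is why the
 * CRASH_DEV case below advances the offset by
 * sizeof(struct controlvm_message).
 */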
static void
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_count),
                              &local_crash_msg_count, sizeof(u16)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
                                 local_crash_msg_count,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_offset),
                              &local_crash_msg_offset, sizeof(u32)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (typ == CRASH_BUS) {
                if (visorchannel_write(controlvm_channel,
                                       local_crash_msg_offset,
                                       msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        } else {
                local_crash_msg_offset += sizeof(struct controlvm_message);
                if (visorchannel_write(controlvm_channel,
                                       local_crash_msg_offset,
                                       msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        }
}

static void
bus_responder(enum controlvm_id cmd_id,
              struct controlvm_message_header *pending_msg_hdr,
              int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visor_device *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no = p->chipset_bus_no;
        u32 dev_no = p->chipset_dev_no;

        if (!p->pending_msg_hdr)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr->id != cmd_id)
                return;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (visorchannel_signalinsert(controlvm_channel,
                                      CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
device_responder(enum controlvm_id cmd_id,
                 struct controlvm_message_header *pending_msg_hdr,
                 int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

static void
bus_epilog(struct visor_device *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        struct controlvm_message_header *pmsg_hdr = NULL;

        if (!bus_info) {
                /*
                 * Relying on a valid passed-in response code here; we
                 * lazily reuse msg_hdr for this failure path (TODO: is
                 * that safe?).
                 */
                pmsg_hdr = msg_hdr;
                goto out_respond;
        }

        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = bus_info->pending_msg_hdr;
                goto out_respond;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
                                         bus_info->chipset_bus_no,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        chipset_bus_create(bus_info);
                        break;
                case CONTROLVM_BUS_DESTROY:
                        chipset_bus_destroy(bus_info);
                        break;
                }
        }

out_respond:
        bus_responder(cmd, pmsg_hdr, response);
}

static void
device_epilog(struct visor_device *dev_info,
              struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct controlvm_message_header *pmsg_hdr = NULL;

        if (!dev_info) {
                /*
                 * Relying on a valid passed-in response code here; we
                 * lazily reuse msg_hdr for this failure path (TODO: is
                 * that safe?).
                 */
                pmsg_hdr = msg_hdr;
                goto out_respond;
        }

        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = dev_info->pending_msg_hdr;
                goto out_respond;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto out_respond;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        chipset_device_create(dev_info);
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                chipset_device_resume(dev_info);
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /*
                                 * technically this is standby case
                                 * where server is lost
                                 */
                                chipset_device_pause(dev_info);
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        chipset_device_destroy(dev_info);
                        break;
                }
        }

out_respond:
        device_responder(cmd, pmsg_hdr, response);
}
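
/*
 * bus_create() - handle CONTROLVM_BUS_CREATE: allocate a visor_device
 * for the new bus, create its visorchannel, and respond via bus_epilog().
 */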
static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto out_bus_epilog;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto out_bus_epilog;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(bus_info);
                bus_info = NULL;
                goto out_bus_epilog;
        }
        bus_info->visorchannel = visorchannel;
        if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
                save_crash_message(inmsg, CRASH_BUS);

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

out_bus_epilog:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);

        /* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                visorchannel_set_clientpartition
                        (bus_info->visorchannel,
                         cmd->configure_bus.guest_handle);
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto out_respond;
        }

        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto out_respond;
        }

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto out_respond;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto out_respond;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        dev_info->inst = cmd->create_device.dev_inst_uuid;

        /* not sure where the best place is to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        visorchannel =
               visorchannel_create_with_lock(cmd->create_device.channel_addr,
                                             cmd->create_device.channel_bytes,
                                             GFP_KERNEL,
                                             cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(dev_info);
                dev_info = NULL;
                goto out_respond;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        if (uuid_le_cmp(cmd->create_device.data_type_uuid,
                        spar_vhba_channel_protocol_uuid) == 0)
                save_crash_message(inmsg, CRASH_DEV);

        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
out_respond:
        device_epilog(dev_info, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

/**
 * initialize_controlvm_payload_info() - init controlvm_payload_info struct
 * @phys_addr: the physical address of controlvm channel
 * @offset:    the offset to payload
 * @bytes:     the size of the payload in bytes
 * @info:      the returning valid struct
 *
 * When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.
 *
 * Return: CONTROLVM_RESP_SUCCESS for success or a negative for failure
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
                                  struct visor_controlvm_payload_info *info)
{
        u8 *payload = NULL;

        if (!info)
                return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
        if ((offset == 0) || (bytes == 0))
                return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

        payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
        if (!payload)
                return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

        return CONTROLVM_RESP_SUCCESS;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
        if (info->ptr) {
                memunmap(info->ptr);
                info->ptr = NULL;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
        u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
        u64 payload_offset = 0;
        u32 payload_bytes = 0;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_offset),
                              &payload_offset, sizeof(payload_offset)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_bytes),
                              &payload_bytes, sizeof(payload_bytes)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        initialize_controlvm_payload_info(phys_addr,
                                          payload_offset, payload_bytes,
                                          &controlvm_payload_info);
}

/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to the
 * parahotplug/deviceenabled (or devicedisabled) sysfs attribute,
 * which causes parahotplug_request_complete to be called, at which
 * point the appropriate CONTROLVM message is retrieved from the list
 * and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/**
 * parahotplug_next_id() - generate a unique int to match an outstanding
 *                         CONTROLVM message with a udev script response
 *
 * Return: a unique integer value
 */
static int
parahotplug_next_id(void)
{
        static atomic_t id = ATOMIC_INIT(0);

        return atomic_inc_return(&id);
}

/**
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long
parahotplug_next_expiration(void)
{
        return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/**
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
        struct parahotplug_request *req;

        req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
        if (!req)
                return NULL;

        req->id = parahotplug_next_id();
        req->expiration = parahotplug_next_expiration();
        req->msg = *msg;

        return req;
}

/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
        kfree(req);
}

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */

/**
 * parahotplug_request_complete() - mark request as complete
 * @id:     the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the sysfs store handlers, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int
parahotplug_request_complete(int id, u16 active)
{
        struct list_head *pos;
        struct list_head *tmp;

        spin_lock(&parahotplug_request_list_lock);

        /* Look for a request matching "id". */
        list_for_each_safe(pos, tmp, &parahotplug_request_list) {
                struct parahotplug_request *req =
                    list_entry(pos, struct parahotplug_request, list);
                if (req->id == id) {
                        /*
                         * Found a match. Remove it from the list and
                         * respond.
                         */
                        list_del(pos);
                        spin_unlock(&parahotplug_request_list_lock);
                        req->msg.cmd.device_change_state.state.active = active;
                        if (req->msg.hdr.flags.response_expected)
                                controlvm_respond_physdev_changestate(
                                        &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
                                        req->msg.cmd.device_change_state.state);
                        parahotplug_request_destroy(req);
                        return 0;
                }
        }

        spin_unlock(&parahotplug_request_list_lock);
        return -EINVAL;
}

/**
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        unsigned int id;
        int err;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        err = parahotplug_request_complete(id, 0);
        if (err < 0)
                return err;
        return count;
}
static DEVICE_ATTR_WO(devicedisabled);

/**
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int id;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        parahotplug_request_complete(id, 1);
        return count;
}
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_parahotplug_group,
        NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
        .dev.release = visorchipset_dev_release,
};
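
/*
 * For reference, registering the platform device above surfaces the
 * attribute groups as:
 *      /sys/devices/platform/visorchipset/install/{toolaction,
 *              boottotool, error, textid, remaining_steps}
 *      /sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *              deviceenabled}
 */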

/**
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user level script to do the disable/enable specified
 * in the parahotplug_request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
        struct controlvm_message_packet *cmd = &req->msg.cmd;
        char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
            env_func[40];
        char *envp[] = {
                env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
        };

        sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
        sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
        sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
                cmd->device_change_state.state.active);
        sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
                cmd->device_change_state.bus_no);
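        /*
         * dev_no packs the PCI device and function numbers: the low
         * three bits are the function and the remaining bits the
         * device, mirroring the usual PCI devfn encoding.
         */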
        sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
                cmd->device_change_state.dev_no >> 3);
        sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
                cmd->device_change_state.dev_no & 0x7);

        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);
}

/**
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
        struct parahotplug_request *req;

        req = parahotplug_request_create(inmsg);

        if (!req)
                return;

        if (inmsg->cmd.device_change_state.state.active) {
                /*
                 * For enable messages, just respond with success
                 * right away. This is a bit of a hack, but there are
                 * issues with the early enable messages we get (with
                 * either the udev script not detecting that the device
                 * is up, or not getting called at all). Fortunately
                 * the messages that get lost don't matter anyway, as
                 * devices are automatically enabled at initialization.
                 */
                parahotplug_request_kickoff(req);
                controlvm_respond_physdev_changestate
                        (&inmsg->hdr,
                         CONTROLVM_RESP_SUCCESS,
                         inmsg->cmd.device_change_state.state);
                parahotplug_request_destroy(req);
        } else {
                /*
                 * For disable messages, add the request to the
                 * request list before kicking off the udev script. It
                 * won't get responded to until the script has
                 * indicated it's done.
                 */
                spin_lock(&parahotplug_request_list_lock);
                list_add_tail(&req->list, &parahotplug_request_list);
                spin_unlock(&parahotplug_request_list_lock);

                parahotplug_request_kickoff(req);
        }
}

/**
 * visorchipset_chipset_ready() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_ready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
        return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
        char env_selftest[20];
        char *envp[] = { env_selftest, NULL };

        sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);
        return CONTROLVM_RESP_SUCCESS;
}

/**
 * visorchipset_chipset_notready() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_notready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
        return CONTROLVM_RESP_SUCCESS;
}

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_ready();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, rc);
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_selftest();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_notready();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, rc);
}
1506
1507static inline unsigned int
1508issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1509{
1510        struct vmcall_io_controlvm_addr_params params;
1511        int result = VMCALL_SUCCESS;
1512        u64 physaddr;
1513
1514        physaddr = virt_to_phys(&params);
1515        ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1516        if (VMCALL_SUCCESSFUL(result)) {
1517                *control_addr = params.address;
1518                *control_bytes = params.channel_bytes;
1519        }
1520        return result;
1521}
1522
1523static u64 controlvm_get_channel_address(void)
1524{
1525        u64 addr = 0;
1526        u32 size = 0;
1527
1528        if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1529                return 0;
1530
1531        return addr;
1532}
1533
1534static void
1535setup_crash_devices_work_queue(struct work_struct *work)
1536{
1537        struct controlvm_message local_crash_bus_msg;
1538        struct controlvm_message local_crash_dev_msg;
1539        struct controlvm_message msg;
1540        u32 local_crash_msg_offset;
1541        u16 local_crash_msg_count;
1542
1543        POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1544
1545        /* send init chipset msg */
1546        msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1547        msg.cmd.init_chipset.bus_count = 23;
1548        msg.cmd.init_chipset.switch_count = 0;
1549
1550        chipset_init(&msg);
1551
1552        /* get saved message count */
1553        if (visorchannel_read(controlvm_channel,
1554                              offsetof(struct spar_controlvm_channel_protocol,
1555                                       saved_crash_message_count),
1556                              &local_crash_msg_count, sizeof(u16)) < 0) {
1557                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1558                                 POSTCODE_SEVERITY_ERR);
1559                return;
1560        }
1561
1562        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1563                POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1564                                 local_crash_msg_count,
1565                                 POSTCODE_SEVERITY_ERR);
1566                return;
1567        }
1568
1569        /* get saved crash message offset */
1570        if (visorchannel_read(controlvm_channel,
1571                              offsetof(struct spar_controlvm_channel_protocol,
1572                                       saved_crash_message_offset),
1573                              &local_crash_msg_offset, sizeof(u32)) < 0) {
1574                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1575                                 POSTCODE_SEVERITY_ERR);
1576                return;
1577        }
1578
1579        /* read create device message for storage bus offset */
1580        if (visorchannel_read(controlvm_channel,
1581                              local_crash_msg_offset,
1582                              &local_crash_bus_msg,
1583                              sizeof(struct controlvm_message)) < 0) {
1584                POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1585                                 POSTCODE_SEVERITY_ERR);
1586                return;
1587        }
1588
1589        /* read create device message for storage device */
1590        if (visorchannel_read(controlvm_channel,
1591                              local_crash_msg_offset +
1592                              sizeof(struct controlvm_message),
1593                              &local_crash_dev_msg,
1594                              sizeof(struct controlvm_message)) < 0) {
1595                POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1596                                 POSTCODE_SEVERITY_ERR);
1597                return;
1598        }
1599
1600        /* reuse IOVM create bus message */
1601        if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1602                bus_create(&local_crash_bus_msg);
1603        } else {
1604                POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1605                                 POSTCODE_SEVERITY_ERR);
1606                return;
1607        }
1608
1609        /* reuse create device message for storage device */
1610        if (local_crash_dev_msg.cmd.create_device.channel_addr) {
1611                my_device_create(&local_crash_dev_msg);
1612        } else {
1613                POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1614                                 POSTCODE_SEVERITY_ERR);
1615                return;
1616        }
1617        POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1618}
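
/*
 * Sketch of the saved-crash-message area consumed above: the
 * CONTROLVM_CRASHMSG_MAX messages live back to back in the controlvm
 * channel, starting at saved_crash_message_offset:
 *
 *   saved_crash_message_offset + 0
 *           -> bus-create message, replayed via bus_create()
 *   saved_crash_message_offset + sizeof(struct controlvm_message)
 *           -> device-create message, replayed via my_device_create()
 */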
1619
1620void
1621bus_create_response(struct visor_device *bus_info, int response)
1622{
1623        if (response >= 0)
1624                bus_info->state.created = 1;
1625
1626        bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1627                      response);
1628
1629        kfree(bus_info->pending_msg_hdr);
1630        bus_info->pending_msg_hdr = NULL;
1631}
1632
1633void
1634bus_destroy_response(struct visor_device *bus_info, int response)
1635{
1636        bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1637                      response);
1638
1639        kfree(bus_info->pending_msg_hdr);
1640        bus_info->pending_msg_hdr = NULL;
1641}
1642
1643void
1644device_create_response(struct visor_device *dev_info, int response)
1645{
1646        if (response >= 0)
1647                dev_info->state.created = 1;
1648
1649        device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1650                         response);
1651
1652        kfree(dev_info->pending_msg_hdr);
1653        dev_info->pending_msg_hdr = NULL;
1654}
1655
1656void
1657device_destroy_response(struct visor_device *dev_info, int response)
1658{
1659        device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1660                         response);
1661
1662        kfree(dev_info->pending_msg_hdr);
1663        dev_info->pending_msg_hdr = NULL;
1664}
1665
1666void
1667device_pause_response(struct visor_device *dev_info,
1668                      int response)
1669{
1670        device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1671                                     dev_info, response,
1672                                     segment_state_standby);
1673
1674        kfree(dev_info->pending_msg_hdr);
1675        dev_info->pending_msg_hdr = NULL;
1676}
1677
1678void
1679device_resume_response(struct visor_device *dev_info, int response)
1680{
1681        device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1682                                     dev_info, response,
1683                                     segment_state_running);
1684
1685        kfree(dev_info->pending_msg_hdr);
1686        dev_info->pending_msg_hdr = NULL;
1687}
1688
1689static int
1690visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
1691{
1692        unsigned long physaddr = 0;
1693        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1694        u64 addr = 0;
1695
1697        if (offset & (PAGE_SIZE - 1))
1698                return -ENXIO;  /* need aligned offsets */
1699
1700        switch (offset) {
1701        case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
1702                vma->vm_flags |= VM_IO;
1703                if (!*file_controlvm_channel)
1704                        return -ENXIO;
1705
1706                visorchannel_read
1707                        (*file_controlvm_channel,
1708                         offsetof(struct spar_controlvm_channel_protocol,
1709                                  gp_control_channel),
1710                         &addr, sizeof(addr));
1711                if (!addr)
1712                        return -ENXIO;
1713
1714                physaddr = (unsigned long)addr;
1715                if (remap_pfn_range(vma, vma->vm_start,
1716                                    physaddr >> PAGE_SHIFT,
1717                                    vma->vm_end - vma->vm_start,
1718                                    /* pgprot_noncached */
1719                                    (vma->vm_page_prot))) {
1720                        return -EAGAIN;
1721                }
1722                break;
1723        default:
1724                return -ENXIO;
1725        }
1726        return 0;
1727}
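
/*
 * Minimal userspace sketch of the mapping above (hypothetical; assumes
 * a /dev/visorchipset node exists for this driver's char device):
 *
 *   int fd = open("/dev/visorchipset", O_RDWR);
 *   void *gp = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 *
 * On success, gp points at the GP control channel whose physical
 * address was read from the controlvm channel header.
 */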
1728
1729static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
1730{
1731        u64 result = VMCALL_SUCCESS;
1732        u64 physaddr = 0;
1733
1734        ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
1735                        result);
1736        return result;
1737}
1738
1739static inline int issue_vmcall_update_physical_time(u64 adjustment)
1740{
1741        int result = VMCALL_SUCCESS;
1742
1743        ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
1744        return result;
1745}
1746
1747static long visorchipset_ioctl(struct file *file, unsigned int cmd,
1748                               unsigned long arg)
1749{
1750        u64 adjustment;
1751        s64 vrtc_offset;
1752
1753        switch (cmd) {
1754        case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
1755                /* get the physical rtc offset */
1756                vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
1757                if (copy_to_user((void __user *)arg, &vrtc_offset,
1758                                 sizeof(vrtc_offset))) {
1759                        return -EFAULT;
1760                }
1761                return 0;
1762        case VMCALL_UPDATE_PHYSICAL_TIME:
1763                if (copy_from_user(&adjustment, (void __user *)arg,
1764                                   sizeof(adjustment))) {
1765                        return -EFAULT;
1766                }
1767                return issue_vmcall_update_physical_time(adjustment);
1768        default:
1769                return -EFAULT;
1770        }
1771}
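
/*
 * Userspace sketch of the two ioctls above (hypothetical fd; note the
 * raw VMCALL_* values double as the ioctl command numbers here):
 *
 *   s64 vrtc_offset;
 *   u64 adjustment = ...;
 *
 *   ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &vrtc_offset);
 *   ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
 */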
1772
1773static const struct file_operations visorchipset_fops = {
1774        .owner = THIS_MODULE,
1775        .open = visorchipset_open,
1778        .unlocked_ioctl = visorchipset_ioctl,
1779        .release = visorchipset_release,
1780        .mmap = visorchipset_mmap,
1781};
1782
1783static int
1784visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
1785{
1786        int rc = 0;
1787
1788        file_controlvm_channel = controlvm_channel;
1789        cdev_init(&file_cdev, &visorchipset_fops);
1790        file_cdev.owner = THIS_MODULE;
1791        if (MAJOR(major_dev) == 0) {
1792                rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
1793                /* dynamic major device number registration required */
1794                if (rc < 0)
1795                        return rc;
1796        } else {
1797                /* static major device number registration required */
1798                rc = register_chrdev_region(major_dev, 1, "visorchipset");
1799                if (rc < 0)
1800                        return rc;
1801        }
1802        rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
1803        if (rc < 0) {
1804                unregister_chrdev_region(major_dev, 1);
1805                return rc;
1806        }
1807        return 0;
1808}
1809
1810static void
1811visorchipset_file_cleanup(dev_t major_dev)
1812{
1813        if (file_cdev.ops)
1814                cdev_del(&file_cdev);
1815        file_cdev.ops = NULL;
1816        unregister_chrdev_region(major_dev, 1);
1817}
1818
1819static struct parser_context *
1820parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1821{
1822        int allocbytes = sizeof(struct parser_context) + bytes;
1823        struct parser_context *ctx;
1824
1825        if (retry)
1826                *retry = false;
1827
1828        /*
1829         * allocate an extra byte to ensure the payload
1830         * is '\0'-terminated
1831         */
1832        allocbytes++;
1833        if ((controlvm_payload_bytes_buffered + bytes)
1834            > MAX_CONTROLVM_PAYLOAD_BYTES) {
1835                if (retry)
1836                        *retry = true;
1837                return NULL;
1838        }
1839        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
1840        if (!ctx) {
1841                if (retry)
1842                        *retry = true;
1843                return NULL;
1844        }
1845
1846        ctx->allocbytes = allocbytes;
1847        ctx->param_bytes = bytes;
1848        ctx->curr = NULL;
1849        ctx->bytes_remaining = 0;
1850        ctx->byte_stream = false;
1851        if (local) {
1852                void *p;
1853
1854                if (addr > virt_to_phys(high_memory - 1))
1855                        goto err_finish_ctx;
1856                p = __va((unsigned long)(addr));
1857                memcpy(ctx->data, p, bytes);
1858        } else {
1859                void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1860
1861                if (!mapping)
1862                        goto err_finish_ctx;
1863                memcpy(ctx->data, mapping, bytes);
1864                memunmap(mapping);
1865        }
1866
1867        ctx->byte_stream = true;
1868        controlvm_payload_bytes_buffered += ctx->param_bytes;
1869
1870        return ctx;
1871
1872err_finish_ctx:
1873        parser_done(ctx);
1874        return NULL;
1875}
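
/*
 * The retry contract, as exercised by handle_command() below (sketch):
 *
 *   bool retry = false;
 *
 *   parser_ctx = parser_init_byte_stream(parm_addr, parm_bytes,
 *                                        local_addr, &retry);
 *   if (!parser_ctx && retry)
 *           return false;   <- caller re-reads the same msg next poll
 */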
1876
1877/**
1878 * handle_command() - process a controlvm message
1879 * @inmsg:        the message to process
1880 * @channel_addr: address of the controlvm channel
1881 *
1882 * Return:
1883 *    false - this function will return false only in the case where the
1884 *            controlvm message was NOT processed, but processing must be
1885 *            retried before reading the next controlvm message; a
1886 *            scenario where this can occur is when we need to throttle
1887 *            the allocation of memory in which to copy out controlvm
1888 *            payload data
1889 *    true  - processing of the controlvm message completed,
1890 *            either successfully or with an error
1891 */
1892static bool
1893handle_command(struct controlvm_message inmsg, u64 channel_addr)
1894{
1895        struct controlvm_message_packet *cmd = &inmsg.cmd;
1896        u64 parm_addr;
1897        u32 parm_bytes;
1898        struct parser_context *parser_ctx = NULL;
1899        bool local_addr;
1900        struct controlvm_message ackmsg;
1901
1902        /* create parsing context if necessary */
1903        local_addr = (inmsg.hdr.flags.test_message == 1);
1904        if (channel_addr == 0)
1905                return true;
1906        parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1907        parm_bytes = inmsg.hdr.payload_bytes;
1908
1909        /*
1910         * Parameter and channel addresses within test messages actually lie
1911         * within our OS-controlled memory. We need to know that, because it
1912         * makes a difference in how we compute the virtual address.
1913         */
1914        if (parm_addr && parm_bytes) {
1915                bool retry = false;
1916
1917                parser_ctx =
1918                    parser_init_byte_stream(parm_addr, parm_bytes,
1919                                            local_addr, &retry);
1920                if (!parser_ctx && retry)
1921                        return false;
1922        }
1923
1924        if (!local_addr) {
1925                controlvm_init_response(&ackmsg, &inmsg.hdr,
1926                                        CONTROLVM_RESP_SUCCESS);
1927                if (controlvm_channel)
1928                        visorchannel_signalinsert(controlvm_channel,
1929                                                  CONTROLVM_QUEUE_ACK,
1930                                                  &ackmsg);
1931        }
1932        switch (inmsg.hdr.id) {
1933        case CONTROLVM_CHIPSET_INIT:
1934                chipset_init(&inmsg);
1935                break;
1936        case CONTROLVM_BUS_CREATE:
1937                bus_create(&inmsg);
1938                break;
1939        case CONTROLVM_BUS_DESTROY:
1940                bus_destroy(&inmsg);
1941                break;
1942        case CONTROLVM_BUS_CONFIGURE:
1943                bus_configure(&inmsg, parser_ctx);
1944                break;
1945        case CONTROLVM_DEVICE_CREATE:
1946                my_device_create(&inmsg);
1947                break;
1948        case CONTROLVM_DEVICE_CHANGESTATE:
1949                if (cmd->device_change_state.flags.phys_device) {
1950                        parahotplug_process_message(&inmsg);
1951                } else {
1952                        /*
1953                         * save the hdr and cmd structures for later use
1954                         * when sending back the response to Command
1955                         */
1956                        my_device_changestate(&inmsg);
1958                }
1959                break;
1960        case CONTROLVM_DEVICE_DESTROY:
1961                my_device_destroy(&inmsg);
1962                break;
1963        case CONTROLVM_DEVICE_CONFIGURE:
1964                /* no op for now; just respond with success */
1965                if (inmsg.hdr.flags.response_expected)
1966                        controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1967                break;
1968        case CONTROLVM_CHIPSET_READY:
1969                chipset_ready(&inmsg.hdr);
1970                break;
1971        case CONTROLVM_CHIPSET_SELFTEST:
1972                chipset_selftest(&inmsg.hdr);
1973                break;
1974        case CONTROLVM_CHIPSET_STOP:
1975                chipset_notready(&inmsg.hdr);
1976                break;
1977        default:
1978                if (inmsg.hdr.flags.response_expected)
1979                        controlvm_respond
1980                                (&inmsg.hdr,
1981                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1982                break;
1983        }
1984
1985        if (parser_ctx) {
1986                parser_done(parser_ctx);
1987                parser_ctx = NULL;
1988        }
1989        return true;
1990}
1991
1992/**
1993 * read_controlvm_event() - retrieves the next message from the
1994 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1995 *                          channel
1996 * @msg: pointer to the retrieved message
1997 *
1998 * Return: true if a valid message was retrieved or false otherwise
1999 */
2000static bool
2001read_controlvm_event(struct controlvm_message *msg)
2002{
2003        if (!visorchannel_signalremove(controlvm_channel,
2004                                       CONTROLVM_QUEUE_EVENT, msg)) {
2005                /* got a message */
2006                if (msg->hdr.flags.test_message == 1)
2007                        return false;
2008                return true;
2009        }
2010        return false;
2011}
2012
2013/**
2014 * parahotplug_process_list() - remove any requests that have been on the
2015 *                              list too long and respond with an error
2016 */
2017static void
2018parahotplug_process_list(void)
2019{
2020        struct list_head *pos;
2021        struct list_head *tmp;
2022
2023        spin_lock(&parahotplug_request_list_lock);
2024
2025        list_for_each_safe(pos, tmp, &parahotplug_request_list) {
2026                struct parahotplug_request *req =
2027                    list_entry(pos, struct parahotplug_request, list);
2028
2029                if (!time_after_eq(jiffies, req->expiration))
2030                        continue;
2031
2032                list_del(pos);
2033                if (req->msg.hdr.flags.response_expected)
2034                        controlvm_respond_physdev_changestate(
2035                                &req->msg.hdr,
2036                                CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2037                                req->msg.cmd.device_change_state.state);
2038                parahotplug_request_destroy(req);
2039        }
2040
2041        spin_unlock(&parahotplug_request_list_lock);
2042}
2043
2044static void
2045controlvm_periodic_work(struct work_struct *work)
2046{
2047        struct controlvm_message inmsg;
2048        bool got_command = false;
2049        bool handle_command_failed = false;
2050
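        /* drain and ignore anything sitting on our response queue */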
2051        while (!visorchannel_signalremove(controlvm_channel,
2052                                          CONTROLVM_QUEUE_RESPONSE,
2053                                          &inmsg))
2054                ;
2055        if (!got_command) {
2056                if (controlvm_pending_msg_valid) {
2057                        /*
2058                         * we throttled processing of a prior
2059                         * msg, so try to process it again
2060                         * rather than reading a new one
2061                         */
2062                        inmsg = controlvm_pending_msg;
2063                        controlvm_pending_msg_valid = false;
2064                        got_command = true;
2065                } else {
2066                        got_command = read_controlvm_event(&inmsg);
2067                }
2068        }
2069
2070        handle_command_failed = false;
2071        while (got_command && (!handle_command_failed)) {
2072                most_recent_message_jiffies = jiffies;
2073                if (handle_command(inmsg,
2074                                   visorchannel_get_physaddr
2075                                   (controlvm_channel)))
2076                        got_command = read_controlvm_event(&inmsg);
2077                else {
2078                        /*
2079                         * this is a scenario where throttling
2080                         * is required, but it is probably NOT an
2081                         * error; stash the current controlvm
2082                         * msg so we can attempt to reprocess
2083                         * it on our next loop
2084                         */
2085                        handle_command_failed = true;
2086                        controlvm_pending_msg = inmsg;
2087                        controlvm_pending_msg_valid = true;
2088                }
2089        }
2090
2091        /* time out any parahotplug requests udev never answered */
2092        parahotplug_process_list();
2093
2094        if (time_after(jiffies,
2095                       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2096                /*
2097                 * it's been longer than MIN_IDLE_SECONDS since we
2098                 * processed our last controlvm message; slow down the
2099                 * polling
2100                 */
2101                if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2102                        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2103        } else {
2104                if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2105                        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2106        }
2107
2108        schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2109}
2110
2111static int
2112visorchipset_init(struct acpi_device *acpi_device)
2113{
2114        int err = -ENODEV;
2115        u64 addr;
2116        uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2117
2118        addr = controlvm_get_channel_address();
2119        if (!addr)
2120                goto error;
2121
2122        memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2123
2124        controlvm_channel = visorchannel_create_with_lock(addr, 0,
2125                                                          GFP_KERNEL, uuid);
2126        if (!controlvm_channel)
2127                goto error;
2128
2129        if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2130                    visorchannel_get_header(controlvm_channel)))
2131                goto error_destroy_channel;
2132
2133        initialize_controlvm_payload();
2135
2136        major_dev = MKDEV(visorchipset_major, 0);
2137        err = visorchipset_file_init(major_dev, &controlvm_channel);
2138        if (err < 0)
2139                goto error_destroy_payload;
2140
2141        /* if booting in a crash kernel */
2142        if (is_kdump_kernel())
2143                INIT_DELAYED_WORK(&periodic_controlvm_work,
2144                                  setup_crash_devices_work_queue);
2145        else
2146                INIT_DELAYED_WORK(&periodic_controlvm_work,
2147                                  controlvm_periodic_work);
2148
2149        most_recent_message_jiffies = jiffies;
2150        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2151        schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2152
2153        visorchipset_platform_device.dev.devt = major_dev;
2154        if (platform_device_register(&visorchipset_platform_device) < 0) {
2155                POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2156                err = -ENODEV;
2157                goto error_cancel_work;
2158        }
2159        POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2160
2161        err = visorbus_init();
2162        if (err < 0)
2163                goto error_unregister;
2164
2165        return 0;
2166
2167error_unregister:
2168        platform_device_unregister(&visorchipset_platform_device);
2169
2170error_cancel_work:
2171        cancel_delayed_work_sync(&periodic_controlvm_work);
2172        visorchipset_file_cleanup(major_dev);
2173
2174error_destroy_payload:
2175        destroy_controlvm_payload_info(&controlvm_payload_info);
2176
2177error_destroy_channel:
2178        visorchannel_destroy(controlvm_channel);
2179
2180error:
2181        POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
2182        return err;
2183}
2184
2185static int
2186visorchipset_exit(struct acpi_device *acpi_device)
2187{
2188        POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2189
2190        visorbus_exit();
2191
2192        cancel_delayed_work_sync(&periodic_controlvm_work);
2193        destroy_controlvm_payload_info(&controlvm_payload_info);
2194
2195        visorchannel_destroy(controlvm_channel);
2196
2197        visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2198        platform_device_unregister(&visorchipset_platform_device);
2199        POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2200
2201        return 0;
2202}
2203
2204static const struct acpi_device_id unisys_device_ids[] = {
2205        {"PNP0A07", 0},
2206        {"", 0},
2207};
2208
2209static struct acpi_driver unisys_acpi_driver = {
2210        .name = "unisys_acpi",
2211        .class = "unisys_acpi_class",
2212        .owner = THIS_MODULE,
2213        .ids = unisys_device_ids,
2214        .ops = {
2215                .add = visorchipset_init,
2216                .remove = visorchipset_exit,
2217                },
2218};
2219
2220MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2221
2222static __init u32 visorutil_spar_detect(void)
2223{
2224        unsigned int eax, ebx, ecx, edx;
2225
2226        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2227                /* check the ID */
2228                cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2229                return  (ebx == UNISYS_SPAR_ID_EBX) &&
2230                        (ecx == UNISYS_SPAR_ID_ECX) &&
2231                        (edx == UNISYS_SPAR_ID_EDX);
2232        } else {
2233                return 0;
2234        }
2235}
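
/*
 * For reference: the ID words tested above are little-endian ASCII;
 * 0x73696e55 is "Unis", 0x70537379 is "ysSp", and 0x34367261 is
 * "ar64", spelling out the expected hypervisor signature.
 */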
2236
2237static int init_unisys(void)
2238{
2239        int result;
2240
2241        if (!visorutil_spar_detect())
2242                return -ENODEV;
2243
2244        result = acpi_bus_register_driver(&unisys_acpi_driver);
2245        if (result)
2246                return -ENODEV;
2247
2248        pr_info("Unisys Visorchipset Driver Loaded.\n");
2249        return 0;
2250}
2251
2252static void exit_unisys(void)
2253{
2254        acpi_bus_unregister_driver(&unisys_acpi_driver);
2255}
2256
2257module_param_named(major, visorchipset_major, int, S_IRUGO);
2258MODULE_PARM_DESC(major,
2259                 "major device number to use for the device node");
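
/*
 * Example (hypothetical): loading with a fixed major rather than a
 * dynamically allocated one:
 *
 *   modprobe visorchipset major=250
 */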
2260
2261module_init(init_unisys);
2262module_exit(exit_unisys);
2263
2264MODULE_AUTHOR("Unisys");
2265MODULE_LICENSE("GPL");
2266MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
2267