linux/drivers/s390/cio/vfio_ccw_fsm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

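/*
 * Issue SSCH for the channel program that was built in private->cp and map
 * the resulting condition code to a return value.  On cc 0 the FSM moves to
 * VFIO_CCW_STATE_CP_PENDING and the I/O interrupt is awaited.
 */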
static int fsm_io_helper(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        union orb *orb;
        int ccode;
        __u8 lpm;
        unsigned long flags;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
        if (!orb) {
                ret = -EIO;
                goto out;
        }

        /* Issue "Start Subchannel" */
        ccode = ssch(sch->schid, orb);

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
                ret = 0;
                private->state = VFIO_CCW_STATE_CP_PENDING;
                break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
                ret = -EBUSY;
                break;
        case 3:         /* Device/path not operational */
        {
                lpm = orb->cmd.lpm;
                if (lpm != 0)
                        sch->lpm &= ~lpm;
                else
                        sch->lpm = 0;

                if (cio_update_schib(sch))
                        ret = -ENODEV;
                else
                        ret = sch->lpm ? -EACCES : -ENODEV;
                break;
        }
        default:
                ret = ccode;
        }
out:
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}

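/*
 * Issue HSCH on the subchannel and map the condition code to a return value.
 */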
static int fsm_do_halt(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        unsigned long flags;
        int ccode;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        /* Issue "Halt Subchannel" */
        ccode = hsch(sch->schid);

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
                ret = 0;
                break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
                ret = -EBUSY;
                break;
        case 3:         /* Device not operational */
                ret = -ENODEV;
                break;
        default:
                ret = ccode;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}

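/*
 * Issue CSCH on the subchannel and map the condition code to a return value.
 */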
static int fsm_do_clear(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        unsigned long flags;
        int ccode;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        /* Issue "Clear Subchannel" */
        ccode = csch(sch->schid);

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
                /* TODO: check what else we might need to clear */
                ret = 0;
                break;
        case 3:         /* Device not operational */
                ret = -ENODEV;
                break;
        default:
                ret = ccode;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}

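/*
 * The subchannel is no longer operational: schedule its unregistration and
 * move the FSM to NOT_OPER.
 */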
static void fsm_notoper(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        /*
         * TODO:
         * Probably we should send the machine check to the guest.
         */
        css_sched_sch_todo(sch, SCH_TODO_UNREG);
        private->state = VFIO_CCW_STATE_NOT_OPER;
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
}

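/*
 * An I/O request arrived in a state that cannot handle it: report -EIO.
 */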
static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
{
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
        private->io_region->ret_code = -EIO;
}

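/*
 * A channel program is already pending on the device: report -EBUSY.
 */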
static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        private->io_region->ret_code = -EBUSY;
}

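/*
 * A channel program is still being processed: ask userspace to retry (-EAGAIN).
 */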
static void fsm_io_retry(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
{
        private->io_region->ret_code = -EAGAIN;
}

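/*
 * An async (halt/clear) request arrived in a state that cannot handle it:
 * report -EIO.
 */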
static void fsm_async_error(struct vfio_ccw_private *private,
                            enum vfio_ccw_event event)
{
        struct ccw_cmd_region *cmd_region = private->cmd_region;

        pr_err("vfio-ccw: FSM: %s request from state:%d\n",
               cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
               cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
               "<unknown>", private->state);
        cmd_region->ret_code = -EIO;
}

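/*
 * An async request arrived while a channel program is being processed: ask
 * userspace to retry (-EAGAIN).
 */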
static void fsm_async_retry(struct vfio_ccw_private *private,
                            enum vfio_ccw_event event)
{
        private->cmd_region->ret_code = -EAGAIN;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
                             enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        /*
         * An interrupt in a disabled state means a previous disable was not
         * successful - should not happen, but we try to disable again.
         */
        cio_disable_subchannel(sch);
}

static inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
        return p->sch->schid;
}

/*
 * Deal with a ccw command request from userspace: copy in the SCSW, build
 * and prefetch the channel program and start it on the subchannel.  Halt
 * and clear have to be requested via the async command region instead.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
                           enum vfio_ccw_event event)
{
        union orb *orb;
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";

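        /*
         * While the channel program is translated and started we stay in
         * CP_PROCESSING: concurrent I/O requests get -EAGAIN (fsm_io_retry)
         * until the start succeeds and fsm_io_helper() moves us to CP_PENDING.
         */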
        private->state = VFIO_CCW_STATE_CP_PROCESSING;
        memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

        if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
                orb = (union orb *)io_region->orb_area;

                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
                        errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
                if (io_region->ret_code) {
                        errstr = "cp init";
                        goto err_out;
                }

                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
                        errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }

                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
                        errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
                return;
        } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
                /* halt is handled via the async cmd region */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
                /* clear is handled via the async cmd region */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        }

err_out:
        trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
                               io_region->ret_code, errstr);
}

/*
 * Deal with an async (halt/clear subchannel) request from userspace.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
                              enum vfio_ccw_event event)
{
        struct ccw_cmd_region *cmd_region = private->cmd_region;

        switch (cmd_region->command) {
        case VFIO_CCW_ASYNC_CMD_HSCH:
                cmd_region->ret_code = fsm_do_halt(private);
                break;
        case VFIO_CCW_ASYNC_CMD_CSCH:
                cmd_region->ret_code = fsm_do_clear(private);
                break;
        default:
                /* should not happen? */
                cmd_region->ret_code = -EINVAL;
        }
}

/*
 * Got an interrupt for a normal I/O (state busy): stash the irb, queue the
 * I/O work and wake up a possible waiter on private->completion.
 */
static void fsm_irq(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
        struct irb *irb = this_cpu_ptr(&cio_irb);

        memcpy(&private->irb, irb, sizeof(*irb));

        queue_work(vfio_ccw_work_q, &private->io_work);

        if (private->completion)
                complete(private->completion);
}

/*
 * Device state machine: jump table indexed by FSM state and event.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
        [VFIO_CCW_STATE_NOT_OPER] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_nop,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_error,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_error,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_disabled_irq,
        },
        [VFIO_CCW_STATE_STANDBY] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_error,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_error,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_IDLE] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_request,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_request,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_CP_PROCESSING] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_retry,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_retry,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_CP_PENDING] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_busy,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_request,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
};