linux/drivers/s390/cio/vfio_ccw_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
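/**
 * vfio_ccw_sch_quiesce - disable a subchannel and flush pending I/O
 * @sch: subchannel to quiesce
 *
 * Tries to disable @sch; while the subchannel reports busy, issues
 * cancel/halt/clear, waits up to three seconds for the resulting
 * interrupt, flushes the workqueue and retries the disable.
 *
 * Return: 0 on success, or a negative errno from
 * cio_disable_subchannel() or cio_cancel_halt_clear().
 */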
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

        iretry = 255;
        do {
                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

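/*
 * The bottom half of interrupt handling, run on vfio_ccw_work_q: update
 * the channel-program state from the SCSW, copy the IRB into the I/O
 * region under io_mutex, reset the FSM to IDLE once the channel program
 * has fully completed, and signal userspace via the io_trigger eventfd.
 */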
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
        bool cp_is_finished = false;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
                        cp_is_finished = true;
                }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        /*
         * Reset to IDLE only if processing of a channel program
         * has finished. Do not overwrite a possible processing
         * state if the final interrupt was for HSCH or CSCH.
         */
        if (private->mdev && cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

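/* Notify userspace via the crw_trigger eventfd while CRWs are pending. */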
static void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

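/* Free whichever of the four shared regions were allocated at probe time. */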
static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
{
        if (private->crw_region)
                kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
        if (private->schib_region)
                kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
        if (private->cmd_region)
                kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        if (private->io_region)
                kmem_cache_free(vfio_ccw_io_region, private->io_region);
}

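/**
 * vfio_ccw_sch_probe - bind the driver to an I/O subchannel
 * @sch: subchannel to probe
 *
 * Rejects QDIO-capable subchannels, allocates the private structure and
 * the four regions shared with userspace (all but the guest channel
 * program from GFP_DMA memory), enables the subchannel and registers
 * the mediated device.
 *
 * Return: 0 on success, -ENODEV for a QDIO subchannel, -ENOMEM on
 * allocation failure, or the error from cio_enable_subchannel() or
 * vfio_ccw_mdev_reg().
 */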
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free;

        private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
                                                  GFP_KERNEL | GFP_DMA);
        if (!private->schib_region)
                goto out_free;

        private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->crw_region)
                goto out_free;

        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
        mutex_init(&private->io_mutex);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        INIT_LIST_HEAD(&private->crw);
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        if (dev_get_uevent_suppress(&sch->dev)) {
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);
        return ret;
}

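/*
 * Unbind: quiesce the subchannel, drop any CRWs still queued for
 * userspace, unregister the mediated device and free the private data.
 */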
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_crw *crw, *temp;

        vfio_ccw_sch_quiesce(sch);

        list_for_each_entry_safe(crw, temp, &private->crw, next) {
                list_del(&crw->next);
                kfree(crw);
        }

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                rc = 0;
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
        rc = 0;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

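/*
 * Queue a channel report word for userspace and schedule crw_work to
 * deliver the notification. Safe to call from atomic context.
 */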
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on.  The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}

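/*
 * React to channel-path events for paths leading to this subchannel:
 * update the path masks and, when a path goes away or comes back,
 * queue a CRW so the guest can learn about the change.
 */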
static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
                           mdev_uuid(private->mdev), sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};

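/*
 * Set up the two s390 debug feature areas: a sprintf view for messages
 * and a hex/ascii view for traces. debug_unregister() accepts a NULL id,
 * so the shared error path is safe even when only the first
 * registration succeeded.
 */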
static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}

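/*
 * Module init: create the debug areas, the single-threaded workqueue and
 * the four slab caches before registering with the channel subsystem.
 * kmem_cache_create_usercopy() whitelists each region for copying to and
 * from userspace, as the regions back vfio device-region reads/writes.
 */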
static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                        sizeof(struct ccw_schib_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_schib_region), NULL);
        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                        sizeof(struct ccw_crw_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_crw_region), NULL);
        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_err;
        }

        return ret;

out_err:
        vfio_ccw_destroy_regions();
        /* The workqueue is still NULL if its creation failed */
        if (vfio_ccw_work_q)
                destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");