linux/drivers/s390/cio/vfio_ccw_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

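/*
 * All deferred work (I/O interrupt processing, CRW delivery) runs on a
 * dedicated single-threaded workqueue. The kmem caches below back the
 * per-device region buffers that are copied to and from userspace.
 */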
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
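/**
 * vfio_ccw_sch_quiesce - quiesce a subchannel
 * @sch: subchannel to quiesce
 *
 * Disable the subchannel. If a plain disable reports -EBUSY, flush
 * outstanding I/O with cancel/halt/clear, wait for the resulting
 * interrupts, and retry the disable. The device is left in the
 * NOT_OPER state in any case.
 *
 * Returns 0 on success or a negative errno on failure.
 */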
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

        iretry = 255;
        do {

                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

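/*
 * vfio_ccw_sch_io_todo - bottom half for I/O interrupts
 *
 * Update the channel program with the SCSW from the IRB, free it once
 * its final interrupt has arrived, mirror the IRB into the I/O region
 * for userspace, and signal the io_trigger eventfd if one is set.
 */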
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
        bool cp_is_finished = false;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
                        cp_is_finished = true;
                }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        /*
         * Reset to IDLE only if processing of a channel program
         * has finished. Do not overwrite a possible processing
         * state if the final interrupt was for HSCH or CSCH.
         */
        if (private->mdev && cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

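/*
 * vfio_ccw_crw_todo - bottom half for CRW delivery
 *
 * Signal the crw_trigger eventfd if any CRWs are queued for userspace.
 */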
static void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
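/*
 * vfio_ccw_sch_irq - interrupt handler for the subchannel
 *
 * Account the interrupt and feed it into the device state machine.
 */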
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

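/*
 * vfio_ccw_free_regions - return any allocated region buffers to their
 * respective kmem caches.
 */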
static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
{
        if (private->crw_region)
                kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
        if (private->schib_region)
                kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
        if (private->cmd_region)
                kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        if (private->io_region)
                kmem_cache_free(vfio_ccw_io_region, private->io_region);
}

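/*
 * vfio_ccw_sch_probe - bind to an I/O subchannel
 *
 * Allocate the private structure and its region buffers, enable the
 * subchannel, and register the mediated device. Subchannels with the
 * QDIO facility are rejected, since vfio-ccw does not support QDIO.
 */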
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free;

        private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
                                                  GFP_KERNEL | GFP_DMA);
        if (!private->schib_region)
                goto out_free;

        private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->crw_region)
                goto out_free;

        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
        mutex_init(&private->io_mutex);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        INIT_LIST_HEAD(&private->crw);
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        if (dev_get_uevent_suppress(&sch->dev)) {
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);
        return ret;
}

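/*
 * vfio_ccw_sch_remove - unbind from an I/O subchannel
 *
 * Quiesce the subchannel, drop any CRWs still queued for userspace,
 * unregister the mediated device, and free the private structure.
 */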
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_crw *crw, *temp;

        vfio_ccw_sch_quiesce(sch);

        list_for_each_entry_safe(crw, temp, &private->crw, next) {
                list_del(&crw->next);
                kfree(crw);
        }

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        vfio_ccw_free_regions(private);
        kfree(private->cp.guest_cp);
        kfree(private);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                rc = 0;
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
        rc = 0;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

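/*
 * vfio_ccw_queue_crw - queue a channel report word for userspace
 *
 * Build a CRW from the given reporting-source code, error recovery code
 * and reporting-source ID, add it to the private CRW list, and schedule
 * the worker that signals userspace. Allocation failures are tolerated;
 * see the comment in the function body.
 */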
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on.  The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}

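/*
 * vfio_ccw_chp_event - handle a channel-path event for the subchannel
 *
 * Adjust the operational and logical path masks for vary on/off, flush
 * pending I/O on a path that is going away, and queue a CRW so the
 * guest learns about paths that disappeared (CHP_OFFLINE) or returned
 * (CHP_ONLINE).
 */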
static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
                           mdev_uuid(private->mdev), sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};

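/*
 * vfio_ccw_debug_init - set up the s390 debug feature areas
 *
 * Register one area for formatted messages (sprintf view) and one for
 * hex/ASCII trace data, both at debug level 2.
 */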
static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}

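/*
 * vfio_ccw_sch_init - module initialization
 *
 * Set up debugging, the workqueue and the region caches, register the
 * driver's interruption subclass, and finally register with the css
 * bus so that probe() is called for matching I/O subchannels.
 */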
static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                        sizeof(struct ccw_schib_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_schib_region), NULL);
        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                        sizeof(struct ccw_crw_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_crw_region), NULL);
        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_err;
        }

        return ret;

out_err:
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");