/* linux/drivers/s390/cio/vfio_ccw_ops.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Physical device callbacks for vfio_ccw
   4 *
   5 * Copyright IBM Corp. 2017
   6 * Copyright Red Hat, Inc. 2019
   7 *
   8 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
   9 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
  10 *            Cornelia Huck <cohuck@redhat.com>
  11 */
  12
  13#include <linux/vfio.h>
  14#include <linux/mdev.h>
  15#include <linux/nospec.h>
  16#include <linux/slab.h>
  17
  18#include "vfio_ccw_private.h"
  19
/*
 * Reset the mdev to a known, idle state: quiesce the subchannel
 * (stopping any ongoing I/O) and then re-enable it.  On success the
 * FSM state is set back to IDLE; on error the previous state sticks.
 */
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	/* Re-enable the subchannel so the guest can drive I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
  46
  47static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
  48                                  unsigned long action,
  49                                  void *data)
  50{
  51        struct vfio_ccw_private *private =
  52                container_of(nb, struct vfio_ccw_private, nb);
  53
  54        /*
  55         * Vendor drivers MUST unpin pages in response to an
  56         * invalidation.
  57         */
  58        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
  59                struct vfio_iommu_type1_dma_unmap *unmap = data;
  60
  61                if (!cp_iova_pinned(&private->cp, unmap->iova))
  62                        return NOTIFY_OK;
  63
  64                if (vfio_ccw_mdev_reset(private->mdev))
  65                        return NOTIFY_BAD;
  66
  67                cp_free(&private->cp);
  68                return NOTIFY_OK;
  69        }
  70
  71        return NOTIFY_DONE;
  72}
  73
/* sysfs "name" attribute of the mdev type: human-readable type label. */
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);
  79
/* sysfs "device_api" attribute: identifies the vfio device API (ccw). */
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);
  86
/*
 * sysfs "available_instances" attribute: how many mdevs may still be
 * created on this subchannel (at most one; see vfio_ccw_mdev_create()).
 */
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);
  95
/* Attributes exposed under the "io" mdev type directory in sysfs. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* The single mdev type supported by vfio-ccw: "io". */
static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

/* NULL-terminated list of supported type groups, handed to mdev core. */
static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
 112
/*
 * mdev create callback: claim the (single) available instance of this
 * subchannel and move the FSM to IDLE.  Fails with -ENODEV if the
 * device is not operational, -EPERM if the instance is already taken.
 */
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	/* Only one mdev per subchannel: atomically claim the slot. */
	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	return 0;
}
 134
/*
 * mdev remove callback: quiesce the subchannel if it may still be
 * active, free the channel program and release the instance slot.
 */
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	/* Only quiesce when the device might have I/O in flight. */
	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	/* Make the instance slot available for a future create. */
	atomic_inc(&private->avail);

	return 0;
}
 158
/*
 * mdev open callback: register for IOMMU DMA-unmap notifications and
 * set up the optional device regions (async, schib, crw).  On any
 * region setup failure, everything registered so far is torn down.
 */
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	return ret;

out_unregister:
	/* Releases all regions registered so far, then the notifier. */
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
	return ret;
}
 193
/*
 * mdev release callback: reset the device if it may still be active,
 * then free the channel program, the device regions and the notifier —
 * the mirror of vfio_ccw_mdev_open().
 */
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}
 211
/*
 * Read from the I/O region (region index 0).  The access must lie
 * entirely within struct ccw_io_region; io_mutex serializes against
 * concurrent writes and in-flight I/O updating the region.
 * Returns the byte count on success, -EINVAL/-EFAULT on error.
 */
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}
 232
 233static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
 234                                  char __user *buf,
 235                                  size_t count,
 236                                  loff_t *ppos)
 237{
 238        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
 239        struct vfio_ccw_private *private;
 240
 241        private = dev_get_drvdata(mdev_parent_dev(mdev));
 242
 243        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
 244                return -EINVAL;
 245
 246        switch (index) {
 247        case VFIO_CCW_CONFIG_REGION_INDEX:
 248                return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
 249        default:
 250                index -= VFIO_CCW_NUM_REGIONS;
 251                return private->region[index].ops->read(private, buf, count,
 252                                                        ppos);
 253        }
 254
 255        return -EINVAL;
 256}
 257
/*
 * Write to the I/O region (region index 0) and kick off the I/O
 * request via the FSM.  The access must lie entirely within struct
 * ccw_io_region.  Uses trylock so a write racing with an in-flight
 * I/O returns -EAGAIN rather than blocking.
 * Returns the byte count on success, a negative errno otherwise.
 */
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	/* Hand the request to the FSM; ret_code reports its outcome. */
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
 287
 288static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
 289                                   const char __user *buf,
 290                                   size_t count,
 291                                   loff_t *ppos)
 292{
 293        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
 294        struct vfio_ccw_private *private;
 295
 296        private = dev_get_drvdata(mdev_parent_dev(mdev));
 297
 298        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
 299                return -EINVAL;
 300
 301        switch (index) {
 302        case VFIO_CCW_CONFIG_REGION_INDEX:
 303                return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
 304        default:
 305                index -= VFIO_CCW_NUM_REGIONS;
 306                return private->region[index].ops->write(private, buf, count,
 307                                                         ppos);
 308        }
 309
 310        return -EINVAL;
 311}
 312
 313static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
 314                                         struct mdev_device *mdev)
 315{
 316        struct vfio_ccw_private *private;
 317
 318        private = dev_get_drvdata(mdev_parent_dev(mdev));
 319        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
 320        info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
 321        info->num_irqs = VFIO_CCW_NUM_IRQS;
 322
 323        return 0;
 324}
 325
/*
 * Fill in the VFIO_DEVICE_GET_REGION_INFO payload.  The built-in config
 * region is described directly; all dynamically registered regions are
 * described via a capability chain (VFIO_REGION_INFO_CAP_TYPE) copied
 * to userspace behind the fixed-size info struct.
 */
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		/* Clamp the (user-controlled) index against speculation. */
		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			/*
			 * User buffer too small for the chain: report the
			 * size needed, and cap_offset 0 = no caps copied.
			 */
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			/* Chain offsets are relative to the info struct. */
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);

	}
	}
	return 0;
}
 391
 392static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 393{
 394        switch (info->index) {
 395        case VFIO_CCW_IO_IRQ_INDEX:
 396        case VFIO_CCW_CRW_IRQ_INDEX:
 397                info->count = 1;
 398                info->flags = VFIO_IRQ_INFO_EVENTFD;
 399                break;
 400        default:
 401                return -EINVAL;
 402        }
 403
 404        return 0;
 405}
 406
/*
 * Handle VFIO_DEVICE_SET_IRQS for the I/O and CRW IRQs.  Only the
 * TRIGGER action is supported: NONE/BOOL signal the current eventfd
 * directly, EVENTFD installs (fd >= 0) or removes (fd == -1) the
 * eventfd context used to notify userspace.
 */
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	/* Select which trigger slot the request targets. */
	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		/* Unconditional manual trigger. */
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		/* Trigger only if the user passed a non-zero bool. */
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			/* fd == -1 tears down the existing trigger. */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			/* Drop the old trigger before installing the new. */
			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
 480
 481int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
 482                                 unsigned int subtype,
 483                                 const struct vfio_ccw_regops *ops,
 484                                 size_t size, u32 flags, void *data)
 485{
 486        struct vfio_ccw_region *region;
 487
 488        region = krealloc(private->region,
 489                          (private->num_regions + 1) * sizeof(*region),
 490                          GFP_KERNEL);
 491        if (!region)
 492                return -ENOMEM;
 493
 494        private->region = region;
 495        private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
 496        private->region[private->num_regions].subtype = subtype;
 497        private->region[private->num_regions].ops = ops;
 498        private->region[private->num_regions].size = size;
 499        private->region[private->num_regions].flags = flags;
 500        private->region[private->num_regions].data = data;
 501
 502        private->num_regions++;
 503
 504        return 0;
 505}
 506
 507void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
 508{
 509        int i;
 510
 511        for (i = 0; i < private->num_regions; i++)
 512                private->region[i].ops->release(private, &private->region[i]);
 513        private->num_regions = 0;
 514        kfree(private->region);
 515        private->region = NULL;
 516}
 517
 518static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
 519                                   unsigned int cmd,
 520                                   unsigned long arg)
 521{
 522        int ret = 0;
 523        unsigned long minsz;
 524
 525        switch (cmd) {
 526        case VFIO_DEVICE_GET_INFO:
 527        {
 528                struct vfio_device_info info;
 529
 530                minsz = offsetofend(struct vfio_device_info, num_irqs);
 531
 532                if (copy_from_user(&info, (void __user *)arg, minsz))
 533                        return -EFAULT;
 534
 535                if (info.argsz < minsz)
 536                        return -EINVAL;
 537
 538                ret = vfio_ccw_mdev_get_device_info(&info, mdev);
 539                if (ret)
 540                        return ret;
 541
 542                return copy_to_user((void __user *)arg, &info, minsz);
 543        }
 544        case VFIO_DEVICE_GET_REGION_INFO:
 545        {
 546                struct vfio_region_info info;
 547
 548                minsz = offsetofend(struct vfio_region_info, offset);
 549
 550                if (copy_from_user(&info, (void __user *)arg, minsz))
 551                        return -EFAULT;
 552
 553                if (info.argsz < minsz)
 554                        return -EINVAL;
 555
 556                ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
 557                if (ret)
 558                        return ret;
 559
 560                return copy_to_user((void __user *)arg, &info, minsz);
 561        }
 562        case VFIO_DEVICE_GET_IRQ_INFO:
 563        {
 564                struct vfio_irq_info info;
 565
 566                minsz = offsetofend(struct vfio_irq_info, count);
 567
 568                if (copy_from_user(&info, (void __user *)arg, minsz))
 569                        return -EFAULT;
 570
 571                if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
 572                        return -EINVAL;
 573
 574                ret = vfio_ccw_mdev_get_irq_info(&info);
 575                if (ret)
 576                        return ret;
 577
 578                if (info.count == -1)
 579                        return -EINVAL;
 580
 581                return copy_to_user((void __user *)arg, &info, minsz);
 582        }
 583        case VFIO_DEVICE_SET_IRQS:
 584        {
 585                struct vfio_irq_set hdr;
 586                size_t data_size;
 587                void __user *data;
 588
 589                minsz = offsetofend(struct vfio_irq_set, count);
 590
 591                if (copy_from_user(&hdr, (void __user *)arg, minsz))
 592                        return -EFAULT;
 593
 594                ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
 595                                                         VFIO_CCW_NUM_IRQS,
 596                                                         &data_size);
 597                if (ret)
 598                        return ret;
 599
 600                data = (void __user *)(arg + minsz);
 601                return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
 602        }
 603        case VFIO_DEVICE_RESET:
 604                return vfio_ccw_mdev_reset(mdev);
 605        default:
 606                return -ENOTTY;
 607        }
 608}
 609
/* mdev parent callbacks wiring the vfio-ccw implementation into mdev core. */
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};
 621
/* Register the subchannel device as an mdev parent. */
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}
 626
/* Unregister the subchannel device from mdev core. */
void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}
 631