linux/drivers/s390/cio/vfio_ccw_ops.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"

static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;
        struct subchannel *sch;
        int ret;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        sch = private->sch;
        /*
         * TODO:
         * In the current stage, some things like "no I/O running" and "no
         * interrupt pending" are clear, but we are not sure what other state
         * we need to care about.
         * There are still a lot more instructions that need to be handled. We
         * should come back here later.
         */
        ret = vfio_ccw_sch_quiesce(sch);
        if (ret)
                return ret;

        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        if (!ret)
                private->state = VFIO_CCW_STATE_IDLE;

        return ret;
}

static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
                                  unsigned long action,
                                  void *data)
{
        struct vfio_ccw_private *private =
                container_of(nb, struct vfio_ccw_private, nb);

        /*
         * Vendor drivers MUST unpin pages in response to an
         * invalidation.
         */
        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;

                if (!cp_iova_pinned(&private->cp, unmap->iova))
                        return NOTIFY_OK;

                if (vfio_ccw_mdev_reset(private->mdev))
                        return NOTIFY_BAD;

                cp_free(&private->cp);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
        return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
        struct vfio_ccw_private *private = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group = {
        .name  = "io",
        .attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group,
        NULL,
};
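
/*
 * The "io" type group above is what the mdev core exposes under the parent
 * subchannel in sysfs.  A minimal userspace sketch of creating a mediated
 * device from it, assuming the usual mdev sysfs layout
 * (mdev_supported_types/vfio_ccw-io/create); the helper name, the subchannel
 * bus ID and the UUID are illustrative placeholders, not values defined by
 * this file:
 *
 *        #include <fcntl.h>
 *        #include <string.h>
 *        #include <unistd.h>
 *
 *        static int create_vfio_ccw_mdev(const char *uuid)
 *        {
 *                int fd, ret;
 *
 *                fd = open("/sys/bus/css/devices/0.0.0600/"
 *                          "mdev_supported_types/vfio_ccw-io/create",
 *                          O_WRONLY);
 *                if (fd < 0)
 *                        return -1;
 *                ret = write(fd, uuid, strlen(uuid)) < 0 ? -1 : 0;
 *                close(fd);
 *                return ret;
 *        }
 */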

static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -ENODEV;

        if (atomic_dec_if_positive(&private->avail) < 0)
                return -EPERM;

        private->mdev = mdev;
        private->state = VFIO_CCW_STATE_IDLE;

        return 0;
}

static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_mdev_reset(mdev))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        private->mdev = NULL;
        atomic_inc(&private->avail);

        return 0;
}

static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
        unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

        private->nb.notifier_call = vfio_ccw_mdev_notifier;

        return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                      &events, &private->nb);
}

static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &private->nb);
}

static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                                  char __user *buf,
                                  size_t count,
                                  loff_t *ppos)
{
        struct vfio_ccw_private *private;
        struct ccw_io_region *region;

        if (*ppos + count > sizeof(*region))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        region = &private->io_region;
        if (copy_to_user(buf, (void *)region + *ppos, count))
                return -EFAULT;

        return count;
}
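
/*
 * Userspace consumes this region with plain pread(2)/read(2) on the vfio
 * device file descriptor; after an I/O interrupt is signalled, irb_area in
 * the region carries the IRB.  A minimal sketch (the helper name is
 * illustrative), assuming struct ccw_io_region from <linux/vfio_ccw.h> and a
 * region offset previously obtained with VFIO_DEVICE_GET_REGION_INFO (see
 * the ioctl handler below):
 *
 *        #include <linux/vfio_ccw.h>
 *        #include <unistd.h>
 *
 *        static int fetch_io_region(int device_fd, off_t region_offset,
 *                                   struct ccw_io_region *region)
 *        {
 *                ssize_t n = pread(device_fd, region, sizeof(*region),
 *                                  region_offset);
 *
 *                return n == (ssize_t)sizeof(*region) ? 0 : -1;
 *        }
 */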

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
                                   const char __user *buf,
                                   size_t count,
                                   loff_t *ppos)
{
        struct vfio_ccw_private *private;
        struct ccw_io_region *region;

        if (*ppos + count > sizeof(*region))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        if (private->state != VFIO_CCW_STATE_IDLE)
                return -EACCES;

        region = &private->io_region;
        if (copy_from_user((void *)region + *ppos, buf, count))
                return -EFAULT;

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
        if (region->ret_code != 0) {
                private->state = VFIO_CCW_STATE_IDLE;
                return region->ret_code;
        }

        return count;
}
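
/*
 * An I/O request is started by writing the region back with orb_area filled
 * in; the write returns an error when the FSM rejects the request.  A
 * minimal sketch (helper name illustrative), under the same assumptions as
 * the read example above:
 *
 *        #include <linux/vfio_ccw.h>
 *        #include <string.h>
 *        #include <unistd.h>
 *
 *        static int submit_io(int device_fd, off_t region_offset,
 *                             const void *orb, size_t orb_len)
 *        {
 *                struct ccw_io_region region;
 *
 *                if (orb_len > sizeof(region.orb_area))
 *                        return -1;
 *
 *                memset(&region, 0, sizeof(region));
 *                memcpy(region.orb_area, orb, orb_len);
 *
 *                if (pwrite(device_fd, &region, sizeof(region),
 *                           region_offset) != (ssize_t)sizeof(region))
 *                        return -1;
 *                return 0;
 *        }
 */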

static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
{
        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
        info->num_regions = VFIO_CCW_NUM_REGIONS;
        info->num_irqs = VFIO_CCW_NUM_IRQS;

        return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
                                         u16 *cap_type_id,
                                         void **cap_type)
{
        switch (info->index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                info->offset = 0;
                info->size = sizeof(struct ccw_io_region);
                info->flags = VFIO_REGION_INFO_FLAG_READ
                              | VFIO_REGION_INFO_FLAG_WRITE;
                return 0;
        default:
                return -EINVAL;
        }
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
        if (info->index != VFIO_CCW_IO_IRQ_INDEX)
                return -EINVAL;

        info->count = 1;
        info->flags = VFIO_IRQ_INFO_EVENTFD;

        return 0;
}

static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
                                  uint32_t flags,
                                  void __user *data)
{
        struct vfio_ccw_private *private;
        struct eventfd_ctx **ctx;

        if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        ctx = &private->io_trigger;

        switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
        {
                if (*ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_BOOL:
        {
                uint8_t trigger;

                if (get_user(trigger, (uint8_t __user *)data))
                        return -EFAULT;

                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_EVENTFD:
        {
                int32_t fd;

                if (get_user(fd, (int32_t __user *)data))
                        return -EFAULT;

                if (fd == -1) {
                        if (*ctx)
                                eventfd_ctx_put(*ctx);
                        *ctx = NULL;
                } else if (fd >= 0) {
                        struct eventfd_ctx *efdctx;

                        efdctx = eventfd_ctx_fdget(fd);
                        if (IS_ERR(efdctx))
                                return PTR_ERR(efdctx);

                        if (*ctx)
                                eventfd_ctx_put(*ctx);

                        *ctx = efdctx;
                } else
                        return -EINVAL;

                return 0;
        }
        default:
                return -EINVAL;
        }
}
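
/*
 * A sketch of the userspace side of this handler: wiring an eventfd as the
 * I/O interrupt trigger with VFIO_DEVICE_SET_IRQS, using the standard
 * vfio_irq_set layout with a single 32-bit file descriptor as payload.  The
 * helper name is illustrative; the index and flag values come from
 * <linux/vfio.h>:
 *
 *        #include <linux/vfio.h>
 *        #include <stdint.h>
 *        #include <stdlib.h>
 *        #include <string.h>
 *        #include <sys/ioctl.h>
 *
 *        static int wire_io_eventfd(int device_fd, int event_fd)
 *        {
 *                struct vfio_irq_set *set;
 *                size_t argsz = sizeof(*set) + sizeof(int32_t);
 *                int32_t fd = event_fd;
 *                int ret;
 *
 *                set = calloc(1, argsz);
 *                if (!set)
 *                        return -1;
 *
 *                set->argsz = argsz;
 *                set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *                             VFIO_IRQ_SET_ACTION_TRIGGER;
 *                set->index = VFIO_CCW_IO_IRQ_INDEX;
 *                set->start = 0;
 *                set->count = 1;
 *                memcpy(set->data, &fd, sizeof(fd));
 *
 *                ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *                free(set);
 *                return ret;
 *        }
 */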

static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_device_info(&info);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;
                u16 cap_type_id = 0;
                void *cap_type = NULL;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
                                                    &cap_type);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_irq_info(&info);
                if (ret)
                        return ret;

                if (info.count == -1)
                        return -EINVAL;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_SET_IRQS:
        {
                struct vfio_irq_set hdr;
                size_t data_size;
                void __user *data;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
                                                         VFIO_CCW_NUM_IRQS,
                                                         &data_size);
                if (ret)
                        return ret;

                data = (void __user *)(arg + minsz);
                return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
        }
        case VFIO_DEVICE_RESET:
                return vfio_ccw_mdev_reset(mdev);
        default:
                return -ENOTTY;
        }
}
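
/*
 * A sketch of the region discovery a vfio user performs against this ioctl
 * interface before reading or writing the I/O region; the returned offset
 * and size are what the read/write handlers above expect.  The helper name
 * is illustrative; the index comes from <linux/vfio.h>:
 *
 *        #include <linux/vfio.h>
 *        #include <string.h>
 *        #include <sys/ioctl.h>
 *
 *        static int get_config_region(int device_fd,
 *                                     struct vfio_region_info *info)
 *        {
 *                memset(info, 0, sizeof(*info));
 *                info->argsz = sizeof(*info);
 *                info->index = VFIO_CCW_CONFIG_REGION_INDEX;
 *
 *                return ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info);
 *        }
 */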

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
        .owner                  = THIS_MODULE,
        .supported_type_groups  = mdev_type_groups,
        .create                 = vfio_ccw_mdev_create,
        .remove                 = vfio_ccw_mdev_remove,
        .open                   = vfio_ccw_mdev_open,
        .release                = vfio_ccw_mdev_release,
        .read                   = vfio_ccw_mdev_read,
        .write                  = vfio_ccw_mdev_write,
        .ioctl                  = vfio_ccw_mdev_ioctl,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
        return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
        mdev_unregister_device(&sch->dev);
}