linux/drivers/nvme/host/ioctl.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>       /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
        if (in_compat_syscall())
                ptrval = (compat_uptr_t)ptrval;
        return (void __user *)ptrval;
}
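
/*
 * Example (illustrative): a 32-bit task that leaves stale upper bits in a
 * __u64 ioctl field, e.g. 0xffffffffdeadbeef from sign extension, yields
 * the user pointer 0xdeadbeef here, matching what a 32-bit kernel would
 * have seen; a native 64-bit task keeps all 64 bits.
 */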

/*
 * Bounce the user metadata into a kernel buffer and attach it to @bio as a
 * single-page integrity payload.  For writes the user data is copied in
 * here; for reads the caller copies it back out once the request completes.
 */
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
                unsigned len, u32 seed, bool write)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if (write && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret == len)
                return buf;
        ret = -ENOMEM;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

/*
 * Map the user data and metadata buffers, wire @cmd up to a passthrough
 * request, execute it synchronously, and copy the result (and any read
 * metadata) back to userspace.
 */
static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, u64 *result, unsigned timeout)
{
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        struct request *req;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        req = nvme_alloc_request(q, cmd, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        if (timeout)
                req->timeout = timeout;
        nvme_req(req)->flags |= NVME_REQ_USERCMD;

        if (ubuffer && bufflen) {
                ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                                GFP_KERNEL);
                if (ret)
                        goto out;
                bio = req->bio;
                if (bdev)
                        bio_set_dev(bio, bdev);
                if (bdev && meta_buffer && meta_len) {
                        meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
                                        meta_seed, write);
                        if (IS_ERR(meta)) {
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
                        req->cmd_flags |= REQ_INTEGRITY;
                }
        }

        ret = nvme_execute_passthru_rq(req);
        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
        }
        kfree(meta);
 out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
 out:
        blk_mq_free_request(req);
        return ret;
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;     /* nblocks is 0's based */

        if ((io.control & NVME_RW_PRINFO_PRACT) &&
            ns->ms == sizeof(struct t10_pi_tuple)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
                 */
                if (nvme_to_user_ptr(io.metadata))
                        return -EINVAL;
                meta_len = 0;
                metadata = NULL;
        } else {
                meta_len = (io.nblocks + 1) * ns->ms;
                metadata = nvme_to_user_ptr(io.metadata);
        }

        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c,
                        nvme_to_user_ptr(io.addr), length,
                        metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
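
/*
 * Example (userspace, illustrative only): reading eight logical blocks
 * starting at LBA 0 through NVME_IOCTL_SUBMIT_IO.  The device path and
 * buffer setup are hypothetical.
 *
 *      struct nvme_user_io io = {
 *              .opcode  = 0x02,                        // nvme_cmd_read
 *              .slba    = 0,
 *              .nblocks = 7,                           // 0's based: 8 blocks
 *              .addr    = (__u64)(uintptr_t)buf,       // data buffer
 *      };
 *
 *      if (ioctl(open("/dev/nvme0n1", O_RDONLY), NVME_IOCTL_SUBMIT_IO, &io) < 0)
 *              perror("NVME_IOCTL_SUBMIT_IO");
 */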

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
                                        struct nvme_ns *ns, __u32 nsid)
{
        if (ns && nsid != ns->head->ns_id) {
                dev_err(ctrl->device,
                        "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
                        current->comm, nsid, ns->head->ns_id);
                return false;
        }

        return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd __user *ucmd)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u64 result;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &result, timeout);

        if (status >= 0) {
                if (put_user(result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}
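
/*
 * Example (userspace, illustrative only): issuing an Identify Controller
 * admin command (opcode 0x06, CNS 01h) via NVME_IOCTL_ADMIN_CMD, which
 * requires CAP_SYS_ADMIN.  The device path and buffer are hypothetical;
 * NVME_IOCTL_ADMIN64_CMD works the same way with a 64-bit result field.
 *
 *      struct nvme_admin_cmd cmd = {
 *              .opcode   = 0x06,                       // Identify
 *              .addr     = (__u64)(uintptr_t)buf,      // 4096-byte buffer
 *              .data_len = 4096,
 *              .cdw10    = 0x1,                        // CNS: controller
 *      };
 *
 *      if (ioctl(open("/dev/nvme0", O_RDONLY), NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
 *              perror("NVME_IOCTL_ADMIN_CMD");
 */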

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd64 __user *ucmd)
{
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &cmd.result, timeout);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
                return true;
        if (is_sed_ioctl(cmd))
                return true;
        return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
}

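/*
 * ABI note (illustrative): i386 aligns __u64 members to 4 bytes, so a
 * 32-bit x86 build sees a 44-byte struct nvme_user_io, while the natural
 * 8-byte alignment on 64-bit pads it to 48 bytes.  Because _IOW() encodes
 * the struct size, 32-bit userspace computes a different
 * NVME_IOCTL_SUBMIT_IO value; the packed struct below reproduces the
 * 32-bit layout so that ioctl number can be accepted too.
 */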
#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
        __u8    opcode;
        __u8    flags;
        __u16   control;
        __u16   nblocks;
        __u16   rsvd;
        __u64   metadata;
        __u64   addr;
        __u64   slba;
        __u32   dsmgmt;
        __u32   reftag;
        __u16   apptag;
        __u16   appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32  _IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
                return nvme_user_cmd(ns->ctrl, ns, argp);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
         * same size and at the same offset.
         */
#ifdef COMPAT_FOR_U64_ALIGNMENT
        case NVME_IOCTL_SUBMIT_IO32:
#endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp);
        default:
                return -ENOTTY;
        }
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
        return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
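
/*
 * Note: nvme_ns_chr_ioctl() above serves the per-namespace generic
 * character device (typically /dev/ngXnY), which accepts the same ioctls
 * as the block device node.
 */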

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx)
        __releases(&head->srcu)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        /* ns is no longer protected once the SRCU lock is dropped */
        nvme_get_ctrl(ctrl);
        srcu_read_unlock(&head->srcu, srcu_idx);
        ret = nvme_ctrl_ioctl(ctrl, cmd, argp);

        nvme_put_ctrl(ctrl);
        return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        /*
         * Handle ioctls that apply to the controller instead of the namespace
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct cdev *cdev = file_inode(file)->i_cdev;
        struct nvme_ns_head *head =
                container_of(cdev, struct nvme_ns_head, cdev);
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

/*
 * Handle NVME_IOCTL_IO_CMD on the controller character device.  This is
 * only allowed when exactly one namespace is present, since the ioctl does
 * not identify which namespace it is meant for.
 */
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
        struct nvme_ns *ns;
        int ret;

        down_read(&ctrl->namespaces_rwsem);
        if (list_empty(&ctrl->namespaces)) {
                ret = -ENOTTY;
                goto out_unlock;
        }

        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
                dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);

        ret = nvme_user_cmd(ctrl, ns, argp);
        nvme_put_ns(ns);
        return ret;

out_unlock:
        up_read(&ctrl->namespaces_rwsem);
        return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct nvme_ctrl *ctrl = file->private_data;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
                return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
                nvme_queue_scan(ctrl);
                return 0;
        default:
                return -ENOTTY;
        }
}
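
/*
 * Example (userspace, illustrative only): resetting a controller and
 * rescanning its namespaces through the controller character device.  The
 * device path is hypothetical.
 *
 *      int fd = open("/dev/nvme0", O_RDWR);
 *
 *      if (fd < 0 || ioctl(fd, NVME_IOCTL_RESET) < 0)
 *              perror("NVME_IOCTL_RESET");
 *      if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)
 *              perror("NVME_IOCTL_RESCAN");
 */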