// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ioctl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>

#include "internal.h"

#include <asm/ioctls.h>

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS      (UINT_MAX / sizeof(struct fiemap_extent))

/**
 * vfs_ioctl - call filesystem specific ioctl methods
 * @filp:       open file to invoke ioctl method on
 * @cmd:        ioctl command to execute
 * @arg:        command-specific argument for ioctl
 *
 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
 * returns -ENOTTY.
 *
 * Returns 0 on success, -errno on error.
 */
long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int error = -ENOTTY;

        if (!filp->f_op->unlocked_ioctl)
                goto out;

        error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
        if (error == -ENOIOCTLCMD)
                error = -ENOTTY;
 out:
        return error;
}
EXPORT_SYMBOL(vfs_ioctl);
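
/*
 * Example (illustration only, not part of the original file): a minimal
 * ->unlocked_ioctl hook as a driver might provide it.  vfs_ioctl() above
 * dispatches to this and converts -ENOIOCTLCMD into -ENOTTY.  The names
 * example_ioctl(), EXAMPLE_IOC_RESET, example_reset() and example_fops are
 * hypothetical and exist only for this sketch.
 */
static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case EXAMPLE_IOC_RESET:
                return example_reset(filp);
        default:
                /* Unknown command: let the VFS report -ENOTTY. */
                return -ENOIOCTLCMD;
        }
}

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = example_ioctl,
};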

static int ioctl_fibmap(struct file *filp, int __user *p)
{
        struct address_space *mapping = filp->f_mapping;
        int res, block;

        /* do we support this mess? */
        if (!mapping->a_ops->bmap)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        res = get_user(block, p);
        if (res)
                return res;
        res = mapping->a_ops->bmap(mapping, block);
        return put_user(res, p);
}
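
/*
 * Userspace sketch (illustration only, not kernel code): querying FIBMAP as
 * handled by ioctl_fibmap() above.  The caller passes a logical block number
 * by pointer and gets the physical block number back in the same int;
 * CAP_SYS_RAWIO is required.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FIBMAP */
#include <stdio.h>

static int print_physical_block(int fd, int logical_block)
{
        int block = logical_block;

        if (ioctl(fd, FIBMAP, &block) < 0)
                return -1;
        printf("logical %d -> physical %d\n", logical_block, block);
        return 0;
}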

/**
 * fiemap_fill_next_extent - Fiemap helper function
 * @fieinfo:    Fiemap context passed into ->fiemap
 * @logical:    Extent logical start offset, in bytes
 * @phys:       Extent physical start offset, in bytes
 * @len:        Extent length, in bytes
 * @flags:      FIEMAP_EXTENT flags that describe this extent
 *
 * Called from file system ->fiemap callback. Will populate extent
 * info as passed in via arguments and copy to user memory. On
 * success, extent count on fieinfo is incremented.
 *
 * Returns 0 on success, -errno on error, 1 if this was the last
 * extent that will fit in user array.
 */
#define SET_UNKNOWN_FLAGS       (FIEMAP_EXTENT_DELALLOC)
#define SET_NO_UNMOUNTED_IO_FLAGS       (FIEMAP_EXTENT_DATA_ENCRYPTED)
#define SET_NOT_ALIGNED_FLAGS   (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
                            u64 phys, u64 len, u32 flags)
{
        struct fiemap_extent extent;
        struct fiemap_extent __user *dest = fieinfo->fi_extents_start;

        /* only count the extents */
        if (fieinfo->fi_extents_max == 0) {
                fieinfo->fi_extents_mapped++;
                return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
        }

        if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
                return 1;

        if (flags & SET_UNKNOWN_FLAGS)
                flags |= FIEMAP_EXTENT_UNKNOWN;
        if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
                flags |= FIEMAP_EXTENT_ENCODED;
        if (flags & SET_NOT_ALIGNED_FLAGS)
                flags |= FIEMAP_EXTENT_NOT_ALIGNED;

        memset(&extent, 0, sizeof(extent));
        extent.fe_logical = logical;
        extent.fe_physical = phys;
        extent.fe_length = len;
        extent.fe_flags = flags;

        dest += fieinfo->fi_extents_mapped;
        if (copy_to_user(dest, &extent, sizeof(extent)))
                return -EFAULT;

        fieinfo->fi_extents_mapped++;
        if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
                return 1;
        return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
EXPORT_SYMBOL(fiemap_fill_next_extent);

/**
 * fiemap_check_flags - check validity of requested flags for fiemap
 * @fieinfo:    Fiemap context passed into ->fiemap
 * @fs_flags:   Set of fiemap flags that the file system understands
 *
 * Called from file system ->fiemap callback. This will compute the
 * intersection of valid fiemap flags and those that the fs supports. That
 * value is then compared against the user supplied flags. In case of bad user
 * flags, the invalid values will be written into the fieinfo structure, and
 * -EBADR is returned, which tells ioctl_fiemap() to return those values to
 * userspace. For this reason, a return code of -EBADR should be preserved.
 *
 * Returns 0 on success, -EBADR on bad flags.
 */
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
{
        u32 incompat_flags;

        incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
        if (incompat_flags) {
                fieinfo->fi_flags = incompat_flags;
                return -EBADR;
        }
        return 0;
}
EXPORT_SYMBOL(fiemap_check_flags);
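
/*
 * Example (illustration only, not part of the original file): the calling
 * pattern a filesystem's ->fiemap implementation typically follows with the
 * two helpers above: check flags first (preserving -EBADR), then emit
 * extents until fiemap_fill_next_extent() returns non-zero.  The names
 * examplefs_fiemap(), examplefs_next_extent() and struct examplefs_extent
 * are hypothetical stand-ins for a filesystem's own extent lookup.
 */
static int examplefs_fiemap(struct inode *inode,
                            struct fiemap_extent_info *fieinfo,
                            u64 start, u64 len)
{
        struct examplefs_extent ext;
        int ret;

        /* Reject flags we do not understand; -EBADR must be passed through. */
        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        /* Hypothetical helper: returns 0 while another extent is found. */
        while (examplefs_next_extent(inode, start, len, &ext) == 0) {
                ret = fiemap_fill_next_extent(fieinfo, ext.logical, ext.phys,
                                              ext.len,
                                              ext.last ? FIEMAP_EXTENT_LAST : 0);
                /* 1 means the user array is full, <0 is an error: stop either way. */
                if (ret)
                        return ret < 0 ? ret : 0;
                start = ext.logical + ext.len;
        }
        return 0;
}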

static int fiemap_check_ranges(struct super_block *sb,
                               u64 start, u64 len, u64 *new_len)
{
        u64 maxbytes = (u64) sb->s_maxbytes;

        *new_len = len;

        if (len == 0)
                return -EINVAL;

        if (start > maxbytes)
                return -EFBIG;

        /*
         * Shrink request scope to what the fs can actually handle.
         */
        if (len > maxbytes || (maxbytes - len) < start)
                *new_len = maxbytes - start;

        return 0;
}

static int ioctl_fiemap(struct file *filp, unsigned long arg)
{
        struct fiemap fiemap;
        struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
        struct fiemap_extent_info fieinfo = { 0, };
        struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        u64 len;
        int error;

        if (!inode->i_op->fiemap)
                return -EOPNOTSUPP;

        if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
                return -EFAULT;

        if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
                return -EINVAL;

        error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
                                    &len);
        if (error)
                return error;

        fieinfo.fi_flags = fiemap.fm_flags;
        fieinfo.fi_extents_max = fiemap.fm_extent_count;
        fieinfo.fi_extents_start = ufiemap->fm_extents;

        if (fiemap.fm_extent_count != 0 &&
            !access_ok(fieinfo.fi_extents_start,
                       fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
                return -EFAULT;

        if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
                filemap_write_and_wait(inode->i_mapping);

        error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
        fiemap.fm_flags = fieinfo.fi_flags;
        fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
        if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
                error = -EFAULT;

        return error;
}
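
/*
 * Userspace sketch (illustration only, not kernel code): the two-pass
 * calling convention supported by the FS_IOC_FIEMAP handler above.  With
 * fm_extent_count == 0 the kernel only counts extents (fm_mapped_extents);
 * the caller then sizes the extent array and repeats the call.  A real
 * caller should cope with the count changing between the two calls.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include <stdlib.h>
#include <string.h>

static struct fiemap *map_whole_file(int fd)
{
        struct fiemap probe, *fm;
        size_t size;

        memset(&probe, 0, sizeof(probe));
        probe.fm_length = FIEMAP_MAX_OFFSET;    /* whole file */
        if (ioctl(fd, FS_IOC_FIEMAP, &probe) < 0)
                return NULL;

        size = sizeof(*fm) +
               probe.fm_mapped_extents * sizeof(struct fiemap_extent);
        fm = calloc(1, size);
        if (!fm)
                return NULL;
        fm->fm_length = FIEMAP_MAX_OFFSET;
        fm->fm_extent_count = probe.fm_mapped_extents;
        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                free(fm);
                return NULL;
        }
        return fm;                              /* caller frees */
}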

static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
                             u64 off, u64 olen, u64 destoff)
{
        struct fd src_file = fdget(srcfd);
        loff_t cloned;
        int ret;

        if (!src_file.file)
                return -EBADF;
        ret = -EXDEV;
        if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
                goto fdput;
        cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
                                      olen, 0);
        if (cloned < 0)
                ret = cloned;
        else if (olen && cloned != olen)
                ret = -EINVAL;
        else
                ret = 0;
fdput:
        fdput(src_file);
        return ret;
}

static long ioctl_file_clone_range(struct file *file, void __user *argp)
{
        struct file_clone_range args;

        if (copy_from_user(&args, argp, sizeof(args)))
                return -EFAULT;
        return ioctl_file_clone(file, args.src_fd, args.src_offset,
                                args.src_length, args.dest_offset);
}
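
/*
 * Userspace sketch (illustration only, not kernel code): FICLONE and
 * FICLONERANGE as dispatched to ioctl_file_clone() above.  Both files must
 * be on the same mount, and src_length == 0 means "clone through EOF".
 */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int clone_first_mib(int dest_fd, int src_fd)
{
        struct file_clone_range args = {
                .src_fd      = src_fd,
                .src_offset  = 0,
                .src_length  = 1024 * 1024,
                .dest_offset = 0,
        };

        return ioctl(dest_fd, FICLONERANGE, &args);
        /* A whole-file clone would simply be: ioctl(dest_fd, FICLONE, src_fd); */
}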

#ifdef CONFIG_BLOCK

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
        return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
        return (blk << inode->i_blkbits);
}

/**
 * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
 * @inode: the inode to map
 * @fieinfo: the fiemap info struct that will be passed back to userspace
 * @start: where to start mapping in the inode
 * @len: how much space to map
 * @get_block: the fs's get_block function
 *
 * This does FIEMAP for block based inodes.  Basically it will just loop
 * through get_block until we hit the number of extents we want to map, or we
 * go past the end of the file and hit a hole.
 * If it is possible to have data blocks beyond a hole past @inode->i_size,
 * then please do not use this function; it will stop at the first unmapped
 * block beyond i_size.
 *
 * If you use this function directly, you need to do your own locking. Use
 * generic_block_fiemap if you want the locking done for you.
 */

int __generic_block_fiemap(struct inode *inode,
                           struct fiemap_extent_info *fieinfo, loff_t start,
                           loff_t len, get_block_t *get_block)
{
        struct buffer_head map_bh;
        sector_t start_blk, last_blk;
        loff_t isize = i_size_read(inode);
        u64 logical = 0, phys = 0, size = 0;
        u32 flags = FIEMAP_EXTENT_MERGED;
        bool past_eof = false, whole_file = false;
        int ret = 0;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        /*
         * Either the i_mutex or other appropriate locking needs to be held
         * since we expect isize to not change at all through the duration of
         * this call.
         */
        if (len >= isize) {
                whole_file = true;
                len = isize;
        }

        /*
         * Some filesystems can't deal with being asked to map less than
         * blocksize, so make sure our len is at least block length.
         */
        if (logical_to_blk(inode, len) == 0)
                len = blk_to_logical(inode, 1);

        start_blk = logical_to_blk(inode, start);
        last_blk = logical_to_blk(inode, start + len - 1);

        do {
                /*
                 * we set b_size to the total size we want so it will map as
                 * many contiguous blocks as possible at once
                 */
                memset(&map_bh, 0, sizeof(struct buffer_head));
                map_bh.b_size = len;

                ret = get_block(inode, start_blk, &map_bh, 0);
                if (ret)
                        break;

                /* HOLE */
                if (!buffer_mapped(&map_bh)) {
                        start_blk++;

                        /*
                         * Handle the case where there is an allocated block
                         * at the front of the file followed by nothing but
                         * holes to the end of the file, so that the extent
                         * at the front still gets marked with
                         * FIEMAP_EXTENT_LAST.
                         */
                        if (!past_eof &&
                            blk_to_logical(inode, start_blk) >= isize)
                                past_eof = true;

                        /*
                         * First hole after going past the EOF, this is our
                         * last extent
                         */
                        if (past_eof && size) {
                                flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
                                ret = fiemap_fill_next_extent(fieinfo, logical,
                                                              phys, size,
                                                              flags);
                        } else if (size) {
                                ret = fiemap_fill_next_extent(fieinfo, logical,
                                                              phys, size, flags);
                                size = 0;
                        }

                        /* if we have holes up to/past EOF then we're done */
                        if (start_blk > last_blk || past_eof || ret)
                                break;
                } else {
                        /*
                         * We have gone over the length of what we wanted to
                         * map, and it wasn't the entire file, so add the extent
                         * we got last time and exit.
                         *
                         * This is for the case where say we want to map all the
                         * way up to the second to the last block in a file, but
                         * the last block is a hole, making the second to last
                         * block FIEMAP_EXTENT_LAST.  In this case we want to
                         * see if there is a hole after the second to last block
                         * so we can mark it properly.  If we found data after
                         * we exceeded the length we were requesting, then we
                         * are good to go, just add the extent to the fieinfo
                         * and break
                         */
                        if (start_blk > last_blk && !whole_file) {
                                ret = fiemap_fill_next_extent(fieinfo, logical,
                                                              phys, size,
                                                              flags);
                                break;
                        }

                        /*
                         * if size != 0 then we know we already have an extent
                         * to add, so add it.
                         */
                        if (size) {
                                ret = fiemap_fill_next_extent(fieinfo, logical,
                                                              phys, size,
                                                              flags);
                                if (ret)
                                        break;
                        }

                        logical = blk_to_logical(inode, start_blk);
                        phys = blk_to_logical(inode, map_bh.b_blocknr);
                        size = map_bh.b_size;
                        flags = FIEMAP_EXTENT_MERGED;

                        start_blk += logical_to_blk(inode, size);

                        /*
                         * If we are past the EOF, then we need to make sure as
                         * soon as we find a hole that the last extent we found
                         * is marked with FIEMAP_EXTENT_LAST
                         */
                        if (!past_eof && logical + size >= isize)
                                past_eof = true;
                }
                cond_resched();
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

        } while (1);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        return ret;
}
EXPORT_SYMBOL(__generic_block_fiemap);

/**
 * generic_block_fiemap - FIEMAP for block based inodes
 * @inode: The inode to map
 * @fieinfo: The mapping information
 * @start: The initial block to map
 * @len: The length of the extent to attempt to map
 * @get_block: The block mapping function for the fs
 *
 * Calls __generic_block_fiemap to map the inode, after taking
 * the inode's mutex lock.
 */

int generic_block_fiemap(struct inode *inode,
                         struct fiemap_extent_info *fieinfo, u64 start,
                         u64 len, get_block_t *get_block)
{
        int ret;
        inode_lock(inode);
        ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
        inode_unlock(inode);
        return ret;
}
EXPORT_SYMBOL(generic_block_fiemap);
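
/*
 * Example (illustration only, not part of the original file): how a
 * block-based filesystem typically wires its ->fiemap to the helper above,
 * passing its own get_block routine (much as ext2 does with ext2_get_block).
 * blockfs_fiemap() and blockfs_get_block() are hypothetical names.
 */
static int blockfs_fiemap(struct inode *inode,
                          struct fiemap_extent_info *fieinfo,
                          u64 start, u64 len)
{
        return generic_block_fiemap(inode, fieinfo, start, len,
                                    blockfs_get_block);
}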

#endif  /*  CONFIG_BLOCK  */

/*
 * This provides compatibility with legacy XFS pre-allocation ioctls
 * which predate the fallocate syscall.
 *
 * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
 * are used here; the rest are ignored.
 */
int ioctl_preallocate(struct file *filp, void __user *argp)
{
        struct inode *inode = file_inode(filp);
        struct space_resv sr;

        if (copy_from_user(&sr, argp, sizeof(sr)))
                return -EFAULT;

        switch (sr.l_whence) {
        case SEEK_SET:
                break;
        case SEEK_CUR:
                sr.l_start += filp->f_pos;
                break;
        case SEEK_END:
                sr.l_start += i_size_read(inode);
                break;
        default:
                return -EINVAL;
        }

        return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}

static int file_ioctl(struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        int __user *p = (int __user *)arg;

        switch (cmd) {
        case FIBMAP:
                return ioctl_fibmap(filp, p);
        case FIONREAD:
                return put_user(i_size_read(inode) - filp->f_pos, p);
        case FS_IOC_RESVSP:
        case FS_IOC_RESVSP64:
                return ioctl_preallocate(filp, p);
        }

        return vfs_ioctl(filp, cmd, arg);
}

static int ioctl_fionbio(struct file *filp, int __user *argp)
{
        unsigned int flag;
        int on, error;

        error = get_user(on, argp);
        if (error)
                return error;
        flag = O_NONBLOCK;
#ifdef __sparc__
        /* SunOS compatibility item. */
        if (O_NONBLOCK != O_NDELAY)
                flag |= O_NDELAY;
#endif
        spin_lock(&filp->f_lock);
        if (on)
                filp->f_flags |= flag;
        else
                filp->f_flags &= ~flag;
        spin_unlock(&filp->f_lock);
        return error;
}
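
/*
 * Userspace sketch (illustration only, not kernel code): toggling
 * non-blocking mode via FIONBIO, which ioctl_fionbio() above implements;
 * equivalent to flipping O_NONBLOCK with fcntl(F_SETFL).
 */
#include <sys/ioctl.h>

static int set_nonblocking(int fd, int enable)
{
        int on = enable ? 1 : 0;

        return ioctl(fd, FIONBIO, &on);
}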

static int ioctl_fioasync(unsigned int fd, struct file *filp,
                          int __user *argp)
{
        unsigned int flag;
        int on, error;

        error = get_user(on, argp);
        if (error)
                return error;
        flag = on ? FASYNC : 0;

        /* Did the FASYNC state change? */
        if ((flag ^ filp->f_flags) & FASYNC) {
                if (filp->f_op->fasync)
                        /* fasync() adjusts filp->f_flags */
                        error = filp->f_op->fasync(fd, filp, on);
                else
                        error = -ENOTTY;
        }
        return error < 0 ? error : 0;
}

static int ioctl_fsfreeze(struct file *filp)
{
        struct super_block *sb = file_inode(filp)->i_sb;

        if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /* If the filesystem doesn't support the freeze feature, return. */
        if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL)
                return -EOPNOTSUPP;

        /* Freeze */
        if (sb->s_op->freeze_super)
                return sb->s_op->freeze_super(sb);
        return freeze_super(sb);
}

static int ioctl_fsthaw(struct file *filp)
{
        struct super_block *sb = file_inode(filp)->i_sb;

        if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /* Thaw */
        if (sb->s_op->thaw_super)
                return sb->s_op->thaw_super(sb);
        return thaw_super(sb);
}
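
/*
 * Userspace sketch (illustration only, not kernel code): freezing and
 * thawing a filesystem through FIFREEZE/FITHAW, the paths handled by
 * ioctl_fsfreeze() and ioctl_fsthaw() above (this is what fsfreeze(8) does).
 * The fd is any open file on the filesystem, typically the mount point;
 * CAP_SYS_ADMIN is required and the ioctl argument is ignored.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int freeze_fs(int mountpoint_fd)
{
        return ioctl(mountpoint_fd, FIFREEZE, 0);
}

static int thaw_fs(int mountpoint_fd)
{
        return ioctl(mountpoint_fd, FITHAW, 0);
}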

static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
{
        struct file_dedupe_range __user *argp = arg;
        struct file_dedupe_range *same = NULL;
        int ret;
        unsigned long size;
        u16 count;

        if (get_user(count, &argp->dest_count)) {
                ret = -EFAULT;
                goto out;
        }

        size = offsetof(struct file_dedupe_range __user, info[count]);
        if (size > PAGE_SIZE) {
                ret = -ENOMEM;
                goto out;
        }

        same = memdup_user(argp, size);
        if (IS_ERR(same)) {
                ret = PTR_ERR(same);
                same = NULL;
                goto out;
        }

        same->dest_count = count;
        ret = vfs_dedupe_file_range(file, same);
        if (ret)
                goto out;

        ret = copy_to_user(argp, same, size);
        if (ret)
                ret = -EFAULT;

out:
        kfree(same);
        return ret;
}
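
/*
 * Userspace sketch (illustration only, not kernel code): deduplicating one
 * destination range against a source range via FIDEDUPERANGE, the path
 * handled by ioctl_file_dedupe_range() above.  On success, check
 * info[0].status and info[0].bytes_deduped for the per-destination result.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <stdlib.h>

static int dedupe_one_range(int src_fd, __u64 src_off, __u64 len,
                            int dest_fd, __u64 dest_off)
{
        struct file_dedupe_range *range;
        int ret;

        range = calloc(1, sizeof(*range) +
                          sizeof(struct file_dedupe_range_info));
        if (!range)
                return -1;

        range->src_offset = src_off;
        range->src_length = len;
        range->dest_count = 1;
        range->info[0].dest_fd = dest_fd;
        range->info[0].dest_offset = dest_off;

        ret = ioctl(src_fd, FIDEDUPERANGE, range);
        if (ret == 0 && range->info[0].status != FILE_DEDUPE_RANGE_SAME)
                ret = -1;       /* ranges differed or per-file error */

        free(range);
        return ret;
}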

/*
 * When you add any new common ioctls to the switches above and below
 * please update compat_sys_ioctl() too.
 *
 * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
 * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
 */
int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
             unsigned long arg)
{
        int error = 0;
        int __user *argp = (int __user *)arg;
        struct inode *inode = file_inode(filp);

        switch (cmd) {
        case FIOCLEX:
                set_close_on_exec(fd, 1);
                break;

        case FIONCLEX:
                set_close_on_exec(fd, 0);
                break;

        case FIONBIO:
                error = ioctl_fionbio(filp, argp);
                break;

        case FIOASYNC:
                error = ioctl_fioasync(fd, filp, argp);
                break;

        case FIOQSIZE:
                if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
                    S_ISLNK(inode->i_mode)) {
                        loff_t res = inode_get_bytes(inode);
                        error = copy_to_user(argp, &res, sizeof(res)) ?
                                        -EFAULT : 0;
                } else
                        error = -ENOTTY;
                break;

        case FIFREEZE:
                error = ioctl_fsfreeze(filp);
                break;

        case FITHAW:
                error = ioctl_fsthaw(filp);
                break;

        case FS_IOC_FIEMAP:
                return ioctl_fiemap(filp, arg);

        case FIGETBSZ:
                /* anon_bdev filesystems may not have a block size */
                if (!inode->i_sb->s_blocksize)
                        return -EINVAL;
                return put_user(inode->i_sb->s_blocksize, argp);

        case FICLONE:
                return ioctl_file_clone(filp, arg, 0, 0, 0);

        case FICLONERANGE:
                return ioctl_file_clone_range(filp, argp);

        case FIDEDUPERANGE:
                return ioctl_file_dedupe_range(filp, argp);

        default:
                if (S_ISREG(inode->i_mode))
                        error = file_ioctl(filp, cmd, arg);
                else
                        error = vfs_ioctl(filp, cmd, arg);
                break;
        }
        return error;
}

int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        int error;
        struct fd f = fdget(fd);

        if (!f.file)
                return -EBADF;
        error = security_file_ioctl(f.file, cmd, arg);
        if (!error)
                error = do_vfs_ioctl(f.file, fd, cmd, arg);
        fdput(f);
        return error;
}

SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
        return ksys_ioctl(fd, cmd, arg);
}