linux/fs/xfs/xfs_ioctl.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_rtalloc.h"
  15#include "xfs_iwalk.h"
  16#include "xfs_itable.h"
  17#include "xfs_error.h"
  18#include "xfs_attr.h"
  19#include "xfs_bmap.h"
  20#include "xfs_bmap_util.h"
  21#include "xfs_fsops.h"
  22#include "xfs_discard.h"
  23#include "xfs_quota.h"
  24#include "xfs_export.h"
  25#include "xfs_trace.h"
  26#include "xfs_icache.h"
  27#include "xfs_trans.h"
  28#include "xfs_acl.h"
  29#include "xfs_btree.h"
  30#include <linux/fsmap.h>
  31#include "xfs_fsmap.h"
  32#include "scrub/xfs_scrub.h"
  33#include "xfs_sb.h"
  34#include "xfs_ag.h"
  35#include "xfs_health.h"
  36#include "xfs_reflink.h"
  37#include "xfs_ioctl.h"
  38#include "xfs_da_format.h"
  39#include "xfs_da_btree.h"
  40
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/fileattr.h>
  44
  45/*
  46 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
  47 * a file or fs handle.
  48 *
  49 * XFS_IOC_PATH_TO_FSHANDLE
  50 *    returns fs handle for a mount point or path within that mount point
  51 * XFS_IOC_FD_TO_HANDLE
  52 *    returns full handle for a FD opened in user space
  53 * XFS_IOC_PATH_TO_HANDLE
  54 *    returns full handle for a path
  55 */
  56int
  57xfs_find_handle(
  58        unsigned int            cmd,
  59        xfs_fsop_handlereq_t    *hreq)
  60{
  61        int                     hsize;
  62        xfs_handle_t            handle;
  63        struct inode            *inode;
  64        struct fd               f = {NULL};
  65        struct path             path;
  66        int                     error;
  67        struct xfs_inode        *ip;
  68
  69        if (cmd == XFS_IOC_FD_TO_HANDLE) {
  70                f = fdget(hreq->fd);
  71                if (!f.file)
  72                        return -EBADF;
  73                inode = file_inode(f.file);
  74        } else {
  75                error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
  76                if (error)
  77                        return error;
  78                inode = d_inode(path.dentry);
  79        }
  80        ip = XFS_I(inode);
  81
  82        /*
  83         * We can only generate handles for inodes residing on a XFS filesystem,
  84         * and only for regular files, directories or symbolic links.
  85         */
  86        error = -EINVAL;
  87        if (inode->i_sb->s_magic != XFS_SB_MAGIC)
  88                goto out_put;
  89
  90        error = -EBADF;
  91        if (!S_ISREG(inode->i_mode) &&
  92            !S_ISDIR(inode->i_mode) &&
  93            !S_ISLNK(inode->i_mode))
  94                goto out_put;
  95
  96
  97        memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
  98
  99        if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
 100                /*
 101                 * This handle only contains an fsid, zero the rest.
 102                 */
 103                memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
 104                hsize = sizeof(xfs_fsid_t);
 105        } else {
 106                handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
 107                                        sizeof(handle.ha_fid.fid_len);
 108                handle.ha_fid.fid_pad = 0;
 109                handle.ha_fid.fid_gen = inode->i_generation;
 110                handle.ha_fid.fid_ino = ip->i_ino;
 111                hsize = sizeof(xfs_handle_t);
 112        }
 113
 114        error = -EFAULT;
 115        if (copy_to_user(hreq->ohandle, &handle, hsize) ||
 116            copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
 117                goto out_put;
 118
 119        error = 0;
 120
 121 out_put:
 122        if (cmd == XFS_IOC_FD_TO_HANDLE)
 123                fdput(f);
 124        else
 125                path_put(&path);
 126        return error;
 127}
 128
 129/*
 130 * No need to do permission checks on the various pathname components
 131 * as the handle operations are privileged.
 132 */
 133STATIC int
 134xfs_handle_acceptable(
 135        void                    *context,
 136        struct dentry           *dentry)
 137{
 138        return 1;
 139}
 140
 141/*
 142 * Convert userspace handle data into a dentry.
 143 */
 144struct dentry *
 145xfs_handle_to_dentry(
 146        struct file             *parfilp,
 147        void __user             *uhandle,
 148        u32                     hlen)
 149{
 150        xfs_handle_t            handle;
 151        struct xfs_fid64        fid;
 152
 153        /*
 154         * Only allow handle opens under a directory.
 155         */
 156        if (!S_ISDIR(file_inode(parfilp)->i_mode))
 157                return ERR_PTR(-ENOTDIR);
 158
 159        if (hlen != sizeof(xfs_handle_t))
 160                return ERR_PTR(-EINVAL);
 161        if (copy_from_user(&handle, uhandle, hlen))
 162                return ERR_PTR(-EFAULT);
 163        if (handle.ha_fid.fid_len !=
 164            sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
 165                return ERR_PTR(-EINVAL);
 166
 167        memset(&fid, 0, sizeof(struct fid));
 168        fid.ino = handle.ha_fid.fid_ino;
 169        fid.gen = handle.ha_fid.fid_gen;
 170
 171        return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
 172                        FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
 173                        xfs_handle_acceptable, NULL);
 174}
 175
 176STATIC struct dentry *
 177xfs_handlereq_to_dentry(
 178        struct file             *parfilp,
 179        xfs_fsop_handlereq_t    *hreq)
 180{
 181        return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
 182}
 183
 184int
 185xfs_open_by_handle(
 186        struct file             *parfilp,
 187        xfs_fsop_handlereq_t    *hreq)
 188{
 189        const struct cred       *cred = current_cred();
 190        int                     error;
 191        int                     fd;
 192        int                     permflag;
 193        struct file             *filp;
 194        struct inode            *inode;
 195        struct dentry           *dentry;
 196        fmode_t                 fmode;
 197        struct path             path;
 198
 199        if (!capable(CAP_SYS_ADMIN))
 200                return -EPERM;
 201
 202        dentry = xfs_handlereq_to_dentry(parfilp, hreq);
 203        if (IS_ERR(dentry))
 204                return PTR_ERR(dentry);
 205        inode = d_inode(dentry);
 206
 207        /* Restrict xfs_open_by_handle to directories & regular files. */
 208        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
 209                error = -EPERM;
 210                goto out_dput;
 211        }
 212
 213#if BITS_PER_LONG != 32
 214        hreq->oflags |= O_LARGEFILE;
 215#endif
 216
 217        permflag = hreq->oflags;
 218        fmode = OPEN_FMODE(permflag);
 219        if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
 220            (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
 221                error = -EPERM;
 222                goto out_dput;
 223        }
 224
 225        if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
 226                error = -EPERM;
 227                goto out_dput;
 228        }
 229
 230        /* Can't write directories. */
 231        if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
 232                error = -EISDIR;
 233                goto out_dput;
 234        }
 235
 236        fd = get_unused_fd_flags(0);
 237        if (fd < 0) {
 238                error = fd;
 239                goto out_dput;
 240        }
 241
 242        path.mnt = parfilp->f_path.mnt;
 243        path.dentry = dentry;
 244        filp = dentry_open(&path, hreq->oflags, cred);
 245        dput(dentry);
 246        if (IS_ERR(filp)) {
 247                put_unused_fd(fd);
 248                return PTR_ERR(filp);
 249        }
 250
 251        if (S_ISREG(inode->i_mode)) {
 252                filp->f_flags |= O_NOATIME;
 253                filp->f_mode |= FMODE_NOCMTIME;
 254        }
 255
 256        fd_install(fd, filp);
 257        return fd;
 258
 259 out_dput:
 260        dput(dentry);
 261        return error;
 262}
 263
 264int
 265xfs_readlink_by_handle(
 266        struct file             *parfilp,
 267        xfs_fsop_handlereq_t    *hreq)
 268{
 269        struct dentry           *dentry;
 270        __u32                   olen;
 271        int                     error;
 272
 273        if (!capable(CAP_SYS_ADMIN))
 274                return -EPERM;
 275
 276        dentry = xfs_handlereq_to_dentry(parfilp, hreq);
 277        if (IS_ERR(dentry))
 278                return PTR_ERR(dentry);
 279
 280        /* Restrict this handle operation to symlinks only. */
 281        if (!d_is_symlink(dentry)) {
 282                error = -EINVAL;
 283                goto out_dput;
 284        }
 285
 286        if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
 287                error = -EFAULT;
 288                goto out_dput;
 289        }
 290
 291        error = vfs_readlink(dentry, hreq->ohandle, olen);
 292
 293 out_dput:
 294        dput(dentry);
 295        return error;
 296}
 297
 298/*
 299 * Format an attribute and copy it out to the user's buffer.
 300 * Take care to check values and protect against them changing later,
 301 * we may be reading them directly out of a user buffer.
 302 */
 303static void
 304xfs_ioc_attr_put_listent(
 305        struct xfs_attr_list_context *context,
 306        int                     flags,
 307        unsigned char           *name,
 308        int                     namelen,
 309        int                     valuelen)
 310{
 311        struct xfs_attrlist     *alist = context->buffer;
 312        struct xfs_attrlist_ent *aep;
 313        int                     arraytop;
 314
 315        ASSERT(!context->seen_enough);
 316        ASSERT(context->count >= 0);
 317        ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
 318        ASSERT(context->firstu >= sizeof(*alist));
 319        ASSERT(context->firstu <= context->bufsize);
 320
 321        /*
 322         * Only list entries in the right namespace.
 323         */
 324        if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
 325                return;
 326
 327        arraytop = sizeof(*alist) +
 328                        context->count * sizeof(alist->al_offset[0]);
 329
 330        /* decrement by the actual bytes used by the attr */
 331        context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
 332                        namelen + 1, sizeof(uint32_t));
 333        if (context->firstu < arraytop) {
 334                trace_xfs_attr_list_full(context);
 335                alist->al_more = 1;
 336                context->seen_enough = 1;
 337                return;
 338        }
 339
 340        aep = context->buffer + context->firstu;
 341        aep->a_valuelen = valuelen;
 342        memcpy(aep->a_name, name, namelen);
 343        aep->a_name[namelen] = 0;
 344        alist->al_offset[context->count++] = context->firstu;
 345        alist->al_count = context->count;
 346        trace_xfs_attr_list_add(context);
 347}
 348
 349static unsigned int
 350xfs_attr_filter(
 351        u32                     ioc_flags)
 352{
 353        if (ioc_flags & XFS_IOC_ATTR_ROOT)
 354                return XFS_ATTR_ROOT;
 355        if (ioc_flags & XFS_IOC_ATTR_SECURE)
 356                return XFS_ATTR_SECURE;
 357        return 0;
 358}
 359
 360static unsigned int
 361xfs_attr_flags(
 362        u32                     ioc_flags)
 363{
 364        if (ioc_flags & XFS_IOC_ATTR_CREATE)
 365                return XATTR_CREATE;
 366        if (ioc_flags & XFS_IOC_ATTR_REPLACE)
 367                return XATTR_REPLACE;
 368        return 0;
 369}
 370
 371int
 372xfs_ioc_attr_list(
 373        struct xfs_inode                *dp,
 374        void __user                     *ubuf,
 375        int                             bufsize,
 376        int                             flags,
 377        struct xfs_attrlist_cursor __user *ucursor)
 378{
 379        struct xfs_attr_list_context    context = { };
 380        struct xfs_attrlist             *alist;
 381        void                            *buffer;
 382        int                             error;
 383
 384        if (bufsize < sizeof(struct xfs_attrlist) ||
 385            bufsize > XFS_XATTR_LIST_MAX)
 386                return -EINVAL;
 387
 388        /*
 389         * Reject flags, only allow namespaces.
 390         */
 391        if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
 392                return -EINVAL;
 393        if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
 394                return -EINVAL;
 395
 396        /*
 397         * Validate the cursor.
 398         */
 399        if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
 400                return -EFAULT;
 401        if (context.cursor.pad1 || context.cursor.pad2)
 402                return -EINVAL;
 403        if (!context.cursor.initted &&
 404            (context.cursor.hashval || context.cursor.blkno ||
 405             context.cursor.offset))
 406                return -EINVAL;
 407
 408        buffer = kvzalloc(bufsize, GFP_KERNEL);
 409        if (!buffer)
 410                return -ENOMEM;
 411
 412        /*
 413         * Initialize the output buffer.
 414         */
 415        context.dp = dp;
 416        context.resynch = 1;
 417        context.attr_filter = xfs_attr_filter(flags);
 418        context.buffer = buffer;
 419        context.bufsize = round_down(bufsize, sizeof(uint32_t));
 420        context.firstu = context.bufsize;
 421        context.put_listent = xfs_ioc_attr_put_listent;
 422
 423        alist = context.buffer;
 424        alist->al_count = 0;
 425        alist->al_more = 0;
 426        alist->al_offset[0] = context.bufsize;
 427
 428        error = xfs_attr_list(&context);
 429        if (error)
 430                goto out_free;
 431
 432        if (copy_to_user(ubuf, buffer, bufsize) ||
 433            copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
 434                error = -EFAULT;
 435out_free:
 436        kmem_free(buffer);
 437        return error;
 438}
 439
 440STATIC int
 441xfs_attrlist_by_handle(
 442        struct file             *parfilp,
 443        struct xfs_fsop_attrlist_handlereq __user *p)
 444{
 445        struct xfs_fsop_attrlist_handlereq al_hreq;
 446        struct dentry           *dentry;
 447        int                     error = -ENOMEM;
 448
 449        if (!capable(CAP_SYS_ADMIN))
 450                return -EPERM;
 451        if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
 452                return -EFAULT;
 453
 454        dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
 455        if (IS_ERR(dentry))
 456                return PTR_ERR(dentry);
 457
 458        error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
 459                                  al_hreq.buflen, al_hreq.flags, &p->pos);
 460        dput(dentry);
 461        return error;
 462}
 463
 464static int
 465xfs_attrmulti_attr_get(
 466        struct inode            *inode,
 467        unsigned char           *name,
 468        unsigned char           __user *ubuf,
 469        uint32_t                *len,
 470        uint32_t                flags)
 471{
 472        struct xfs_da_args      args = {
 473                .dp             = XFS_I(inode),
 474                .attr_filter    = xfs_attr_filter(flags),
 475                .attr_flags     = xfs_attr_flags(flags),
 476                .name           = name,
 477                .namelen        = strlen(name),
 478                .valuelen       = *len,
 479        };
 480        int                     error;
 481
 482        if (*len > XFS_XATTR_SIZE_MAX)
 483                return -EINVAL;
 484
 485        error = xfs_attr_get(&args);
 486        if (error)
 487                goto out_kfree;
 488
 489        *len = args.valuelen;
 490        if (copy_to_user(ubuf, args.value, args.valuelen))
 491                error = -EFAULT;
 492
 493out_kfree:
 494        kmem_free(args.value);
 495        return error;
 496}
 497
 498static int
 499xfs_attrmulti_attr_set(
 500        struct inode            *inode,
 501        unsigned char           *name,
 502        const unsigned char     __user *ubuf,
 503        uint32_t                len,
 504        uint32_t                flags)
 505{
 506        struct xfs_da_args      args = {
 507                .dp             = XFS_I(inode),
 508                .attr_filter    = xfs_attr_filter(flags),
 509                .attr_flags     = xfs_attr_flags(flags),
 510                .name           = name,
 511                .namelen        = strlen(name),
 512        };
 513        int                     error;
 514
 515        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 516                return -EPERM;
 517
 518        if (ubuf) {
 519                if (len > XFS_XATTR_SIZE_MAX)
 520                        return -EINVAL;
 521                args.value = memdup_user(ubuf, len);
 522                if (IS_ERR(args.value))
 523                        return PTR_ERR(args.value);
 524                args.valuelen = len;
 525        }
 526
 527        error = xfs_attr_set(&args);
 528        if (!error && (flags & XFS_IOC_ATTR_ROOT))
 529                xfs_forget_acl(inode, name);
 530        kfree(args.value);
 531        return error;
 532}
 533
 534int
 535xfs_ioc_attrmulti_one(
 536        struct file             *parfilp,
 537        struct inode            *inode,
 538        uint32_t                opcode,
 539        void __user             *uname,
 540        void __user             *value,
 541        uint32_t                *len,
 542        uint32_t                flags)
 543{
 544        unsigned char           *name;
 545        int                     error;
 546
 547        if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
 548                return -EINVAL;
 549
 550        name = strndup_user(uname, MAXNAMELEN);
 551        if (IS_ERR(name))
 552                return PTR_ERR(name);
 553
 554        switch (opcode) {
 555        case ATTR_OP_GET:
 556                error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
 557                break;
 558        case ATTR_OP_REMOVE:
 559                value = NULL;
 560                *len = 0;
 561                fallthrough;
 562        case ATTR_OP_SET:
 563                error = mnt_want_write_file(parfilp);
 564                if (error)
 565                        break;
 566                error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
 567                mnt_drop_write_file(parfilp);
 568                break;
 569        default:
 570                error = -EINVAL;
 571                break;
 572        }
 573
 574        kfree(name);
 575        return error;
 576}
 577
 578STATIC int
 579xfs_attrmulti_by_handle(
 580        struct file             *parfilp,
 581        void                    __user *arg)
 582{
 583        int                     error;
 584        xfs_attr_multiop_t      *ops;
 585        xfs_fsop_attrmulti_handlereq_t am_hreq;
 586        struct dentry           *dentry;
 587        unsigned int            i, size;
 588
 589        if (!capable(CAP_SYS_ADMIN))
 590                return -EPERM;
 591        if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
 592                return -EFAULT;
 593
 594        /* overflow check */
 595        if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
 596                return -E2BIG;
 597
 598        dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
 599        if (IS_ERR(dentry))
 600                return PTR_ERR(dentry);
 601
 602        error = -E2BIG;
 603        size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
 604        if (!size || size > 16 * PAGE_SIZE)
 605                goto out_dput;
 606
 607        ops = memdup_user(am_hreq.ops, size);
 608        if (IS_ERR(ops)) {
 609                error = PTR_ERR(ops);
 610                goto out_dput;
 611        }
 612
 613        error = 0;
 614        for (i = 0; i < am_hreq.opcount; i++) {
 615                ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
 616                                d_inode(dentry), ops[i].am_opcode,
 617                                ops[i].am_attrname, ops[i].am_attrvalue,
 618                                &ops[i].am_length, ops[i].am_flags);
 619        }
 620
 621        if (copy_to_user(am_hreq.ops, ops, size))
 622                error = -EFAULT;
 623
 624        kfree(ops);
 625 out_dput:
 626        dput(dentry);
 627        return error;
 628}
 629
 630int
 631xfs_ioc_space(
 632        struct file             *filp,
 633        xfs_flock64_t           *bf)
 634{
 635        struct inode            *inode = file_inode(filp);
 636        struct xfs_inode        *ip = XFS_I(inode);
 637        struct iattr            iattr;
 638        enum xfs_prealloc_flags flags = XFS_PREALLOC_CLEAR;
 639        uint                    iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
 640        int                     error;
 641
 642        if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
 643                return -EPERM;
 644
 645        if (!(filp->f_mode & FMODE_WRITE))
 646                return -EBADF;
 647
 648        if (!S_ISREG(inode->i_mode))
 649                return -EINVAL;
 650
 651        if (xfs_is_always_cow_inode(ip))
 652                return -EOPNOTSUPP;
 653
 654        if (filp->f_flags & O_DSYNC)
 655                flags |= XFS_PREALLOC_SYNC;
 656        if (filp->f_mode & FMODE_NOCMTIME)
 657                flags |= XFS_PREALLOC_INVISIBLE;
 658
 659        error = mnt_want_write_file(filp);
 660        if (error)
 661                return error;
 662
 663        xfs_ilock(ip, iolock);
 664        error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
 665        if (error)
 666                goto out_unlock;
 667        inode_dio_wait(inode);
 668
 669        switch (bf->l_whence) {
 670        case 0: /*SEEK_SET*/
 671                break;
 672        case 1: /*SEEK_CUR*/
 673                bf->l_start += filp->f_pos;
 674                break;
 675        case 2: /*SEEK_END*/
 676                bf->l_start += XFS_ISIZE(ip);
 677                break;
 678        default:
 679                error = -EINVAL;
 680                goto out_unlock;
 681        }
 682
 683        if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
 684                error = -EINVAL;
 685                goto out_unlock;
 686        }
 687
 688        if (bf->l_start > XFS_ISIZE(ip)) {
 689                error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
 690                                bf->l_start - XFS_ISIZE(ip), 0);
 691                if (error)
 692                        goto out_unlock;
 693        }
 694
 695        iattr.ia_valid = ATTR_SIZE;
 696        iattr.ia_size = bf->l_start;
 697        error = xfs_vn_setattr_size(file_mnt_user_ns(filp), file_dentry(filp),
 698                                    &iattr);
 699        if (error)
 700                goto out_unlock;
 701
 702        error = xfs_update_prealloc_flags(ip, flags);
 703
 704out_unlock:
 705        xfs_iunlock(ip, iolock);
 706        mnt_drop_write_file(filp);
 707        return error;
 708}
 709
 710/* Return 0 on success or positive error */
 711int
 712xfs_fsbulkstat_one_fmt(
 713        struct xfs_ibulk                *breq,
 714        const struct xfs_bulkstat       *bstat)
 715{
 716        struct xfs_bstat                bs1;
 717
 718        xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
 719        if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
 720                return -EFAULT;
 721        return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
 722}
 723
 724int
 725xfs_fsinumbers_fmt(
 726        struct xfs_ibulk                *breq,
 727        const struct xfs_inumbers       *igrp)
 728{
 729        struct xfs_inogrp               ig1;
 730
 731        xfs_inumbers_to_inogrp(&ig1, igrp);
 732        if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
 733                return -EFAULT;
 734        return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
 735}
 736
 737STATIC int
 738xfs_ioc_fsbulkstat(
 739        struct file             *file,
 740        unsigned int            cmd,
 741        void                    __user *arg)
 742{
 743        struct xfs_mount        *mp = XFS_I(file_inode(file))->i_mount;
 744        struct xfs_fsop_bulkreq bulkreq;
 745        struct xfs_ibulk        breq = {
 746                .mp             = mp,
 747                .mnt_userns     = file_mnt_user_ns(file),
 748                .ocount         = 0,
 749        };
 750        xfs_ino_t               lastino;
 751        int                     error;
 752
 753        /* done = 1 if there are more stats to get and if bulkstat */
 754        /* should be called again (unused here, but used in dmapi) */
 755
 756        if (!capable(CAP_SYS_ADMIN))
 757                return -EPERM;
 758
 759        if (XFS_FORCED_SHUTDOWN(mp))
 760                return -EIO;
 761
 762        if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
 763                return -EFAULT;
 764
 765        if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
 766                return -EFAULT;
 767
 768        if (bulkreq.icount <= 0)
 769                return -EINVAL;
 770
 771        if (bulkreq.ubuffer == NULL)
 772                return -EINVAL;
 773
 774        breq.ubuffer = bulkreq.ubuffer;
 775        breq.icount = bulkreq.icount;
 776
 777        /*
 778         * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
 779         * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
 780         * that *lastip contains either zero or the number of the last inode to
 781         * be examined by the previous call and return results starting with
 782         * the next inode after that.  The new bulk request back end functions
 783         * take the inode to start with, so we have to compute the startino
 784         * parameter from lastino to maintain correct function.  lastino == 0
 785         * is a special case because it has traditionally meant "first inode
 786         * in filesystem".
 787         */
 788        if (cmd == XFS_IOC_FSINUMBERS) {
 789                breq.startino = lastino ? lastino + 1 : 0;
 790                error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
 791                lastino = breq.startino - 1;
 792        } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
 793                breq.startino = lastino;
 794                breq.icount = 1;
 795                error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
 796        } else {        /* XFS_IOC_FSBULKSTAT */
 797                breq.startino = lastino ? lastino + 1 : 0;
 798                error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
 799                lastino = breq.startino - 1;
 800        }
 801
 802        if (error)
 803                return error;
 804
 805        if (bulkreq.lastip != NULL &&
 806            copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
 807                return -EFAULT;
 808
 809        if (bulkreq.ocount != NULL &&
 810            copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
 811                return -EFAULT;
 812
 813        return 0;
 814}
 815
 816/* Return 0 on success or positive error */
 817static int
 818xfs_bulkstat_fmt(
 819        struct xfs_ibulk                *breq,
 820        const struct xfs_bulkstat       *bstat)
 821{
 822        if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
 823                return -EFAULT;
 824        return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
 825}
 826
 827/*
 828 * Check the incoming bulk request @hdr from userspace and initialize the
 829 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 830 * should proceed; -ECANCELED if there's nothing to do; or the usual
 831 * negative error code.
 832 */
 833static int
 834xfs_bulk_ireq_setup(
 835        struct xfs_mount        *mp,
 836        struct xfs_bulk_ireq    *hdr,
 837        struct xfs_ibulk        *breq,
 838        void __user             *ubuffer)
 839{
 840        if (hdr->icount == 0 ||
 841            (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
 842            memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
 843                return -EINVAL;
 844
 845        breq->startino = hdr->ino;
 846        breq->ubuffer = ubuffer;
 847        breq->icount = hdr->icount;
 848        breq->ocount = 0;
 849        breq->flags = 0;
 850
 851        /*
 852         * The @ino parameter is a special value, so we must look it up here.
 853         * We're not allowed to have IREQ_AGNO, and we only return one inode
 854         * worth of data.
 855         */
 856        if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
 857                if (hdr->flags & XFS_BULK_IREQ_AGNO)
 858                        return -EINVAL;
 859
 860                switch (hdr->ino) {
 861                case XFS_BULK_IREQ_SPECIAL_ROOT:
 862                        hdr->ino = mp->m_sb.sb_rootino;
 863                        break;
 864                default:
 865                        return -EINVAL;
 866                }
 867                breq->icount = 1;
 868        }
 869
 870        /*
 871         * The IREQ_AGNO flag means that we only want results from a given AG.
 872         * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
 873         * beyond the specified AG then we return no results.
 874         */
 875        if (hdr->flags & XFS_BULK_IREQ_AGNO) {
 876                if (hdr->agno >= mp->m_sb.sb_agcount)
 877                        return -EINVAL;
 878
 879                if (breq->startino == 0)
 880                        breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
 881                else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
 882                        return -EINVAL;
 883
 884                breq->flags |= XFS_IBULK_SAME_AG;
 885
 886                /* Asking for an inode past the end of the AG?  We're done! */
 887                if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
 888                        return -ECANCELED;
 889        } else if (hdr->agno)
 890                return -EINVAL;
 891
 892        /* Asking for an inode past the end of the FS?  We're done! */
 893        if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
 894                return -ECANCELED;
 895
 896        return 0;
 897}
 898
 899/*
 900 * Update the userspace bulk request @hdr to reflect the end state of the
 901 * internal bulk request @breq.
 902 */
 903static void
 904xfs_bulk_ireq_teardown(
 905        struct xfs_bulk_ireq    *hdr,
 906        struct xfs_ibulk        *breq)
 907{
 908        hdr->ino = breq->startino;
 909        hdr->ocount = breq->ocount;
 910}
 911
 912/* Handle the v5 bulkstat ioctl. */
 913STATIC int
 914xfs_ioc_bulkstat(
 915        struct file                     *file,
 916        unsigned int                    cmd,
 917        struct xfs_bulkstat_req __user  *arg)
 918{
 919        struct xfs_mount                *mp = XFS_I(file_inode(file))->i_mount;
 920        struct xfs_bulk_ireq            hdr;
 921        struct xfs_ibulk                breq = {
 922                .mp                     = mp,
 923                .mnt_userns             = file_mnt_user_ns(file),
 924        };
 925        int                             error;
 926
 927        if (!capable(CAP_SYS_ADMIN))
 928                return -EPERM;
 929
 930        if (XFS_FORCED_SHUTDOWN(mp))
 931                return -EIO;
 932
 933        if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
 934                return -EFAULT;
 935
 936        error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
 937        if (error == -ECANCELED)
 938                goto out_teardown;
 939        if (error < 0)
 940                return error;
 941
 942        error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
 943        if (error)
 944                return error;
 945
 946out_teardown:
 947        xfs_bulk_ireq_teardown(&hdr, &breq);
 948        if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
 949                return -EFAULT;
 950
 951        return 0;
 952}
 953
 954STATIC int
 955xfs_inumbers_fmt(
 956        struct xfs_ibulk                *breq,
 957        const struct xfs_inumbers       *igrp)
 958{
 959        if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
 960                return -EFAULT;
 961        return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
 962}
 963
 964/* Handle the v5 inumbers ioctl. */
 965STATIC int
 966xfs_ioc_inumbers(
 967        struct xfs_mount                *mp,
 968        unsigned int                    cmd,
 969        struct xfs_inumbers_req __user  *arg)
 970{
 971        struct xfs_bulk_ireq            hdr;
 972        struct xfs_ibulk                breq = {
 973                .mp                     = mp,
 974        };
 975        int                             error;
 976
 977        if (!capable(CAP_SYS_ADMIN))
 978                return -EPERM;
 979
 980        if (XFS_FORCED_SHUTDOWN(mp))
 981                return -EIO;
 982
 983        if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
 984                return -EFAULT;
 985
 986        error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
 987        if (error == -ECANCELED)
 988                goto out_teardown;
 989        if (error < 0)
 990                return error;
 991
 992        error = xfs_inumbers(&breq, xfs_inumbers_fmt);
 993        if (error)
 994                return error;
 995
 996out_teardown:
 997        xfs_bulk_ireq_teardown(&hdr, &breq);
 998        if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
 999                return -EFAULT;
1000
1001        return 0;
1002}
1003
1004STATIC int
1005xfs_ioc_fsgeometry(
1006        struct xfs_mount        *mp,
1007        void                    __user *arg,
1008        int                     struct_version)
1009{
1010        struct xfs_fsop_geom    fsgeo;
1011        size_t                  len;
1012
1013        xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);
1014
1015        if (struct_version <= 3)
1016                len = sizeof(struct xfs_fsop_geom_v1);
1017        else if (struct_version == 4)
1018                len = sizeof(struct xfs_fsop_geom_v4);
1019        else {
1020                xfs_fsop_geom_health(mp, &fsgeo);
1021                len = sizeof(fsgeo);
1022        }
1023
1024        if (copy_to_user(arg, &fsgeo, len))
1025                return -EFAULT;
1026        return 0;
1027}
1028
1029STATIC int
1030xfs_ioc_ag_geometry(
1031        struct xfs_mount        *mp,
1032        void                    __user *arg)
1033{
1034        struct xfs_ag_geometry  ageo;
1035        int                     error;
1036
1037        if (copy_from_user(&ageo, arg, sizeof(ageo)))
1038                return -EFAULT;
1039        if (ageo.ag_flags)
1040                return -EINVAL;
1041        if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
1042                return -EINVAL;
1043
1044        error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
1045        if (error)
1046                return error;
1047
1048        if (copy_to_user(arg, &ageo, sizeof(ageo)))
1049                return -EFAULT;
1050        return 0;
1051}
1052
1053/*
1054 * Linux extended inode flags interface.
1055 */
1056
1057static void
1058xfs_fill_fsxattr(
1059        struct xfs_inode        *ip,
1060        int                     whichfork,
1061        struct fileattr         *fa)
1062{
1063        struct xfs_mount        *mp = ip->i_mount;
1064        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
1065
1066        fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
1067
1068        if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
1069                fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
1070        } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
1071                /*
1072                 * Don't let a misaligned extent size hint on a directory
1073                 * escape to userspace if it won't pass the setattr checks
1074                 * later.
1075                 */
1076                if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
1077                    ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
1078                        fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
1079                                            FS_XFLAG_EXTSZINHERIT);
1080                        fa->fsx_extsize = 0;
1081                } else {
1082                        fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
1083                }
1084        }
1085
1086        if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
1087                fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
1088        fa->fsx_projid = ip->i_projid;
1089        if (ifp && !xfs_need_iread_extents(ifp))
1090                fa->fsx_nextents = xfs_iext_count(ifp);
1091        else
1092                fa->fsx_nextents = xfs_ifork_nextents(ifp);
1093}
1094
1095STATIC int
1096xfs_ioc_fsgetxattra(
1097        xfs_inode_t             *ip,
1098        void                    __user *arg)
1099{
1100        struct fileattr         fa;
1101
1102        xfs_ilock(ip, XFS_ILOCK_SHARED);
1103        xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
1104        xfs_iunlock(ip, XFS_ILOCK_SHARED);
1105
1106        return copy_fsxattr_to_user(&fa, arg);
1107}
1108
1109int
1110xfs_fileattr_get(
1111        struct dentry           *dentry,
1112        struct fileattr         *fa)
1113{
1114        struct xfs_inode        *ip = XFS_I(d_inode(dentry));
1115
1116        if (d_is_special(dentry))
1117                return -ENOTTY;
1118
1119        xfs_ilock(ip, XFS_ILOCK_SHARED);
1120        xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
1121        xfs_iunlock(ip, XFS_ILOCK_SHARED);
1122
1123        return 0;
1124}
1125
1126STATIC uint16_t
1127xfs_flags2diflags(
1128        struct xfs_inode        *ip,
1129        unsigned int            xflags)
1130{
1131        /* can't set PREALLOC this way, just preserve it */
1132        uint16_t                di_flags =
1133                (ip->i_diflags & XFS_DIFLAG_PREALLOC);
1134
1135        if (xflags & FS_XFLAG_IMMUTABLE)
1136                di_flags |= XFS_DIFLAG_IMMUTABLE;
1137        if (xflags & FS_XFLAG_APPEND)
1138                di_flags |= XFS_DIFLAG_APPEND;
1139        if (xflags & FS_XFLAG_SYNC)
1140                di_flags |= XFS_DIFLAG_SYNC;
1141        if (xflags & FS_XFLAG_NOATIME)
1142                di_flags |= XFS_DIFLAG_NOATIME;
1143        if (xflags & FS_XFLAG_NODUMP)
1144                di_flags |= XFS_DIFLAG_NODUMP;
1145        if (xflags & FS_XFLAG_NODEFRAG)
1146                di_flags |= XFS_DIFLAG_NODEFRAG;
1147        if (xflags & FS_XFLAG_FILESTREAM)
1148                di_flags |= XFS_DIFLAG_FILESTREAM;
1149        if (S_ISDIR(VFS_I(ip)->i_mode)) {
1150                if (xflags & FS_XFLAG_RTINHERIT)
1151                        di_flags |= XFS_DIFLAG_RTINHERIT;
1152                if (xflags & FS_XFLAG_NOSYMLINKS)
1153                        di_flags |= XFS_DIFLAG_NOSYMLINKS;
1154                if (xflags & FS_XFLAG_EXTSZINHERIT)
1155                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1156                if (xflags & FS_XFLAG_PROJINHERIT)
1157                        di_flags |= XFS_DIFLAG_PROJINHERIT;
1158        } else if (S_ISREG(VFS_I(ip)->i_mode)) {
1159                if (xflags & FS_XFLAG_REALTIME)
1160                        di_flags |= XFS_DIFLAG_REALTIME;
1161                if (xflags & FS_XFLAG_EXTSIZE)
1162                        di_flags |= XFS_DIFLAG_EXTSIZE;
1163        }
1164
1165        return di_flags;
1166}
1167
1168STATIC uint64_t
1169xfs_flags2diflags2(
1170        struct xfs_inode        *ip,
1171        unsigned int            xflags)
1172{
1173        uint64_t                di_flags2 =
1174                (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
1175                                   XFS_DIFLAG2_BIGTIME));
1176
1177        if (xflags & FS_XFLAG_DAX)
1178                di_flags2 |= XFS_DIFLAG2_DAX;
1179        if (xflags & FS_XFLAG_COWEXTSIZE)
1180                di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
1181
1182        return di_flags2;
1183}
1184
1185static int
1186xfs_ioctl_setattr_xflags(
1187        struct xfs_trans        *tp,
1188        struct xfs_inode        *ip,
1189        struct fileattr         *fa)
1190{
1191        struct xfs_mount        *mp = ip->i_mount;
1192        uint64_t                i_flags2;
1193
1194        /* Can't change realtime flag if any extents are allocated. */
1195        if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
1196            XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
1197                return -EINVAL;
1198
1199        /* If realtime flag is set then must have realtime device */
1200        if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
1201                if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
1202                    (ip->i_extsize % mp->m_sb.sb_rextsize))
1203                        return -EINVAL;
1204        }
1205
1206        /* Clear reflink if we are actually able to set the rt flag. */
1207        if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
1208                ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1209
1210        /* Don't allow us to set DAX mode for a reflinked file for now. */
1211        if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
1212                return -EINVAL;
1213
1214        /* diflags2 only valid for v3 inodes. */
1215        i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1216        if (i_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
1217                return -EINVAL;
1218
1219        ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1220        ip->i_diflags2 = i_flags2;
1221
1222        xfs_diflags_to_iflags(ip, false);
1223        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1224        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1225        XFS_STATS_INC(mp, xs_ig_attrchg);
1226        return 0;
1227}
1228
1229static void
1230xfs_ioctl_setattr_prepare_dax(
1231        struct xfs_inode        *ip,
1232        struct fileattr         *fa)
1233{
1234        struct xfs_mount        *mp = ip->i_mount;
1235        struct inode            *inode = VFS_I(ip);
1236
1237        if (S_ISDIR(inode->i_mode))
1238                return;
1239
1240        if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
1241            (mp->m_flags & XFS_MOUNT_DAX_NEVER))
1242                return;
1243
1244        if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
1245            !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
1246            (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
1247             (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
1248                d_mark_dontcache(inode);
1249}
1250
1251/*
1252 * Set up the transaction structure for the setattr operation, checking that we
1253 * have permission to do so. On success, return a clean transaction and the
1254 * inode locked exclusively ready for further operation specific checks. On
1255 * failure, return an error without modifying or locking the inode.
1256 */
1257static struct xfs_trans *
1258xfs_ioctl_setattr_get_trans(
1259        struct xfs_inode        *ip,
1260        struct xfs_dquot        *pdqp)
1261{
1262        struct xfs_mount        *mp = ip->i_mount;
1263        struct xfs_trans        *tp;
1264        int                     error = -EROFS;
1265
1266        if (mp->m_flags & XFS_MOUNT_RDONLY)
1267                goto out_error;
1268        error = -EIO;
1269        if (XFS_FORCED_SHUTDOWN(mp))
1270                goto out_error;
1271
1272        error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
1273                        capable(CAP_FOWNER), &tp);
1274        if (error)
1275                goto out_error;
1276
1277        if (mp->m_flags & XFS_MOUNT_WSYNC)
1278                xfs_trans_set_sync(tp);
1279
1280        return tp;
1281
1282out_error:
1283        return ERR_PTR(error);
1284}
1285
1286/*
1287 * Validate a proposed extent size hint.  For regular files, the hint can only
1288 * be changed if no extents are allocated.
1289 */
1290static int
1291xfs_ioctl_setattr_check_extsize(
1292        struct xfs_inode        *ip,
1293        struct fileattr         *fa)
1294{
1295        struct xfs_mount        *mp = ip->i_mount;
1296        xfs_failaddr_t          failaddr;
1297        uint16_t                new_diflags;
1298
1299        if (!fa->fsx_valid)
1300                return 0;
1301
1302        if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
1303            XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
1304                return -EINVAL;
1305
1306        if (fa->fsx_extsize & mp->m_blockmask)
1307                return -EINVAL;
1308
1309        new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1310
1311        /*
1312         * Inode verifiers do not check that the extent size hint is an integer
1313         * multiple of the rt extent size on a directory with both rtinherit
1314         * and extszinherit flags set.  Don't let sysadmins misconfigure
1315         * directories.
1316         */
1317        if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
1318            (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
1319                unsigned int    rtextsize_bytes;
1320
1321                rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
1322                if (fa->fsx_extsize % rtextsize_bytes)
1323                        return -EINVAL;
1324        }
1325
1326        failaddr = xfs_inode_validate_extsize(ip->i_mount,
1327                        XFS_B_TO_FSB(mp, fa->fsx_extsize),
1328                        VFS_I(ip)->i_mode, new_diflags);
1329        return failaddr != NULL ? -EINVAL : 0;
1330}
1331
1332static int
1333xfs_ioctl_setattr_check_cowextsize(
1334        struct xfs_inode        *ip,
1335        struct fileattr         *fa)
1336{
1337        struct xfs_mount        *mp = ip->i_mount;
1338        xfs_failaddr_t          failaddr;
1339        uint64_t                new_diflags2;
1340        uint16_t                new_diflags;
1341
1342        if (!fa->fsx_valid)
1343                return 0;
1344
1345        if (fa->fsx_cowextsize & mp->m_blockmask)
1346                return -EINVAL;
1347
1348        new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1349        new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1350
1351        failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
1352                        XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
1353                        VFS_I(ip)->i_mode, new_diflags, new_diflags2);
1354        return failaddr != NULL ? -EINVAL : 0;
1355}
1356
1357static int
1358xfs_ioctl_setattr_check_projid(
1359        struct xfs_inode        *ip,
1360        struct fileattr         *fa)
1361{
1362        if (!fa->fsx_valid)
1363                return 0;
1364
1365        /* Disallow 32bit project ids if projid32bit feature is not enabled. */
1366        if (fa->fsx_projid > (uint16_t)-1 &&
1367            !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1368                return -EINVAL;
1369        return 0;
1370}
1371
1372int
1373xfs_fileattr_set(
1374        struct user_namespace   *mnt_userns,
1375        struct dentry           *dentry,
1376        struct fileattr         *fa)
1377{
1378        struct xfs_inode        *ip = XFS_I(d_inode(dentry));
1379        struct xfs_mount        *mp = ip->i_mount;
1380        struct xfs_trans        *tp;
1381        struct xfs_dquot        *pdqp = NULL;
1382        struct xfs_dquot        *olddquot = NULL;
1383        int                     error;
1384
1385        trace_xfs_ioctl_setattr(ip);
1386
1387        if (d_is_special(dentry))
1388                return -ENOTTY;
1389
1390        if (!fa->fsx_valid) {
1391                if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
1392                                  FS_NOATIME_FL | FS_NODUMP_FL |
1393                                  FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
1394                        return -EOPNOTSUPP;
1395        }
1396
1397        error = xfs_ioctl_setattr_check_projid(ip, fa);
1398        if (error)
1399                return error;
1400
1401        /*
1402         * If disk quotas is on, we make sure that the dquots do exist on disk,
1403         * before we start any other transactions. Trying to do this later
1404         * is messy. We don't care to take a readlock to look at the ids
1405         * in inode here, because we can't hold it across the trans_reserve.
1406         * If the IDs do change before we take the ilock, we're covered
1407         * because the i_*dquot fields will get updated anyway.
1408         */
1409        if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
1410                error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
1411                                VFS_I(ip)->i_gid, fa->fsx_projid,
1412                                XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
1413                if (error)
1414                        return error;
1415        }
1416
1417        xfs_ioctl_setattr_prepare_dax(ip, fa);
1418
1419        tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
1420        if (IS_ERR(tp)) {
1421                error = PTR_ERR(tp);
1422                goto error_free_dquots;
1423        }
1424
1425        error = xfs_ioctl_setattr_check_extsize(ip, fa);
1426        if (error)
1427                goto error_trans_cancel;
1428
1429        error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
1430        if (error)
1431                goto error_trans_cancel;
1432
1433        error = xfs_ioctl_setattr_xflags(tp, ip, fa);
1434        if (error)
1435                goto error_trans_cancel;
1436
1437        if (!fa->fsx_valid)
1438                goto skip_xattr;
1439        /*
1440         * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
1441         * overrides the following restrictions:
1442         *
1443         * The set-user-ID and set-group-ID bits of a file will be cleared upon
1444         * successful return from chown()
1445         */
1446
1447        if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
1448            !capable_wrt_inode_uidgid(mnt_userns, VFS_I(ip), CAP_FSETID))
1449                VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
1450
1451        /* Change the ownerships and register project quota modifications */
1452        if (ip->i_projid != fa->fsx_projid) {
1453                if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1454                        olddquot = xfs_qm_vop_chown(tp, ip,
1455                                                &ip->i_pdquot, pdqp);
1456                }
1457                ip->i_projid = fa->fsx_projid;
1458        }
1459
1460        /*
1461         * Only set the extent size hint if we've already determined that the
1462         * extent size hint should be set on the inode. If no extent size flags
1463         * are set on the inode then unconditionally clear the extent size hint.
1464         */
1465        if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1466                ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1467        else
1468                ip->i_extsize = 0;
1469
1470        if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
1471                if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
1472                        ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
1473                else
1474                        ip->i_cowextsize = 0;
1475        }
1476
1477skip_xattr:
1478        error = xfs_trans_commit(tp);
1479
1480        /*
1481         * Release any dquot(s) the inode had kept before chown.
1482         */
1483        xfs_qm_dqrele(olddquot);
1484        xfs_qm_dqrele(pdqp);
1485
1486        return error;
1487
1488error_trans_cancel:
1489        xfs_trans_cancel(tp);
1490error_free_dquots:
1491        xfs_qm_dqrele(pdqp);
1492        return error;
1493}
1494
1495static bool
1496xfs_getbmap_format(
1497        struct kgetbmap         *p,
1498        struct getbmapx __user  *u,
1499        size_t                  recsize)
1500{
1501        if (put_user(p->bmv_offset, &u->bmv_offset) ||
1502            put_user(p->bmv_block, &u->bmv_block) ||
1503            put_user(p->bmv_length, &u->bmv_length) ||
1504            put_user(0, &u->bmv_count) ||
1505            put_user(0, &u->bmv_entries))
1506                return false;
1507        if (recsize < sizeof(struct getbmapx))
1508                return true;
1509        if (put_user(0, &u->bmv_iflags) ||
1510            put_user(p->bmv_oflags, &u->bmv_oflags) ||
1511            put_user(0, &u->bmv_unused1) ||
1512            put_user(0, &u->bmv_unused2))
1513                return false;
1514        return true;
1515}
1516
1517STATIC int
1518xfs_ioc_getbmap(
1519        struct file             *file,
1520        unsigned int            cmd,
1521        void                    __user *arg)
1522{
1523        struct getbmapx         bmx = { 0 };
1524        struct kgetbmap         *buf;
1525        size_t                  recsize;
1526        int                     error, i;
1527
1528        switch (cmd) {
1529        case XFS_IOC_GETBMAPA:
1530                bmx.bmv_iflags = BMV_IF_ATTRFORK;
1531                fallthrough;
1532        case XFS_IOC_GETBMAP:
1533                /* struct getbmap is a strict subset of struct getbmapx. */
1534                recsize = sizeof(struct getbmap);
1535                break;
1536        case XFS_IOC_GETBMAPX:
1537                recsize = sizeof(struct getbmapx);
1538                break;
1539        default:
1540                return -EINVAL;
1541        }
1542
1543        if (copy_from_user(&bmx, arg, recsize))
1544                return -EFAULT;
1545
1546        if (bmx.bmv_count < 2)
1547                return -EINVAL;
1548        if (bmx.bmv_count > ULONG_MAX / recsize)
1549                return -ENOMEM;
1550
1551        buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL);
1552        if (!buf)
1553                return -ENOMEM;
1554
1555        error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
1556        if (error)
1557                goto out_free_buf;
1558
1559        error = -EFAULT;
1560        if (copy_to_user(arg, &bmx, recsize))
1561                goto out_free_buf;
1562        arg += recsize;
1563
1564        for (i = 0; i < bmx.bmv_entries; i++) {
1565                if (!xfs_getbmap_format(buf + i, arg, recsize))
1566                        goto out_free_buf;
1567                arg += recsize;
1568        }
1569
1570        error = 0;
1571out_free_buf:
1572        kmem_free(buf);
1573        return error;
1574}
1575
1576STATIC int
1577xfs_ioc_getfsmap(
1578        struct xfs_inode        *ip,
1579        struct fsmap_head       __user *arg)
1580{
1581        struct xfs_fsmap_head   xhead = {0};
1582        struct fsmap_head       head;
1583        struct fsmap            *recs;
1584        unsigned int            count;
1585        __u32                   last_flags = 0;
1586        bool                    done = false;
1587        int                     error;
1588
1589        if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
1590                return -EFAULT;
1591        if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
1592            memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
1593                       sizeof(head.fmh_keys[0].fmr_reserved)) ||
1594            memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
1595                       sizeof(head.fmh_keys[1].fmr_reserved)))
1596                return -EINVAL;
1597
1598        /*
1599         * Use an internal memory buffer so that we don't have to copy fsmap
1600         * data to userspace while holding locks.  Start by trying to allocate
1601         * up to 128k for the buffer, but fall back to a single page if needed.
1602         */
1603        count = min_t(unsigned int, head.fmh_count,
1604                        131072 / sizeof(struct fsmap));
1605        recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
1606        if (!recs) {
1607                count = min_t(unsigned int, head.fmh_count,
1608                                PAGE_SIZE / sizeof(struct fsmap));
1609                recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
1610                if (!recs)
1611                        return -ENOMEM;
1612        }
1613
1614        xhead.fmh_iflags = head.fmh_iflags;
1615        xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
1616        xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
1617
1618        trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1619        trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
1620
1621        head.fmh_entries = 0;
1622        do {
1623                struct fsmap __user     *user_recs;
1624                struct fsmap            *last_rec;
1625
1626                user_recs = &arg->fmh_recs[head.fmh_entries];
1627                xhead.fmh_entries = 0;
1628                xhead.fmh_count = min_t(unsigned int, count,
1629                                        head.fmh_count - head.fmh_entries);
1630
1631                /* Run query, record how many entries we got. */
1632                error = xfs_getfsmap(ip->i_mount, &xhead, recs);
1633                switch (error) {
1634                case 0:
1635                        /*
1636                         * There are no more records in the result set.  Copy
1637                         * whatever we got to userspace and break out.
1638                         */
1639                        done = true;
1640                        break;
1641                case -ECANCELED:
1642                        /*
1643                         * The internal memory buffer is full.  Copy whatever
1644                         * records we got to userspace and go again if we have
1645                         * not yet filled the userspace buffer.
1646                         */
1647                        error = 0;
1648                        break;
1649                default:
1650                        goto out_free;
1651                }
1652                head.fmh_entries += xhead.fmh_entries;
1653                head.fmh_oflags = xhead.fmh_oflags;
1654
1655                /*
1656                 * If the caller wanted a record count or there aren't any
1657                 * new records to return, we're done.
1658                 */
1659                if (head.fmh_count == 0 || xhead.fmh_entries == 0)
1660                        break;
1661
1662                /* Copy all the records we got out to userspace. */
1663                if (copy_to_user(user_recs, recs,
1664                                 xhead.fmh_entries * sizeof(struct fsmap))) {
1665                        error = -EFAULT;
1666                        goto out_free;
1667                }
1668
1669                /* Remember the last record flags we copied to userspace. */
1670                last_rec = &recs[xhead.fmh_entries - 1];
1671                last_flags = last_rec->fmr_flags;
1672
1673                /* Set up the low key for the next iteration. */
1674                xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
1675                trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1676        } while (!done && head.fmh_entries < head.fmh_count);
1677
1678        /*
1679         * If there are no more records in the query result set and we're not
1680         * in counting mode, mark the last record returned with the LAST flag.
1681         */
1682        if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
1683                struct fsmap __user     *user_rec;
1684
1685                last_flags |= FMR_OF_LAST;
1686                user_rec = &arg->fmh_recs[head.fmh_entries - 1];
1687
1688                if (copy_to_user(&user_rec->fmr_flags, &last_flags,
1689                                        sizeof(last_flags))) {
1690                        error = -EFAULT;
1691                        goto out_free;
1692                }
1693        }
1694
1695        /* copy back header */
1696        if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
1697                error = -EFAULT;
1698                goto out_free;
1699        }
1700
1701out_free:
1702        kmem_free(recs);
1703        return error;
1704}
1705
1706STATIC int
1707xfs_ioc_scrub_metadata(
1708        struct file                     *file,
1709        void                            __user *arg)
1710{
1711        struct xfs_scrub_metadata       scrub;
1712        int                             error;
1713
1714        if (!capable(CAP_SYS_ADMIN))
1715                return -EPERM;
1716
1717        if (copy_from_user(&scrub, arg, sizeof(scrub)))
1718                return -EFAULT;
1719
1720        error = xfs_scrub_metadata(file, &scrub);
1721        if (error)
1722                return error;
1723
1724        if (copy_to_user(arg, &scrub, sizeof(scrub)))
1725                return -EFAULT;
1726
1727        return 0;
1728}
1729
1730int
1731xfs_ioc_swapext(
1732        xfs_swapext_t   *sxp)
1733{
1734        xfs_inode_t     *ip, *tip;
1735        struct fd       f, tmp;
1736        int             error = 0;
1737
1738        /* Pull information for the target fd */
1739        f = fdget((int)sxp->sx_fdtarget);
1740        if (!f.file) {
1741                error = -EINVAL;
1742                goto out;
1743        }
1744
1745        if (!(f.file->f_mode & FMODE_WRITE) ||
1746            !(f.file->f_mode & FMODE_READ) ||
1747            (f.file->f_flags & O_APPEND)) {
1748                error = -EBADF;
1749                goto out_put_file;
1750        }
1751
1752        tmp = fdget((int)sxp->sx_fdtmp);
1753        if (!tmp.file) {
1754                error = -EINVAL;
1755                goto out_put_file;
1756        }
1757
1758        if (!(tmp.file->f_mode & FMODE_WRITE) ||
1759            !(tmp.file->f_mode & FMODE_READ) ||
1760            (tmp.file->f_flags & O_APPEND)) {
1761                error = -EBADF;
1762                goto out_put_tmp_file;
1763        }
1764
1765        if (IS_SWAPFILE(file_inode(f.file)) ||
1766            IS_SWAPFILE(file_inode(tmp.file))) {
1767                error = -EINVAL;
1768                goto out_put_tmp_file;
1769        }
1770
1771        /*
1772         * We need to ensure that the fds passed in point to XFS inodes
1773         * before we cast and access them as XFS structures as we have no
1774         * control over what the user passes us here.
1775         */
1776        if (f.file->f_op != &xfs_file_operations ||
1777            tmp.file->f_op != &xfs_file_operations) {
1778                error = -EINVAL;
1779                goto out_put_tmp_file;
1780        }
1781
1782        ip = XFS_I(file_inode(f.file));
1783        tip = XFS_I(file_inode(tmp.file));
1784
1785        if (ip->i_mount != tip->i_mount) {
1786                error = -EINVAL;
1787                goto out_put_tmp_file;
1788        }
1789
1790        if (ip->i_ino == tip->i_ino) {
1791                error = -EINVAL;
1792                goto out_put_tmp_file;
1793        }
1794
1795        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1796                error = -EIO;
1797                goto out_put_tmp_file;
1798        }
1799
1800        error = xfs_swap_extents(ip, tip, sxp);
1801
1802 out_put_tmp_file:
1803        fdput(tmp);
1804 out_put_file:
1805        fdput(f);
1806 out:
1807        return error;
1808}
1809
1810static int
1811xfs_ioc_getlabel(
1812        struct xfs_mount        *mp,
1813        char                    __user *user_label)
1814{
1815        struct xfs_sb           *sbp = &mp->m_sb;
1816        char                    label[XFSLABEL_MAX + 1];
1817
1818        /* Paranoia */
1819        BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
1820
1821        /* 1 larger than sb_fname, so this ensures a trailing NUL char */
1822        memset(label, 0, sizeof(label));
1823        spin_lock(&mp->m_sb_lock);
1824        strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
1825        spin_unlock(&mp->m_sb_lock);
1826
1827        if (copy_to_user(user_label, label, sizeof(label)))
1828                return -EFAULT;
1829        return 0;
1830}
1831
1832static int
1833xfs_ioc_setlabel(
1834        struct file             *filp,
1835        struct xfs_mount        *mp,
1836        char                    __user *newlabel)
1837{
1838        struct xfs_sb           *sbp = &mp->m_sb;
1839        char                    label[XFSLABEL_MAX + 1];
1840        size_t                  len;
1841        int                     error;
1842
1843        if (!capable(CAP_SYS_ADMIN))
1844                return -EPERM;
1845        /*
1846         * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
1847         * smaller, at 12 bytes.  We copy one more to be sure we find the
1848         * (required) NULL character to test the incoming label length.
1849         * NB: The on disk label doesn't need to be null terminated.
1850         */
1851        if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
1852                return -EFAULT;
1853        len = strnlen(label, XFSLABEL_MAX + 1);
1854        if (len > sizeof(sbp->sb_fname))
1855                return -EINVAL;
1856
1857        error = mnt_want_write_file(filp);
1858        if (error)
1859                return error;
1860
1861        spin_lock(&mp->m_sb_lock);
1862        memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
1863        memcpy(sbp->sb_fname, label, len);
1864        spin_unlock(&mp->m_sb_lock);
1865
1866        /*
1867         * Now we do several things to satisfy userspace.
1868         * In addition to normal logging of the primary superblock, we also
1869         * immediately write these changes to sector zero for the primary, then
1870         * update all backup supers (as xfs_db does for a label change), then
1871         * invalidate the block device page cache.  This is so that any prior
1872         * buffered reads from userspace (i.e. from blkid) are invalidated,
1873         * and userspace will see the newly-written label.
1874         */
1875        error = xfs_sync_sb_buf(mp);
1876        if (error)
1877                goto out;
1878        /*
1879         * growfs also updates backup supers so lock against that.
1880         */
1881        mutex_lock(&mp->m_growlock);
1882        error = xfs_update_secondary_sbs(mp);
1883        mutex_unlock(&mp->m_growlock);
1884
1885        invalidate_bdev(mp->m_ddev_targp->bt_bdev);
1886
1887out:
1888        mnt_drop_write_file(filp);
1889        return error;
1890}
1891
1892static inline int
1893xfs_fs_eofblocks_from_user(
1894        struct xfs_fs_eofblocks         *src,
1895        struct xfs_icwalk               *dst)
1896{
1897        if (src->eof_version != XFS_EOFBLOCKS_VERSION)
1898                return -EINVAL;
1899
1900        if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
1901                return -EINVAL;
1902
1903        if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
1904            memchr_inv(src->pad64, 0, sizeof(src->pad64)))
1905                return -EINVAL;
1906
1907        dst->icw_flags = 0;
1908        if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
1909                dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
1910        if (src->eof_flags & XFS_EOF_FLAGS_UID)
1911                dst->icw_flags |= XFS_ICWALK_FLAG_UID;
1912        if (src->eof_flags & XFS_EOF_FLAGS_GID)
1913                dst->icw_flags |= XFS_ICWALK_FLAG_GID;
1914        if (src->eof_flags & XFS_EOF_FLAGS_PRID)
1915                dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
1916        if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
1917                dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;
1918
1919        dst->icw_prid = src->eof_prid;
1920        dst->icw_min_file_size = src->eof_min_file_size;
1921
1922        dst->icw_uid = INVALID_UID;
1923        if (src->eof_flags & XFS_EOF_FLAGS_UID) {
1924                dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
1925                if (!uid_valid(dst->icw_uid))
1926                        return -EINVAL;
1927        }
1928
1929        dst->icw_gid = INVALID_GID;
1930        if (src->eof_flags & XFS_EOF_FLAGS_GID) {
1931                dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
1932                if (!gid_valid(dst->icw_gid))
1933                        return -EINVAL;
1934        }
1935        return 0;
1936}
1937
1938/*
1939 * Note: some of the ioctl's return positive numbers as a
1940 * byte count indicating success, such as readlink_by_handle.
1941 * So we don't "sign flip" like most other routines.  This means
1942 * true errors need to be returned as a negative value.
1943 */
1944long
1945xfs_file_ioctl(
1946        struct file             *filp,
1947        unsigned int            cmd,
1948        unsigned long           p)
1949{
1950        struct inode            *inode = file_inode(filp);
1951        struct xfs_inode        *ip = XFS_I(inode);
1952        struct xfs_mount        *mp = ip->i_mount;
1953        void                    __user *arg = (void __user *)p;
1954        int                     error;
1955
1956        trace_xfs_file_ioctl(ip);
1957
1958        switch (cmd) {
1959        case FITRIM:
1960                return xfs_ioc_trim(mp, arg);
1961        case FS_IOC_GETFSLABEL:
1962                return xfs_ioc_getlabel(mp, arg);
1963        case FS_IOC_SETFSLABEL:
1964                return xfs_ioc_setlabel(filp, mp, arg);
1965        case XFS_IOC_ALLOCSP:
1966        case XFS_IOC_FREESP:
1967        case XFS_IOC_ALLOCSP64:
1968        case XFS_IOC_FREESP64: {
1969                xfs_flock64_t           bf;
1970
1971                if (copy_from_user(&bf, arg, sizeof(bf)))
1972                        return -EFAULT;
1973                return xfs_ioc_space(filp, &bf);
1974        }
1975        case XFS_IOC_DIOINFO: {
1976                struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
1977                struct dioattr          da;
1978
1979                da.d_mem =  da.d_miniosz = target->bt_logical_sectorsize;
1980                da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
1981
1982                if (copy_to_user(arg, &da, sizeof(da)))
1983                        return -EFAULT;
1984                return 0;
1985        }
1986
1987        case XFS_IOC_FSBULKSTAT_SINGLE:
1988        case XFS_IOC_FSBULKSTAT:
1989        case XFS_IOC_FSINUMBERS:
1990                return xfs_ioc_fsbulkstat(filp, cmd, arg);
1991
1992        case XFS_IOC_BULKSTAT:
1993                return xfs_ioc_bulkstat(filp, cmd, arg);
1994        case XFS_IOC_INUMBERS:
1995                return xfs_ioc_inumbers(mp, cmd, arg);
1996
1997        case XFS_IOC_FSGEOMETRY_V1:
1998                return xfs_ioc_fsgeometry(mp, arg, 3);
1999        case XFS_IOC_FSGEOMETRY_V4:
2000                return xfs_ioc_fsgeometry(mp, arg, 4);
2001        case XFS_IOC_FSGEOMETRY:
2002                return xfs_ioc_fsgeometry(mp, arg, 5);
2003
2004        case XFS_IOC_AG_GEOMETRY:
2005                return xfs_ioc_ag_geometry(mp, arg);
2006
2007        case XFS_IOC_GETVERSION:
2008                return put_user(inode->i_generation, (int __user *)arg);
2009
2010        case XFS_IOC_FSGETXATTRA:
2011                return xfs_ioc_fsgetxattra(ip, arg);
2012
2013        case XFS_IOC_GETBMAP:
2014        case XFS_IOC_GETBMAPA:
2015        case XFS_IOC_GETBMAPX:
2016                return xfs_ioc_getbmap(filp, cmd, arg);
2017
2018        case FS_IOC_GETFSMAP:
2019                return xfs_ioc_getfsmap(ip, arg);
2020
2021        case XFS_IOC_SCRUB_METADATA:
2022                return xfs_ioc_scrub_metadata(filp, arg);
2023
2024        case XFS_IOC_FD_TO_HANDLE:
2025        case XFS_IOC_PATH_TO_HANDLE:
2026        case XFS_IOC_PATH_TO_FSHANDLE: {
2027                xfs_fsop_handlereq_t    hreq;
2028
2029                if (copy_from_user(&hreq, arg, sizeof(hreq)))
2030                        return -EFAULT;
2031                return xfs_find_handle(cmd, &hreq);
2032        }
2033        case XFS_IOC_OPEN_BY_HANDLE: {
2034                xfs_fsop_handlereq_t    hreq;
2035
2036                if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
2037                        return -EFAULT;
2038                return xfs_open_by_handle(filp, &hreq);
2039        }
2040
2041        case XFS_IOC_READLINK_BY_HANDLE: {
2042                xfs_fsop_handlereq_t    hreq;
2043
2044                if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
2045                        return -EFAULT;
2046                return xfs_readlink_by_handle(filp, &hreq);
2047        }
2048        case XFS_IOC_ATTRLIST_BY_HANDLE:
2049                return xfs_attrlist_by_handle(filp, arg);
2050
2051        case XFS_IOC_ATTRMULTI_BY_HANDLE:
2052                return xfs_attrmulti_by_handle(filp, arg);
2053
2054        case XFS_IOC_SWAPEXT: {
2055                struct xfs_swapext      sxp;
2056
2057                if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
2058                        return -EFAULT;
2059                error = mnt_want_write_file(filp);
2060                if (error)
2061                        return error;
2062                error = xfs_ioc_swapext(&sxp);
2063                mnt_drop_write_file(filp);
2064                return error;
2065        }
2066
2067        case XFS_IOC_FSCOUNTS: {
2068                xfs_fsop_counts_t out;
2069
2070                xfs_fs_counts(mp, &out);
2071
2072                if (copy_to_user(arg, &out, sizeof(out)))
2073                        return -EFAULT;
2074                return 0;
2075        }
2076
2077        case XFS_IOC_SET_RESBLKS: {
2078                xfs_fsop_resblks_t inout;
2079                uint64_t           in;
2080
2081                if (!capable(CAP_SYS_ADMIN))
2082                        return -EPERM;
2083
2084                if (mp->m_flags & XFS_MOUNT_RDONLY)
2085                        return -EROFS;
2086
2087                if (copy_from_user(&inout, arg, sizeof(inout)))
2088                        return -EFAULT;
2089
2090                error = mnt_want_write_file(filp);
2091                if (error)
2092                        return error;
2093
2094                /* input parameter is passed in resblks field of structure */
2095                in = inout.resblks;
2096                error = xfs_reserve_blocks(mp, &in, &inout);
2097                mnt_drop_write_file(filp);
2098                if (error)
2099                        return error;
2100
2101                if (copy_to_user(arg, &inout, sizeof(inout)))
2102                        return -EFAULT;
2103                return 0;
2104        }
2105
2106        case XFS_IOC_GET_RESBLKS: {
2107                xfs_fsop_resblks_t out;
2108
2109                if (!capable(CAP_SYS_ADMIN))
2110                        return -EPERM;
2111
2112                error = xfs_reserve_blocks(mp, NULL, &out);
2113                if (error)
2114                        return error;
2115
2116                if (copy_to_user(arg, &out, sizeof(out)))
2117                        return -EFAULT;
2118
2119                return 0;
2120        }
2121
2122        case XFS_IOC_FSGROWFSDATA: {
2123                struct xfs_growfs_data in;
2124
2125                if (copy_from_user(&in, arg, sizeof(in)))
2126                        return -EFAULT;
2127
2128                error = mnt_want_write_file(filp);
2129                if (error)
2130                        return error;
2131                error = xfs_growfs_data(mp, &in);
2132                mnt_drop_write_file(filp);
2133                return error;
2134        }
2135
2136        case XFS_IOC_FSGROWFSLOG: {
2137                struct xfs_growfs_log in;
2138
2139                if (copy_from_user(&in, arg, sizeof(in)))
2140                        return -EFAULT;
2141
2142                error = mnt_want_write_file(filp);
2143                if (error)
2144                        return error;
2145                error = xfs_growfs_log(mp, &in);
2146                mnt_drop_write_file(filp);
2147                return error;
2148        }
2149
2150        case XFS_IOC_FSGROWFSRT: {
2151                xfs_growfs_rt_t in;
2152
2153                if (copy_from_user(&in, arg, sizeof(in)))
2154                        return -EFAULT;
2155
2156                error = mnt_want_write_file(filp);
2157                if (error)
2158                        return error;
2159                error = xfs_growfs_rt(mp, &in);
2160                mnt_drop_write_file(filp);
2161                return error;
2162        }
2163
2164        case XFS_IOC_GOINGDOWN: {
2165                uint32_t in;
2166
2167                if (!capable(CAP_SYS_ADMIN))
2168                        return -EPERM;
2169
2170                if (get_user(in, (uint32_t __user *)arg))
2171                        return -EFAULT;
2172
2173                return xfs_fs_goingdown(mp, in);
2174        }
2175
2176        case XFS_IOC_ERROR_INJECTION: {
2177                xfs_error_injection_t in;
2178
2179                if (!capable(CAP_SYS_ADMIN))
2180                        return -EPERM;
2181
2182                if (copy_from_user(&in, arg, sizeof(in)))
2183                        return -EFAULT;
2184
2185                return xfs_errortag_add(mp, in.errtag);
2186        }
2187
2188        case XFS_IOC_ERROR_CLEARALL:
2189                if (!capable(CAP_SYS_ADMIN))
2190                        return -EPERM;
2191
2192                return xfs_errortag_clearall(mp);
2193
2194        case XFS_IOC_FREE_EOFBLOCKS: {
2195                struct xfs_fs_eofblocks eofb;
2196                struct xfs_icwalk       icw;
2197
2198                if (!capable(CAP_SYS_ADMIN))
2199                        return -EPERM;
2200
2201                if (mp->m_flags & XFS_MOUNT_RDONLY)
2202                        return -EROFS;
2203
2204                if (copy_from_user(&eofb, arg, sizeof(eofb)))
2205                        return -EFAULT;
2206
2207                error = xfs_fs_eofblocks_from_user(&eofb, &icw);
2208                if (error)
2209                        return error;
2210
2211                trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);
2212
2213                sb_start_write(mp->m_super);
2214                error = xfs_blockgc_free_space(mp, &icw);
2215                sb_end_write(mp->m_super);
2216                return error;
2217        }
2218
2219        default:
2220                return -ENOTTY;
2221        }
2222}
2223