linux/fs/gfs2/file.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        loff_t error;

        switch (whence) {
        case SEEK_END:
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (!error) {
                        error = generic_file_llseek(file, offset, whence);
                        gfs2_glock_dq_uninit(&i_gh);
                }
                break;

        case SEEK_DATA:
                error = gfs2_seek_data(file, offset);
                break;

        case SEEK_HOLE:
                error = gfs2_seek_hole(file, offset);
                break;

        case SEEK_CUR:
        case SEEK_SET:
                /*
                 * These don't reference inode->i_size and don't depend on the
                 * block mapping, so we don't need the glock.
                 */
                error = generic_file_llseek(file, offset, whence);
                break;
        default:
                error = -EINVAL;
        }

        return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        int error;

        error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
        if (error)
                return error;

        error = gfs2_dir_read(dir, ctx, &file->f_ra);

        gfs2_glock_dq_uninit(&d_gh);

        return error;
}

/**
 * fsflag_gfs2flag - mapping between FS_* flags and GFS2_DIF_* flags
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
        u32 fsflag;
        u32 gfsflag;
} fsflag_gfs2flag[] = {
        {FS_SYNC_FL, GFS2_DIF_SYNC},
        {FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
        {FS_APPEND_FL, GFS2_DIF_APPENDONLY},
        {FS_NOATIME_FL, GFS2_DIF_NOATIME},
        {FS_INDEX_FL, GFS2_DIF_EXHASH},
        {FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
        {FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};

static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
        int i;
        u32 fsflags = 0;

        if (S_ISDIR(inode->i_mode))
                gfsflags &= ~GFS2_DIF_JDATA;
        else
                gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

        for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
                if (gfsflags & fsflag_gfs2flag[i].gfsflag)
                        fsflags |= fsflag_gfs2flag[i].fsflag;
        return fsflags;
}

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
        u32 fsflags;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out_uninit;

        fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

        if (put_user(fsflags, ptr))
                error = -EFAULT;

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int flags = inode->i_flags;

        flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
        if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
                flags |= S_NOSEC;
        if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
                flags |= S_IMMUTABLE;
        if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
                flags |= S_APPEND;
        if (ip->i_diskflags & GFS2_DIF_NOATIME)
                flags |= S_NOATIME;
        if (ip->i_diskflags & GFS2_DIF_SYNC)
                flags |= S_SYNC;
        inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|                    \
                             GFS2_DIF_IMMUTABLE|                \
                             GFS2_DIF_APPENDONLY|               \
                             GFS2_DIF_NOATIME|                  \
                             GFS2_DIF_SYNC|                     \
                             GFS2_DIF_TOPDIR|                   \
                             GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 * @fsflags: The FS_* inode flags passed in
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
                             const u32 fsflags)
{
        struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int error;
        u32 new_flags, flags, oldflags;

        error = mnt_want_write_file(filp);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                goto out_drop_write;

        oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
        error = vfs_ioc_setflags_prepare(inode, oldflags, fsflags);
        if (error)
                goto out;

        error = -EACCES;
        if (!inode_owner_or_capable(inode))
                goto out;

        error = 0;
        flags = ip->i_diskflags;
        new_flags = (flags & ~mask) | (reqflags & mask);
        if ((new_flags ^ flags) == 0)
                goto out;

        error = -EPERM;
        if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
                goto out;
        if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
                goto out;
        if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
            !capable(CAP_LINUX_IMMUTABLE))
                goto out;
        if (!IS_IMMUTABLE(inode)) {
                error = gfs2_permission(inode, MAY_WRITE);
                if (error)
                        goto out;
        }
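        /*
         * Toggling the data journaling mode requires flushing out any data
         * that is already dirty under the old mode before the flag can be
         * switched (see the writeback below).
         */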
        if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
                if (new_flags & GFS2_DIF_JDATA)
                        gfs2_log_flush(sdp, ip->i_gl,
                                       GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_SET_FLAGS);
                error = filemap_fdatawrite(inode->i_mapping);
                if (error)
                        goto out;
                error = filemap_fdatawait(inode->i_mapping);
                if (error)
                        goto out;
                if (new_flags & GFS2_DIF_JDATA)
                        gfs2_ordered_del_inode(ip);
        }
        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                goto out;
        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_trans_end;
        inode->i_ctime = current_time(inode);
        gfs2_trans_add_meta(ip->i_gl, bh);
        ip->i_diskflags = new_flags;
        gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
        gfs2_set_inode_flags(inode);
        gfs2_set_aops(inode);
out_trans_end:
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
out_drop_write:
        mnt_drop_write_file(filp);
        return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = file_inode(filp);
        u32 fsflags, gfsflags = 0;
        u32 mask;
        int i;

        if (get_user(fsflags, ptr))
                return -EFAULT;

        for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
                if (fsflags & fsflag_gfs2flag[i].fsflag) {
                        fsflags &= ~fsflag_gfs2flag[i].fsflag;
                        gfsflags |= fsflag_gfs2flag[i].gfsflag;
                }
        }
        if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
                return -EINVAL;

        mask = GFS2_FLAGS_USER_SET;
        if (S_ISDIR(inode->i_mode)) {
                mask &= ~GFS2_DIF_JDATA;
        } else {
                /* The GFS2_DIF_TOPDIR flag is only valid for directories. */
                if (gfsflags & GFS2_DIF_TOPDIR)
                        return -EINVAL;
                mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
        }

        return do_gfs2_set_flags(filp, gfsflags, mask, fsflags);
}

static int gfs2_getlabel(struct file *filp, char __user *label)
{
        struct inode *inode = file_inode(filp);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
                return -EFAULT;

        return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch(cmd) {
        case FS_IOC_GETFLAGS:
                return gfs2_get_flags(filp, (u32 __user *)arg);
        case FS_IOC_SETFLAGS:
                return gfs2_set_flags(filp, (u32 __user *)arg);
        case FITRIM:
                return gfs2_fitrim(filp, (void __user *)arg);
        case FS_IOC_GETFSLABEL:
                return gfs2_getlabel(filp, (char __user *)arg);
        }

        return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch(cmd) {
        /* These are just misnamed; they actually get/put an int from/to user space */
        case FS_IOC32_GETFLAGS:
                cmd = FS_IOC_GETFLAGS;
                break;
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
        /* Keep this list in sync with gfs2_ioctl */
        case FITRIM:
        case FS_IOC_GETFSLABEL:
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
        struct inode *inode = file_inode(filep);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
        int hint = min_t(size_t, INT_MAX, blks);

        if (hint > atomic_read(&ip->i_sizehint))
                atomic_set(&ip->i_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go.  This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated.  If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
        u64 pos = page_offset(page);

        do {
                struct iomap iomap = { };

                if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
                        return -EIO;

                if (length < iomap.length)
                        iomap.length = length;
                length -= iomap.length;
                pos += iomap.length;
        } while (length > 0);

        return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        u64 offset = page_offset(page);
        unsigned int data_blocks, ind_blocks, rblocks;
        struct gfs2_holder gh;
        unsigned int length;
        loff_t size;
        int ret;

        sb_start_pagefault(inode->i_sb);

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                goto out;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        /* Check page index against inode size */
        size = i_size_read(inode);
        if (offset >= size) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /* Update file times before taking page lock */
        file_update_time(vmf->vma->vm_file);

        /* page is wholly or partially inside EOF */
        if (offset > size - PAGE_SIZE)
                length = offset_in_page(size);
        else
                length = PAGE_SIZE;

        gfs2_size_hint(vmf->vma->vm_file, offset, length);

        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        /*
         * iomap_writepage / iomap_writepages currently don't support inline
         * files, so always unstuff here.
         */

        if (!gfs2_is_stuffed(ip) &&
            !gfs2_write_alloc_required(ip, offset, length)) {
                lock_page(page);
                if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
                        ret = -EAGAIN;
                        unlock_page(page);
                }
                goto out_unlock;
        }

        ret = gfs2_rindex_update(sdp);
        if (ret)
                goto out_unlock;

        gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
        ap.target = data_blocks + ind_blocks;
        ret = gfs2_quota_lock_check(ip, &ap);
        if (ret)
                goto out_unlock;
        ret = gfs2_inplace_reserve(ip, &ap);
        if (ret)
                goto out_quota_unlock;

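        /*
         * Reserve enough journal space for the dinode and indirect blocks,
         * the data blocks themselves when journaled data is in use, and the
         * statfs, quota and resource group bitmap blocks touched by the
         * allocation.
         */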
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks) {
                rblocks += RES_STATFS + RES_QUOTA;
                rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
        }
        ret = gfs2_trans_begin(sdp, rblocks, 0);
        if (ret)
                goto out_trans_fail;

        lock_page(page);
        ret = -EAGAIN;
        /* If truncated, we must retry the operation, as we may have raced
         * with the glock demotion code.
         */
        if (!PageUptodate(page) || page->mapping != inode->i_mapping)
                goto out_trans_end;

        /* Unstuff, if required, and allocate backing blocks for page */
        ret = 0;
        if (gfs2_is_stuffed(ip))
                ret = gfs2_unstuff_dinode(ip, page);
        if (ret == 0)
                ret = gfs2_allocate_page_backing(page, length);

out_trans_end:
        if (ret)
                unlock_page(page);
        gfs2_trans_end(sdp);
out_trans_fail:
        gfs2_inplace_release(ip);
out_quota_unlock:
        gfs2_quota_unlock(ip);
out_unlock:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (ret == 0) {
                set_page_dirty(page);
                wait_for_stable_page(page);
        }
out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping for a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

        if (!(file->f_flags & O_NOATIME) &&
            !IS_NOATIME(&ip->i_inode)) {
                struct gfs2_holder i_gh;
                int error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        return error;
                /* grab lock to update inode */
                gfs2_glock_dq_uninit(&i_gh);
                file_accessed(file);
        }
        vma->vm_ops = &gfs2_vm_ops;

        return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
        struct gfs2_file *fp;
        int ret;

        if (S_ISREG(inode->i_mode)) {
                ret = generic_file_open(inode, file);
                if (ret)
                        return ret;
        }

        fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->f_fl_mutex);

        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;
        return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        int error;
        bool need_unlock = false;

        if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        return error;
                need_unlock = true;
        }

        error = gfs2_open_common(inode, file);

        if (need_unlock)
                gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        kfree(file->private_data);
        file->private_data = NULL;

        if (!(file->f_mode & FMODE_WRITE))
                return 0;

        gfs2_rsqa_delete(ip, &inode->i_writecount);
        return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        int sync_state = inode->i_state & I_DIRTY_ALL;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret = 0, ret1 = 0;

        if (mapping->nrpages) {
                ret1 = filemap_fdatawrite_range(mapping, start, end);
                if (ret1 == -EIO)
                        return ret1;
        }

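        /*
         * Data pages only need to go through the journal for jdata files;
         * for other files the data is written back and waited for via the
         * mapping above and below, so drop I_DIRTY_PAGES from the state
         * that forces a metadata sync.
         */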
        if (!gfs2_is_jdata(ip))
                sync_state &= ~I_DIRTY_PAGES;
        if (datasync)
                sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

        if (sync_state) {
                ret = sync_inode_metadata(inode, 1);
                if (ret)
                        return ret;
                if (gfs2_is_jdata(ip))
                        ret = file_write_and_wait(file);
                if (ret)
                        return ret;
                gfs2_ail_flush(ip->i_gl, 1);
        }

        if (mapping->nrpages)
                ret = file_fdatawait_range(file, start, end);

        return ret ? ret : ret1;
}

static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        size_t count = iov_iter_count(to);
        struct gfs2_holder gh;
        ssize_t ret;

        if (!count)
                return 0; /* skip atime */

        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
                           is_sync_kiocb(iocb));

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t len = iov_iter_count(from);
        loff_t offset = iocb->ki_pos;
        struct gfs2_holder gh;
        ssize_t ret;

        /*
         * Deferred lock, even if it's a write, since we do no allocation on
         * this path. All we need to change is the atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like the
         * VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        /* Silently fall back to buffered I/O when writing beyond EOF */
        if (offset + len > i_size_read(&ip->i_inode))
                goto out;

        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
                           is_sync_kiocb(iocb));

out:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return ret;
}

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = gfs2_file_direct_read(iocb, to);
                if (likely(ret != -ENOTBLK))
                        return ret;
                iocb->ki_flags &= ~IOCB_DIRECT;
        }
        return generic_file_read_iter(iocb, to);
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes; otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct gfs2_inode *ip = GFS2_I(inode);
        ssize_t ret;

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                return ret;

        gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

        if (iocb->ki_flags & IOCB_APPEND) {
                struct gfs2_holder gh;

                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
                if (ret)
                        return ret;
                gfs2_glock_dq_uninit(&gh);
        }

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out_unlock;

        ret = file_remove_privs(file);
        if (ret)
                goto out_unlock;

        ret = file_update_time(file);
        if (ret)
                goto out_unlock;

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = file->f_mapping;
                ssize_t buffered, ret2;

                ret = gfs2_file_direct_write(iocb, from);
                if (ret < 0 || !iov_iter_count(from))
                        goto out_unlock;

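                /*
                 * Part of the write succeeded as direct I/O; complete the
                 * rest through the page cache, and write that portion out
                 * synchronously (IOCB_DSYNC) so the result is as close as
                 * possible to normal O_DIRECT behaviour.
                 */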
                iocb->ki_flags |= IOCB_DSYNC;
                current->backing_dev_info = inode_to_bdi(inode);
                buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
                current->backing_dev_info = NULL;
                if (unlikely(buffered <= 0))
                        goto out_unlock;

                /*
                 * We need to ensure that the page cache pages are written to
                 * disk and invalidated to preserve the expected O_DIRECT
                 * semantics.  If the writeback or invalidate fails, only report
                 * the direct I/O range as we don't know if the buffered pages
                 * made it to disk.
                 */
                iocb->ki_pos += buffered;
                ret2 = generic_write_sync(iocb, buffered);
                invalidate_mapping_pages(mapping,
                                (iocb->ki_pos - buffered) >> PAGE_SHIFT,
                                (iocb->ki_pos - 1) >> PAGE_SHIFT);
                if (!ret || ret2 > 0)
                        ret += ret2;
        } else {
                current->backing_dev_info = inode_to_bdi(inode);
                ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
                current->backing_dev_info = NULL;
                if (likely(ret > 0)) {
                        iocb->ki_pos += ret;
                        ret = generic_write_sync(iocb, ret);
                }
        }

out_unlock:
        inode_unlock(inode);
        return ret;
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
                           int mode)
{
        struct super_block *sb = inode->i_sb;
        struct gfs2_inode *ip = GFS2_I(inode);
        loff_t end = offset + len;
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(error))
                return error;

        gfs2_trans_add_meta(ip->i_gl, dibh);

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (unlikely(error))
                        goto out;
        }

        while (offset < end) {
                struct iomap iomap = { };

                error = gfs2_iomap_get_alloc(inode, offset, end - offset,
                                             &iomap);
                if (error)
                        goto out;
                offset = iomap.offset + iomap.length;
                if (!(iomap.flags & IOMAP_F_NEW))
                        continue;
                error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
                                         iomap.length >> inode->i_blkbits,
                                         GFP_NOFS);
                if (error) {
                        fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
                        goto out;
                }
        }
out:
        brelse(dibh);
        return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
                            unsigned int *data_blocks, unsigned int *ind_blocks,
                            unsigned int max_blocks)
{
        loff_t max = *len;
        const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

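        /*
         * Start by treating (nearly) all of max_blocks as data blocks, then
         * repeatedly carve out the indirect blocks needed to map them, one
         * tree level at a time, until a level is small enough to be
         * referenced from the dinode's direct pointers.
         */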
        for (tmp = max_data; tmp > sdp->sd_diptrs;) {
                tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
                max_data -= tmp;
        }

        *data_blocks = max_data;
        *ind_blocks = max_blocks - max_data;
        *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
        if (*len > max) {
                *len = max;
                gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
        }
}

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes, max_blks;
        int error;
        const loff_t pos = offset;
        const loff_t count = len;
        loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
        loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
        loff_t max_chunk_size = UINT_MAX & bsize_mask;

        next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

        offset &= bsize_mask;

        len = next - offset;
        bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
        if (!bytes)
                bytes = UINT_MAX;
        bytes &= bsize_mask;
        if (bytes == 0)
                bytes = sdp->sd_sb.sb_bsize;

        gfs2_size_hint(file, offset, len);

        gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
        ap.min_target = data_blocks + ind_blocks;

        while (len > 0) {
                if (len < bytes)
                        bytes = len;
                if (!gfs2_write_alloc_required(ip, offset, bytes)) {
                        len -= bytes;
                        offset += bytes;
                        continue;
                }

                /* We need to determine how many bytes we can actually
                 * fallocate without exceeding quota or going over the
                 * end of the fs. We start off optimistically by assuming
                 * we can write max_bytes */
                max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

                /* Since max_bytes is most likely a theoretical max, we
                 * calculate a more realistic 'bytes' to serve as a good
                 * starting point for the number of bytes we may be able
                 * to write */
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
                ap.target = data_blocks + ind_blocks;

                error = gfs2_quota_lock_check(ip, &ap);
                if (error)
                        return error;
                /* ap.allowed tells us how many blocks quota will allow
                 * us to write. Check if this reduces max_blks */
                max_blks = UINT_MAX;
                if (ap.allowed)
                        max_blks = ap.allowed;

                error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;

                /* check if the selected rgrp limits our max_blks further */
                if (ap.allowed && ap.allowed < max_blks)
                        max_blks = ap.allowed;

                /* Almost done. Calculate bytes that can be written using
                 * max_blks. We also recompute max_bytes, data_blocks and
                 * ind_blocks */
                calc_max_reserv(ip, &max_bytes, &data_blocks,
                                &ind_blocks, max_blks);

                rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
                          RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
                if (gfs2_is_jdata(ip))
                        rblocks += data_blocks ? data_blocks : 1;

                error = gfs2_trans_begin(sdp, rblocks,
                                         PAGE_SIZE >> inode->i_blkbits);
                if (error)
                        goto out_trans_fail;

                error = fallocate_chunk(inode, offset, max_bytes, mode);
                gfs2_trans_end(sdp);

                if (error)
                        goto out_trans_fail;

                len -= max_bytes;
                offset += max_bytes;
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
                i_size_write(inode, pos + count);
        file_update_time(file);
        mark_inode_dirty(inode);

        if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
                return vfs_fsync_range(file, pos, pos + count - 1,
                               (file->f_flags & __O_SYNC) ? 0 : 1);
        return 0;

out_trans_fail:
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
        return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
                return -EOPNOTSUPP;
        /* fallocate is needed by gfs2_grow to reserve space in the rindex */
        if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
                return -EOPNOTSUPP;

        inode_lock(inode);

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            (offset + len) > inode->i_size) {
                ret = inode_newsize_ok(inode, offset + len);
                if (ret)
                        goto out_unlock;
        }

        ret = get_write_access(inode);
        if (ret)
                goto out_unlock;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                ret = __gfs2_punch_hole(file, offset, len);
        } else {
                ret = gfs2_rsqa_alloc(ip);
                if (ret)
                        goto out_putw;

                ret = __gfs2_fallocate(file, mode, offset, len);

                if (ret)
                        gfs2_rs_deltree(&ip->i_res);
        }

out_putw:
        put_write_access(inode);
out_unlock:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        inode_unlock(inode);
        return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
                                      struct file *out, loff_t *ppos,
                                      size_t len, unsigned int flags)
{
        int error;
        struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

        error = gfs2_rsqa_alloc(ip);
        if (error)
                return (ssize_t)error;

        gfs2_size_hint(out, *ppos, len);

        return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
                fl->fl_type = F_UNLCK;
        }
        if (unlikely(gfs2_withdrawn(sdp))) {
                if (fl->fl_type == F_UNLCK)
                        locks_lock_file_wait(file, fl);
                return -EIO;
        }
        if (IS_GETLK(cmd))
                return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
        else
                return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        struct gfs2_glock *gl;
        unsigned int state;
        u16 flags;
        int error = 0;
        int sleeptime;

        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

        mutex_lock(&fp->f_fl_mutex);

        if (gfs2_holder_initialized(fl_gh)) {
                struct file_lock request;
                if (fl_gh->gh_state == state)
                        goto out;
                locks_init_lock(&request);
                request.fl_type = F_UNLCK;
                request.fl_flags = FL_FLOCK;
                locks_lock_file_wait(file, &request);
                gfs2_glock_dq(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
                                       &gfs2_flock_glops, CREATE, &gl);
                if (error)
                        goto out;
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
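        /*
         * Take the glock.  For non-waiting requests (LM_FLAG_TRY_1CB), retry
         * a few times with increasing delays before giving up and returning
         * -EAGAIN below.
         */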
        for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
                error = gfs2_glock_nq(fl_gh);
                if (error != GLR_TRYFAILED)
                        break;
                fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
                fl_gh->gh_error = 0;
                msleep(sleeptime);
        }
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
                        error = -EAGAIN;
        } else {
                error = locks_lock_file_wait(file, fl);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
        }

out:
        mutex_unlock(&fp->f_fl_mutex);
        return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;

        mutex_lock(&fp->f_fl_mutex);
        locks_lock_file_wait(file, fl);
        if (gfs2_holder_initialized(fl_gh)) {
                gfs2_glock_dq(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        if (fl->fl_type == F_UNLCK) {
                do_unflock(file, fl);
                return 0;
        } else {
                return do_flock(file, cmd, fl);
        }
}

const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read_iter      = gfs2_file_read_iter,
        .write_iter     = gfs2_file_write_iter,
        .iopoll         = iomap_dio_iopoll,
        .unlocked_ioctl = gfs2_ioctl,
        .compat_ioctl   = gfs2_compat_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .splice_read    = generic_file_splice_read,
        .splice_write   = gfs2_file_splice_write,
        .setlease       = simple_nosetlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
        .iterate_shared = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .compat_ioctl   = gfs2_compat_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .llseek         = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read_iter      = gfs2_file_read_iter,
        .write_iter     = gfs2_file_write_iter,
        .iopoll         = iomap_dio_iopoll,
        .unlocked_ioctl = gfs2_ioctl,
        .compat_ioctl   = gfs2_compat_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = gfs2_file_splice_write,
        .setlease       = generic_setlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
        .iterate_shared = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .compat_ioctl   = gfs2_compat_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .llseek         = default_llseek,
};