linux/fs/nfs/pnfs.c
   1/*
   2 *  pNFS functions to call and manage layout drivers.
   3 *
   4 *  Copyright (c) 2002 [year of first publication]
   5 *  The Regents of the University of Michigan
   6 *  All Rights Reserved
   7 *
   8 *  Dean Hildebrand <dhildebz@umich.edu>
   9 *
  10 *  Permission is granted to use, copy, create derivative works, and
  11 *  redistribute this software and such derivative works for any purpose,
  12 *  so long as the name of the University of Michigan is not used in
  13 *  any advertising or publicity pertaining to the use or distribution
  14 *  of this software without specific, written prior authorization. If
  15 *  the above copyright notice or any other identification of the
  16 *  University of Michigan is included in any copy of any portion of
  17 *  this software, then the disclaimer below must also be included.
  18 *
  19 *  This software is provided as is, without representation or warranty
  20 *  of any kind either express or implied, including without limitation
  21 *  the implied warranties of merchantability, fitness for a particular
  22 *  purpose, or noninfringement.  The Regents of the University of
  23 *  Michigan shall not be liable for any damages, including special,
  24 *  indirect, incidental, or consequential damages, with respect to any
  25 *  claim arising out of or in connection with the use of the software,
  26 *  even if it has been or is hereafter advised of the possibility of
  27 *  such damages.
  28 */
  29
  30#include <linux/nfs_fs.h>
  31#include <linux/nfs_page.h>
  32#include <linux/module.h>
  33#include <linux/sort.h>
  34#include "internal.h"
  35#include "pnfs.h"
  36#include "iostat.h"
  37#include "nfs4trace.h"
  38#include "delegation.h"
  39#include "nfs42.h"
  40#include "nfs4_fs.h"
  41
  42#define NFSDBG_FACILITY         NFSDBG_PNFS
  43#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
  44
  45/* Locking:
  46 *
  47 * pnfs_spinlock:
  48 *      protects pnfs_modules_tbl.
  49 */
  50static DEFINE_SPINLOCK(pnfs_spinlock);
  51
  52/*
  53 * pnfs_modules_tbl holds all pnfs modules
  54 */
  55static LIST_HEAD(pnfs_modules_tbl);
  56
  57static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
  58static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
  59                struct list_head *free_me,
  60                const struct pnfs_layout_range *range,
  61                u32 seq);
  62static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
  63                                struct list_head *tmp_list);
  64
  65/* Return the registered pnfs layout driver module matching given id */
  66static struct pnfs_layoutdriver_type *
  67find_pnfs_driver_locked(u32 id)
  68{
  69        struct pnfs_layoutdriver_type *local;
  70
  71        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
  72                if (local->id == id)
  73                        goto out;
  74        local = NULL;
  75out:
  76        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
  77        return local;
  78}
  79
  80static struct pnfs_layoutdriver_type *
  81find_pnfs_driver(u32 id)
  82{
  83        struct pnfs_layoutdriver_type *local;
  84
  85        spin_lock(&pnfs_spinlock);
  86        local = find_pnfs_driver_locked(id);
  87        if (local != NULL && !try_module_get(local->owner)) {
  88                dprintk("%s: Could not grab reference on module\n", __func__);
  89                local = NULL;
  90        }
  91        spin_unlock(&pnfs_spinlock);
  92        return local;
  93}
  94
  95void
  96unset_pnfs_layoutdriver(struct nfs_server *nfss)
  97{
  98        if (nfss->pnfs_curr_ld) {
  99                if (nfss->pnfs_curr_ld->clear_layoutdriver)
 100                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
 101                /* Decrement the MDS count. Purge the deviceid cache if zero */
 102                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
 103                        nfs4_deviceid_purge_client(nfss->nfs_client);
 104                module_put(nfss->pnfs_curr_ld->owner);
 105        }
 106        nfss->pnfs_curr_ld = NULL;
 107}
 108
 109/*
 110 * When the server sends a list of layout types, we choose one in the order
 111 * given in the list below.
 112 *
 113 * FIXME: should this list be configurable in some fashion? module param?
 114 *        mount option? something else?
 115 */
 116static const u32 ld_prefs[] = {
 117        LAYOUT_SCSI,
 118        LAYOUT_BLOCK_VOLUME,
 119        LAYOUT_OSD2_OBJECTS,
 120        LAYOUT_FLEX_FILES,
 121        LAYOUT_NFSV4_1_FILES,
 122        0
 123};
 124
 125static int
 126ld_cmp(const void *e1, const void *e2)
 127{
 128        u32 ld1 = *((u32 *)e1);
 129        u32 ld2 = *((u32 *)e2);
 130        int i;
 131
 132        for (i = 0; ld_prefs[i] != 0; i++) {
 133                if (ld1 == ld_prefs[i])
 134                        return -1;
 135
 136                if (ld2 == ld_prefs[i])
 137                        return 1;
 138        }
 139        return 0;
 140}
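
/*
 * Illustration (not part of the original source): given the ld_prefs
 * ordering above, sorting a server-provided layout type list with ld_cmp
 * moves the most preferred type to the front.  Assuming the server
 * advertised the types below, set_pnfs_layoutdriver() would probe the
 * SCSI layout driver first:
 *
 *	u32 types[] = { LAYOUT_NFSV4_1_FILES, LAYOUT_FLEX_FILES, LAYOUT_SCSI };
 *
 *	sort(types, ARRAY_SIZE(types), sizeof(types[0]), ld_cmp, NULL);
 *	// types[] is now { LAYOUT_SCSI, LAYOUT_FLEX_FILES, LAYOUT_NFSV4_1_FILES }
 */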
 141
 142/*
 143 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 144 * Currently only one pNFS layout driver per filesystem is supported.
 145 *
 146 * @ids array of layout types supported by MDS.
 147 */
 148void
 149set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
 150                      struct nfs_fsinfo *fsinfo)
 151{
 152        struct pnfs_layoutdriver_type *ld_type = NULL;
 153        u32 id;
 154        int i;
 155
 156        if (fsinfo->nlayouttypes == 0)
 157                goto out_no_driver;
 158        if (!(server->nfs_client->cl_exchange_flags &
 159                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
 160                printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
 161                        __func__, server->nfs_client->cl_exchange_flags);
 162                goto out_no_driver;
 163        }
 164
 165        sort(fsinfo->layouttype, fsinfo->nlayouttypes,
 166                sizeof(*fsinfo->layouttype), ld_cmp, NULL);
 167
 168        for (i = 0; i < fsinfo->nlayouttypes; i++) {
 169                id = fsinfo->layouttype[i];
 170                ld_type = find_pnfs_driver(id);
 171                if (!ld_type) {
 172                        request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
 173                                        id);
 174                        ld_type = find_pnfs_driver(id);
 175                }
 176                if (ld_type)
 177                        break;
 178        }
 179
 180        if (!ld_type) {
 181                dprintk("%s: No pNFS module found!\n", __func__);
 182                goto out_no_driver;
 183        }
 184
 185        server->pnfs_curr_ld = ld_type;
 186        if (ld_type->set_layoutdriver
 187            && ld_type->set_layoutdriver(server, mntfh)) {
 188                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
 189                        "driver %u.\n", __func__, id);
 190                module_put(ld_type->owner);
 191                goto out_no_driver;
 192        }
 193        /* Bump the MDS count */
 194        atomic_inc(&server->nfs_client->cl_mds_count);
 195
 196        dprintk("%s: pNFS module for %u set\n", __func__, id);
 197        return;
 198
 199out_no_driver:
 200        dprintk("%s: Using NFSv4 I/O\n", __func__);
 201        server->pnfs_curr_ld = NULL;
 202}
 203
 204int
 205pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
 206{
 207        int status = -EINVAL;
 208        struct pnfs_layoutdriver_type *tmp;
 209
 210        if (ld_type->id == 0) {
 211                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
 212                return status;
 213        }
 214        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
 215                printk(KERN_ERR "NFS: %s Layout driver must provide "
 216                       "alloc_lseg and free_lseg.\n", __func__);
 217                return status;
 218        }
 219
 220        spin_lock(&pnfs_spinlock);
 221        tmp = find_pnfs_driver_locked(ld_type->id);
 222        if (!tmp) {
 223                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
 224                status = 0;
 225                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
 226                        ld_type->name);
 227        } else {
  228                printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
 229                        __func__, ld_type->id);
 230        }
 231        spin_unlock(&pnfs_spinlock);
 232
 233        return status;
 234}
 235EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
 236
 237void
 238pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
 239{
 240        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
 241        spin_lock(&pnfs_spinlock);
 242        list_del(&ld_type->pnfs_tblid);
 243        spin_unlock(&pnfs_spinlock);
 244}
 245EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
 246
 247/*
 248 * pNFS client layout cache
 249 */
 250
 251/* Need to hold i_lock if caller does not already hold reference */
 252void
 253pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
 254{
 255        refcount_inc(&lo->plh_refcount);
 256}
 257
 258static struct pnfs_layout_hdr *
 259pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
 260{
 261        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
 262        return ld->alloc_layout_hdr(ino, gfp_flags);
 263}
 264
 265static void
 266pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
 267{
 268        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
 269        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 270
 271        if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
 272                struct nfs_client *clp = server->nfs_client;
 273
 274                spin_lock(&clp->cl_lock);
 275                list_del_rcu(&lo->plh_layouts);
 276                spin_unlock(&clp->cl_lock);
 277        }
 278        put_cred(lo->plh_lc_cred);
 279        return ld->free_layout_hdr(lo);
 280}
 281
 282static void
 283pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
 284{
 285        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
 286        dprintk("%s: freeing layout cache %p\n", __func__, lo);
 287        nfsi->layout = NULL;
 288        /* Reset MDS Threshold I/O counters */
 289        nfsi->write_io = 0;
 290        nfsi->read_io = 0;
 291}
 292
 293void
 294pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 295{
 296        struct inode *inode;
 297        unsigned long i_state;
 298
 299        if (!lo)
 300                return;
 301        inode = lo->plh_inode;
 302        pnfs_layoutreturn_before_put_layout_hdr(lo);
 303
 304        if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
 305                if (!list_empty(&lo->plh_segs))
 306                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
 307                pnfs_detach_layout_hdr(lo);
 308                i_state = inode->i_state;
 309                spin_unlock(&inode->i_lock);
 310                pnfs_free_layout_hdr(lo);
 311                /* Notify pnfs_destroy_layout_final() that we're done */
 312                if (i_state & (I_FREEING | I_CLEAR))
 313                        wake_up_var(lo);
 314        }
 315}
 316
 317static struct inode *
 318pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
 319{
 320        struct inode *inode = igrab(lo->plh_inode);
 321        if (inode)
 322                return inode;
 323        set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
 324        return NULL;
 325}
 326
 327/*
 328 * Compare 2 layout stateid sequence ids, to see which is newer,
 329 * taking into account wraparound issues.
 330 */
 331static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
 332{
 333        return (s32)(s1 - s2) > 0;
 334}
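
/*
 * Worked example (added for illustration, not part of the original file):
 * the signed subtraction above handles seqid wraparound.  With s1 = 1 and
 * s2 = 0xfffffffe, the u32 difference is 3, so (s32)(s1 - s2) == 3 > 0 and
 * seqid 1 is treated as newer than 0xfffffffe even though it is numerically
 * smaller.  Conversely, pnfs_seqid_is_newer(2, 5) is false because
 * (s32)(2 - 5) == -3.
 */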
 335
 336static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
 337{
 338        if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
 339                lo->plh_barrier = newseq;
 340}
 341
 342static void
 343pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
 344                         u32 seq)
 345{
 346        if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
 347                iomode = IOMODE_ANY;
 348        lo->plh_return_iomode = iomode;
 349        set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
 350        /*
 351         * We must set lo->plh_return_seq to avoid livelocks with
 352         * pnfs_layout_need_return()
 353         */
 354        if (seq == 0)
 355                seq = be32_to_cpu(lo->plh_stateid.seqid);
 356        if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
 357                lo->plh_return_seq = seq;
 358        pnfs_barrier_update(lo, seq);
 359}
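
/*
 * Note (illustrative, not from the original source): if a return has
 * already been requested for one iomode and a different iomode is
 * requested later, the code above widens plh_return_iomode to IOMODE_ANY.
 * For example, a first call with IOMODE_READ followed by a second call
 * with IOMODE_RW leaves the header marked for an IOMODE_ANY layoutreturn,
 * while plh_return_seq and plh_barrier only ever move forward in seqid
 * order.
 */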
 360
 361static void
 362pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
 363{
 364        struct pnfs_layout_segment *lseg;
 365        lo->plh_return_iomode = 0;
 366        lo->plh_return_seq = 0;
 367        clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
 368        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
 369                if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
 370                        continue;
 371                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
 372        }
 373}
 374
 375static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
 376{
 377        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
 378        clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
 379        smp_mb__after_atomic();
 380        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
 381        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 382}
 383
 384static void
 385pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
 386                struct list_head *free_me)
 387{
 388        clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
 389        clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
 390        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
 391                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
 392        if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
 393                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
 394}
 395
 396/*
 397 * Update the seqid of a layout stateid after receiving
 398 * NFS4ERR_OLD_STATEID
 399 */
 400bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
 401                struct pnfs_layout_range *dst_range,
 402                struct inode *inode)
 403{
 404        struct pnfs_layout_hdr *lo;
 405        struct pnfs_layout_range range = {
 406                .iomode = IOMODE_ANY,
 407                .offset = 0,
 408                .length = NFS4_MAX_UINT64,
 409        };
 410        bool ret = false;
 411        LIST_HEAD(head);
 412        int err;
 413
 414        spin_lock(&inode->i_lock);
 415        lo = NFS_I(inode)->layout;
  416        if (lo && pnfs_layout_is_valid(lo) &&
 417            nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
 418                /* Is our call using the most recent seqid? If so, bump it */
 419                if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
 420                        nfs4_stateid_seqid_inc(dst);
 421                        ret = true;
 422                        goto out;
 423                }
 424                /* Try to update the seqid to the most recent */
 425                err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
 426                if (err != -EBUSY) {
 427                        dst->seqid = lo->plh_stateid.seqid;
 428                        *dst_range = range;
 429                        ret = true;
 430                }
 431        }
 432out:
 433        spin_unlock(&inode->i_lock);
 434        pnfs_free_lseg_list(&head);
 435        return ret;
 436}
 437
 438/*
 439 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 440 *
 441 * In order to continue using the pnfs_layout_hdr, a full recovery
 442 * is required.
 443 * Note that caller must hold inode->i_lock.
 444 */
 445int
 446pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
 447                struct list_head *lseg_list)
 448{
 449        struct pnfs_layout_range range = {
 450                .iomode = IOMODE_ANY,
 451                .offset = 0,
 452                .length = NFS4_MAX_UINT64,
 453        };
 454        struct pnfs_layout_segment *lseg, *next;
 455
 456        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 457        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
 458                pnfs_clear_lseg_state(lseg, lseg_list);
 459        pnfs_clear_layoutreturn_info(lo);
 460        pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
 461        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
 462            !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
 463                pnfs_clear_layoutreturn_waitbit(lo);
 464        return !list_empty(&lo->plh_segs);
 465}
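
/*
 * Note (illustrative, not part of the original source): the return value
 * is nonzero when some segments could not be freed immediately because
 * they are still pinned by in-flight I/O; callers such as
 * pnfs_layout_free_bulk_destroy_list() translate that into -EAGAIN and
 * retry later.
 */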
 466
 467static int
 468pnfs_iomode_to_fail_bit(u32 iomode)
 469{
 470        return iomode == IOMODE_RW ?
 471                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
 472}
 473
 474static void
 475pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 476{
 477        lo->plh_retry_timestamp = jiffies;
 478        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
 479                refcount_inc(&lo->plh_refcount);
 480}
 481
 482static void
 483pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 484{
 485        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
 486                refcount_dec(&lo->plh_refcount);
 487}
 488
 489static void
 490pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
 491{
 492        struct inode *inode = lo->plh_inode;
 493        struct pnfs_layout_range range = {
 494                .iomode = iomode,
 495                .offset = 0,
 496                .length = NFS4_MAX_UINT64,
 497        };
 498        LIST_HEAD(head);
 499
 500        spin_lock(&inode->i_lock);
 501        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
 502        pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
 503        spin_unlock(&inode->i_lock);
 504        pnfs_free_lseg_list(&head);
 505        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
 506                        iomode == IOMODE_RW ?  "RW" : "READ");
 507}
 508
 509static bool
 510pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
 511{
 512        unsigned long start, end;
 513        int fail_bit = pnfs_iomode_to_fail_bit(iomode);
 514
 515        if (test_bit(fail_bit, &lo->plh_flags) == 0)
 516                return false;
 517        end = jiffies;
 518        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
 519        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
 520                /* It is time to retry the failed layoutgets */
 521                pnfs_layout_clear_fail_bit(lo, fail_bit);
 522                return false;
 523        }
 524        return true;
 525}
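
/*
 * Illustrative timing example (not part of the original source): with
 * PNFS_LAYOUTGET_RETRY_TIMEOUT of 120 * HZ, a fail bit set at time T keeps
 * pnfs_layout_io_test_failed() returning true for roughly two minutes.
 * Once jiffies has advanced past T + 120 * HZ, plh_retry_timestamp falls
 * outside the [end - timeout, end] window, the fail bit is cleared and
 * layoutgets for that iomode are retried.
 */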
 526
 527static void
 528pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
 529                const struct pnfs_layout_range *range,
 530                const nfs4_stateid *stateid)
 531{
 532        INIT_LIST_HEAD(&lseg->pls_list);
 533        INIT_LIST_HEAD(&lseg->pls_lc_list);
 534        INIT_LIST_HEAD(&lseg->pls_commits);
 535        refcount_set(&lseg->pls_refcount, 1);
 536        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
 537        lseg->pls_layout = lo;
 538        lseg->pls_range = *range;
 539        lseg->pls_seq = be32_to_cpu(stateid->seqid);
 540}
 541
 542static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
 543{
 544        if (lseg != NULL) {
 545                struct inode *inode = lseg->pls_layout->plh_inode;
 546                NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
 547        }
 548}
 549
 550static void
 551pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
 552                struct pnfs_layout_segment *lseg)
 553{
 554        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 555        list_del_init(&lseg->pls_list);
 556        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
 557        refcount_dec(&lo->plh_refcount);
 558        if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
 559                return;
 560        if (list_empty(&lo->plh_segs) &&
 561            !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
 562            !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
 563                if (atomic_read(&lo->plh_outstanding) == 0)
 564                        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 565                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 566        }
 567}
 568
 569static bool
 570pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
 571                struct pnfs_layout_segment *lseg)
 572{
 573        if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
 574            pnfs_layout_is_valid(lo)) {
 575                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
 576                list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
 577                return true;
 578        }
 579        return false;
 580}
 581
 582void
 583pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 584{
 585        struct pnfs_layout_hdr *lo;
 586        struct inode *inode;
 587
 588        if (!lseg)
 589                return;
 590
 591        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
 592                refcount_read(&lseg->pls_refcount),
 593                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 594
 595        lo = lseg->pls_layout;
 596        inode = lo->plh_inode;
 597
 598        if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
 599                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
 600                        spin_unlock(&inode->i_lock);
 601                        return;
 602                }
 603                pnfs_get_layout_hdr(lo);
 604                pnfs_layout_remove_lseg(lo, lseg);
 605                if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
 606                        lseg = NULL;
 607                spin_unlock(&inode->i_lock);
 608                pnfs_free_lseg(lseg);
 609                pnfs_put_layout_hdr(lo);
 610        }
 611}
 612EXPORT_SYMBOL_GPL(pnfs_put_lseg);
 613
 614/*
 615 * is l2 fully contained in l1?
 616 *   start1                             end1
 617 *   [----------------------------------)
 618 *           start2           end2
 619 *           [----------------)
 620 */
 621static bool
 622pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
 623                 const struct pnfs_layout_range *l2)
 624{
 625        u64 start1 = l1->offset;
 626        u64 end1 = pnfs_end_offset(start1, l1->length);
 627        u64 start2 = l2->offset;
 628        u64 end2 = pnfs_end_offset(start2, l2->length);
 629
 630        return (start1 <= start2) && (end1 >= end2);
 631}
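
/*
 * Example (added for illustration): a range covering offset 0 with length
 * NFS4_MAX_UINT64 contains every other range.  A range at offset 4096 with
 * length 8192 (end 12288) contains offset 8192/length 4096 (end 12288),
 * but not offset 8192/length 8192, whose end of 16384 extends past it.
 */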
 632
 633static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
 634                struct list_head *tmp_list)
 635{
 636        if (!refcount_dec_and_test(&lseg->pls_refcount))
 637                return false;
 638        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
 639        list_add(&lseg->pls_list, tmp_list);
 640        return true;
 641}
 642
 643/* Returns 1 if lseg is removed from list, 0 otherwise */
 644static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
 645                             struct list_head *tmp_list)
 646{
 647        int rv = 0;
 648
 649        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
 650                /* Remove the reference keeping the lseg in the
 651                 * list.  It will now be removed when all
 652                 * outstanding io is finished.
 653                 */
 654                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
 655                        refcount_read(&lseg->pls_refcount));
 656                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
 657                        rv = 1;
 658        }
 659        return rv;
 660}
 661
 662static bool
 663pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
 664                 const struct pnfs_layout_range *recall_range)
 665{
 666        return (recall_range->iomode == IOMODE_ANY ||
 667                lseg_range->iomode == recall_range->iomode) &&
 668               pnfs_lseg_range_intersecting(lseg_range, recall_range);
 669}
 670
 671static bool
 672pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
 673                const struct pnfs_layout_range *recall_range,
 674                u32 seq)
 675{
 676        if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
 677                return false;
 678        if (recall_range == NULL)
 679                return true;
 680        return pnfs_should_free_range(&lseg->pls_range, recall_range);
 681}
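
/*
 * Illustration (not in the original source): a recall carrying seq 5 and
 * an IOMODE_READ range matches only read segments handed out at or before
 * seqid 5 that overlap the recalled byte range; a segment with pls_seq 6
 * is left alone because pnfs_seqid_is_newer(6, 5) is true.  Passing a
 * NULL recall_range matches every segment at or before seq.
 */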
 682
 683/**
 684 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 685 * @lo: layout header containing the lsegs
 686 * @tmp_list: list head where doomed lsegs should go
 687 * @recall_range: optional recall range argument to match (may be NULL)
 688 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 689 *
 690 * Walk the list of lsegs in the layout header, and tear down any that should
 691 * be destroyed. If "recall_range" is specified then the segment must match
 692 * that range. If "seq" is non-zero, then only match segments that were handed
 693 * out at or before that sequence.
 694 *
 695 * Returns number of matching invalid lsegs remaining in list after scanning
 696 * it and purging them.
 697 */
 698int
 699pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 700                            struct list_head *tmp_list,
 701                            const struct pnfs_layout_range *recall_range,
 702                            u32 seq)
 703{
 704        struct pnfs_layout_segment *lseg, *next;
 705        int remaining = 0;
 706
 707        dprintk("%s:Begin lo %p\n", __func__, lo);
 708
 709        if (list_empty(&lo->plh_segs))
 710                return 0;
 711        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
 712                if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
 713                        dprintk("%s: freeing lseg %p iomode %d seq %u "
 714                                "offset %llu length %llu\n", __func__,
 715                                lseg, lseg->pls_range.iomode, lseg->pls_seq,
 716                                lseg->pls_range.offset, lseg->pls_range.length);
 717                        if (!mark_lseg_invalid(lseg, tmp_list))
 718                                remaining++;
 719                }
 720        dprintk("%s:Return %i\n", __func__, remaining);
 721        return remaining;
 722}
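
/*
 * Typical caller pattern, sketched here for illustration (the names below
 * mirror the callers elsewhere in this file; this block is not itself part
 * of the original source):
 *
 *	LIST_HEAD(tmp_list);
 *
 *	spin_lock(&inode->i_lock);
 *	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, &range, 0);
 *	spin_unlock(&inode->i_lock);
 *	pnfs_free_lseg_list(&tmp_list);
 *
 * i.e. the lsegs are unhooked under inode->i_lock and only freed once the
 * lock has been dropped.
 */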
 723
 724static void
 725pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
 726                struct list_head *free_me,
 727                const struct pnfs_layout_range *range,
 728                u32 seq)
 729{
 730        struct pnfs_layout_segment *lseg, *next;
 731
 732        list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
 733                if (pnfs_match_lseg_recall(lseg, range, seq))
 734                        list_move_tail(&lseg->pls_list, free_me);
 735        }
 736}
 737
 738/* note free_me must contain lsegs from a single layout_hdr */
 739void
 740pnfs_free_lseg_list(struct list_head *free_me)
 741{
 742        struct pnfs_layout_segment *lseg, *tmp;
 743
 744        if (list_empty(free_me))
 745                return;
 746
 747        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
 748                list_del(&lseg->pls_list);
 749                pnfs_free_lseg(lseg);
 750        }
 751}
 752
 753static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
 754{
 755        struct pnfs_layout_hdr *lo;
 756        LIST_HEAD(tmp_list);
 757
 758        spin_lock(&nfsi->vfs_inode.i_lock);
 759        lo = nfsi->layout;
 760        if (lo) {
 761                pnfs_get_layout_hdr(lo);
 762                pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
 763                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
 764                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
 765                spin_unlock(&nfsi->vfs_inode.i_lock);
 766                pnfs_free_lseg_list(&tmp_list);
 767                nfs_commit_inode(&nfsi->vfs_inode, 0);
 768                pnfs_put_layout_hdr(lo);
 769        } else
 770                spin_unlock(&nfsi->vfs_inode.i_lock);
 771        return lo;
 772}
 773
 774void pnfs_destroy_layout(struct nfs_inode *nfsi)
 775{
 776        __pnfs_destroy_layout(nfsi);
 777}
 778EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 779
 780static bool pnfs_layout_removed(struct nfs_inode *nfsi,
 781                                struct pnfs_layout_hdr *lo)
 782{
 783        bool ret;
 784
 785        spin_lock(&nfsi->vfs_inode.i_lock);
 786        ret = nfsi->layout != lo;
 787        spin_unlock(&nfsi->vfs_inode.i_lock);
 788        return ret;
 789}
 790
 791void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
 792{
 793        struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
 794
 795        if (lo)
 796                wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
 797}
 798
 799static bool
 800pnfs_layout_add_bulk_destroy_list(struct inode *inode,
 801                struct list_head *layout_list)
 802{
 803        struct pnfs_layout_hdr *lo;
 804        bool ret = false;
 805
 806        spin_lock(&inode->i_lock);
 807        lo = NFS_I(inode)->layout;
 808        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
 809                pnfs_get_layout_hdr(lo);
 810                list_add(&lo->plh_bulk_destroy, layout_list);
 811                ret = true;
 812        }
 813        spin_unlock(&inode->i_lock);
 814        return ret;
 815}
 816
 817/* Caller must hold rcu_read_lock and clp->cl_lock */
 818static int
 819pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
 820                struct nfs_server *server,
 821                struct list_head *layout_list)
 822        __must_hold(&clp->cl_lock)
 823        __must_hold(RCU)
 824{
 825        struct pnfs_layout_hdr *lo, *next;
 826        struct inode *inode;
 827
 828        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
 829                if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
 830                    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
 831                    !list_empty(&lo->plh_bulk_destroy))
 832                        continue;
 833                /* If the sb is being destroyed, just bail */
 834                if (!nfs_sb_active(server->super))
 835                        break;
 836                inode = pnfs_grab_inode_layout_hdr(lo);
 837                if (inode != NULL) {
 838                        if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
 839                                list_del_rcu(&lo->plh_layouts);
 840                        if (pnfs_layout_add_bulk_destroy_list(inode,
 841                                                layout_list))
 842                                continue;
 843                        rcu_read_unlock();
 844                        spin_unlock(&clp->cl_lock);
 845                        iput(inode);
 846                } else {
 847                        rcu_read_unlock();
 848                        spin_unlock(&clp->cl_lock);
 849                }
 850                nfs_sb_deactive(server->super);
 851                spin_lock(&clp->cl_lock);
 852                rcu_read_lock();
 853                return -EAGAIN;
 854        }
 855        return 0;
 856}
 857
 858static int
 859pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
 860                bool is_bulk_recall)
 861{
 862        struct pnfs_layout_hdr *lo;
 863        struct inode *inode;
 864        LIST_HEAD(lseg_list);
 865        int ret = 0;
 866
 867        while (!list_empty(layout_list)) {
 868                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
 869                                plh_bulk_destroy);
 870                dprintk("%s freeing layout for inode %lu\n", __func__,
 871                        lo->plh_inode->i_ino);
 872                inode = lo->plh_inode;
 873
 874                pnfs_layoutcommit_inode(inode, false);
 875
 876                spin_lock(&inode->i_lock);
 877                list_del_init(&lo->plh_bulk_destroy);
 878                if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
 879                        if (is_bulk_recall)
 880                                set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 881                        ret = -EAGAIN;
 882                }
 883                spin_unlock(&inode->i_lock);
 884                pnfs_free_lseg_list(&lseg_list);
 885                /* Free all lsegs that are attached to commit buckets */
 886                nfs_commit_inode(inode, 0);
 887                pnfs_put_layout_hdr(lo);
 888                nfs_iput_and_deactive(inode);
 889        }
 890        return ret;
 891}
 892
 893int
 894pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
 895                struct nfs_fsid *fsid,
 896                bool is_recall)
 897{
 898        struct nfs_server *server;
 899        LIST_HEAD(layout_list);
 900
 901        spin_lock(&clp->cl_lock);
 902        rcu_read_lock();
 903restart:
 904        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 905                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
 906                        continue;
 907                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
 908                                server,
 909                                &layout_list) != 0)
 910                        goto restart;
 911        }
 912        rcu_read_unlock();
 913        spin_unlock(&clp->cl_lock);
 914
 915        if (list_empty(&layout_list))
 916                return 0;
 917        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
 918}
 919
 920int
 921pnfs_destroy_layouts_byclid(struct nfs_client *clp,
 922                bool is_recall)
 923{
 924        struct nfs_server *server;
 925        LIST_HEAD(layout_list);
 926
 927        spin_lock(&clp->cl_lock);
 928        rcu_read_lock();
 929restart:
 930        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 931                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
 932                                        server,
 933                                        &layout_list) != 0)
 934                        goto restart;
 935        }
 936        rcu_read_unlock();
 937        spin_unlock(&clp->cl_lock);
 938
 939        if (list_empty(&layout_list))
 940                return 0;
 941        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
 942}
 943
 944/*
 945 * Called by the state manager to remove all layouts established under an
 946 * expired lease.
 947 */
 948void
 949pnfs_destroy_all_layouts(struct nfs_client *clp)
 950{
 951        nfs4_deviceid_mark_client_invalid(clp);
 952        nfs4_deviceid_purge_client(clp);
 953
 954        pnfs_destroy_layouts_byclid(clp, false);
 955}
 956
 957static void
 958pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
 959{
 960        const struct cred *old;
 961
 962        if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
 963                old = xchg(&lo->plh_lc_cred, get_cred(cred));
 964                put_cred(old);
 965        }
 966}
 967
  968/* update lo->plh_stateid with new if it is more recent */
 969void
 970pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
 971                        const struct cred *cred, bool update_barrier)
 972{
 973        u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
 974        u32 newseq = be32_to_cpu(new->seqid);
 975
 976        if (!pnfs_layout_is_valid(lo)) {
 977                pnfs_set_layout_cred(lo, cred);
 978                nfs4_stateid_copy(&lo->plh_stateid, new);
 979                lo->plh_barrier = newseq;
 980                pnfs_clear_layoutreturn_info(lo);
 981                clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 982                return;
 983        }
 984
 985        if (pnfs_seqid_is_newer(newseq, oldseq))
 986                nfs4_stateid_copy(&lo->plh_stateid, new);
 987
 988        if (update_barrier) {
 989                pnfs_barrier_update(lo, newseq);
 990                return;
 991        }
 992        /*
 993         * Because of wraparound, we want to keep the barrier
 994         * "close" to the current seqids. We really only want to
 995         * get here from a layoutget call.
 996         */
 997        if (atomic_read(&lo->plh_outstanding) == 1)
 998                 pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
 999}
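
/*
 * Example (illustrative only): if the layout is valid with stored seqid 7
 * and a LAYOUTGET reply carries seqid 9, the stateid is copied because
 * pnfs_seqid_is_newer(9, 7) is true; the copy is skipped for a stale reply
 * with seqid 6.  When this is the only outstanding layoutget, the barrier
 * is then pulled up to the current seqid so that old, wrapped-around
 * stateids cannot be accepted later.
 */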
1000
1001static bool
1002pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
1003                const nfs4_stateid *stateid)
1004{
1005        u32 seqid = be32_to_cpu(stateid->seqid);
1006
1007        return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
1008}
1009
 1010/* Is issuing new layoutgets currently blocked for this layout header? */
1011static bool
1012pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
1013{
1014        return lo->plh_block_lgets ||
1015                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
1016}
1017
1018static struct nfs_server *
1019pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
1020{
1021        struct nfs_server *server;
1022
1023        if (inode) {
1024                server = NFS_SERVER(inode);
1025        } else {
1026                struct dentry *parent_dir = dget_parent(ctx->dentry);
1027                server = NFS_SERVER(parent_dir->d_inode);
1028                dput(parent_dir);
1029        }
1030        return server;
1031}
1032
1033static void nfs4_free_pages(struct page **pages, size_t size)
1034{
1035        int i;
1036
1037        if (!pages)
1038                return;
1039
1040        for (i = 0; i < size; i++) {
1041                if (!pages[i])
1042                        break;
1043                __free_page(pages[i]);
1044        }
1045        kfree(pages);
1046}
1047
1048static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
1049{
1050        struct page **pages;
1051        int i;
1052
1053        pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
1054        if (!pages) {
1055                dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
1056                return NULL;
1057        }
1058
1059        for (i = 0; i < size; i++) {
1060                pages[i] = alloc_page(gfp_flags);
1061                if (!pages[i]) {
1062                        dprintk("%s: failed to allocate page\n", __func__);
1063                        nfs4_free_pages(pages, i);
1064                        return NULL;
1065                }
1066        }
1067
1068        return pages;
1069}
1070
1071static struct nfs4_layoutget *
1072pnfs_alloc_init_layoutget_args(struct inode *ino,
1073           struct nfs_open_context *ctx,
1074           const nfs4_stateid *stateid,
1075           const struct pnfs_layout_range *range,
1076           gfp_t gfp_flags)
1077{
1078        struct nfs_server *server = pnfs_find_server(ino, ctx);
1079        size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
1080        size_t max_pages = max_response_pages(server);
1081        struct nfs4_layoutget *lgp;
1082
1083        dprintk("--> %s\n", __func__);
1084
1085        lgp = kzalloc(sizeof(*lgp), gfp_flags);
1086        if (lgp == NULL)
1087                return NULL;
1088
1089        if (max_reply_sz) {
1090                size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
1091                if (npages < max_pages)
1092                        max_pages = npages;
1093        }
1094
1095        lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
1096        if (!lgp->args.layout.pages) {
1097                kfree(lgp);
1098                return NULL;
1099        }
1100        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
1101        lgp->res.layoutp = &lgp->args.layout;
1102
1103        /* Don't confuse uninitialised result and success */
1104        lgp->res.status = -NFS4ERR_DELAY;
1105
1106        lgp->args.minlength = PAGE_SIZE;
1107        if (lgp->args.minlength > range->length)
1108                lgp->args.minlength = range->length;
1109        if (ino) {
1110                loff_t i_size = i_size_read(ino);
1111
1112                if (range->iomode == IOMODE_READ) {
1113                        if (range->offset >= i_size)
1114                                lgp->args.minlength = 0;
1115                        else if (i_size - range->offset < lgp->args.minlength)
1116                                lgp->args.minlength = i_size - range->offset;
1117                }
1118        }
1119        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
1120        pnfs_copy_range(&lgp->args.range, range);
1121        lgp->args.type = server->pnfs_curr_ld->id;
1122        lgp->args.inode = ino;
1123        lgp->args.ctx = get_nfs_open_context(ctx);
1124        nfs4_stateid_copy(&lgp->args.stateid, stateid);
1125        lgp->gfp_flags = gfp_flags;
1126        lgp->cred = ctx->cred;
1127        return lgp;
1128}
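
/*
 * Illustration of the minlength clamping above (not part of the original
 * source): args.minlength starts at PAGE_SIZE.  For an IOMODE_READ request
 * at offset 8192 against a 1000-byte file, the offset lies past EOF, so
 * minlength drops to 0; for a read at offset 0 of the same file it is
 * clamped to the 1000 bytes that actually exist.  Write requests keep the
 * PAGE_SIZE (or range->length, if smaller) minimum.
 */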
1129
1130void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
1131{
1132        size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;
1133
1134        nfs4_free_pages(lgp->args.layout.pages, max_pages);
1135        pnfs_put_layout_hdr(lgp->lo);
1136        put_nfs_open_context(lgp->args.ctx);
1137        kfree(lgp);
1138}
1139
1140static void pnfs_clear_layoutcommit(struct inode *inode,
1141                struct list_head *head)
1142{
1143        struct nfs_inode *nfsi = NFS_I(inode);
1144        struct pnfs_layout_segment *lseg, *tmp;
1145
1146        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1147                return;
1148        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
1149                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1150                        continue;
1151                pnfs_lseg_dec_and_remove_zero(lseg, head);
1152        }
1153}
1154
1155void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
1156                const nfs4_stateid *arg_stateid,
1157                const struct pnfs_layout_range *range,
1158                const nfs4_stateid *stateid)
1159{
1160        struct inode *inode = lo->plh_inode;
1161        LIST_HEAD(freeme);
1162
1163        spin_lock(&inode->i_lock);
1164        if (!pnfs_layout_is_valid(lo) ||
1165            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
1166                goto out_unlock;
1167        if (stateid) {
1168                u32 seq = be32_to_cpu(arg_stateid->seqid);
1169
1170                pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
1171                pnfs_free_returned_lsegs(lo, &freeme, range, seq);
1172                pnfs_set_layout_stateid(lo, stateid, NULL, true);
1173        } else
1174                pnfs_mark_layout_stateid_invalid(lo, &freeme);
1175out_unlock:
1176        pnfs_clear_layoutreturn_waitbit(lo);
1177        spin_unlock(&inode->i_lock);
1178        pnfs_free_lseg_list(&freeme);
1179
1180}
1181
1182static bool
1183pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
1184                nfs4_stateid *stateid,
1185                const struct cred **cred,
1186                enum pnfs_iomode *iomode)
1187{
1188        /* Serialise LAYOUTGET/LAYOUTRETURN */
1189        if (atomic_read(&lo->plh_outstanding) != 0)
1190                return false;
1191        if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
1192                return false;
1193        set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1194        pnfs_get_layout_hdr(lo);
1195        nfs4_stateid_copy(stateid, &lo->plh_stateid);
1196        *cred = get_cred(lo->plh_lc_cred);
1197        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
1198                if (lo->plh_return_seq != 0)
1199                        stateid->seqid = cpu_to_be32(lo->plh_return_seq);
1200                if (iomode != NULL)
1201                        *iomode = lo->plh_return_iomode;
1202                pnfs_clear_layoutreturn_info(lo);
1203        } else if (iomode != NULL)
1204                *iomode = IOMODE_ANY;
1205        pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
1206        return true;
1207}
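
/*
 * Descriptive note (added for clarity, not in the original source): on a
 * successful return from pnfs_prepare_layoutreturn() the caller holds
 * NFS_LAYOUT_RETURN_LOCK, NFS_LAYOUT_RETURN is set, an extra reference is
 * held on the layout header and *cred pins plh_lc_cred.  That state is
 * undone by pnfs_clear_layoutreturn_waitbit() plus pnfs_put_layout_hdr(),
 * either on completion of the LAYOUTRETURN RPC or directly in
 * pnfs_send_layoutreturn() when allocating the request fails.
 */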
1208
1209static void
1210pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
1211                struct pnfs_layout_hdr *lo,
1212                const nfs4_stateid *stateid,
1213                enum pnfs_iomode iomode)
1214{
1215        struct inode *inode = lo->plh_inode;
1216
1217        args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
1218        args->inode = inode;
1219        args->range.iomode = iomode;
1220        args->range.offset = 0;
1221        args->range.length = NFS4_MAX_UINT64;
1222        args->layout = lo;
1223        nfs4_stateid_copy(&args->stateid, stateid);
1224}
1225
1226static int
1227pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
1228                       const nfs4_stateid *stateid,
1229                       const struct cred **pcred,
1230                       enum pnfs_iomode iomode,
1231                       bool sync)
1232{
1233        struct inode *ino = lo->plh_inode;
1234        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
1235        struct nfs4_layoutreturn *lrp;
1236        const struct cred *cred = *pcred;
1237        int status = 0;
1238
1239        *pcred = NULL;
1240        lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
1241        if (unlikely(lrp == NULL)) {
1242                status = -ENOMEM;
1243                spin_lock(&ino->i_lock);
1244                pnfs_clear_layoutreturn_waitbit(lo);
1245                spin_unlock(&ino->i_lock);
1246                put_cred(cred);
1247                pnfs_put_layout_hdr(lo);
1248                goto out;
1249        }
1250
1251        pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
1252        lrp->args.ld_private = &lrp->ld_private;
1253        lrp->clp = NFS_SERVER(ino)->nfs_client;
1254        lrp->cred = cred;
1255        if (ld->prepare_layoutreturn)
1256                ld->prepare_layoutreturn(&lrp->args);
1257
1258        status = nfs4_proc_layoutreturn(lrp, sync);
1259out:
1260        dprintk("<-- %s status: %d\n", __func__, status);
1261        return status;
1262}
1263
1264static bool
1265pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
1266                                enum pnfs_iomode iomode,
1267                                u32 seq)
1268{
1269        struct pnfs_layout_range recall_range = {
1270                .length = NFS4_MAX_UINT64,
1271                .iomode = iomode,
1272        };
1273        return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
1274                                               &recall_range, seq) != -EBUSY;
1275}
1276
1277/* Return true if layoutreturn is needed */
1278static bool
1279pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
1280{
1281        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1282                return false;
1283        return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
1284                                               lo->plh_return_seq);
1285}
1286
1287static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
1288{
 1289        struct inode *inode = lo->plh_inode;
1290
1291        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1292                return;
1293        spin_lock(&inode->i_lock);
1294        if (pnfs_layout_need_return(lo)) {
1295                const struct cred *cred;
1296                nfs4_stateid stateid;
1297                enum pnfs_iomode iomode;
1298                bool send;
1299
1300                send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
1301                spin_unlock(&inode->i_lock);
1302                if (send) {
 1303                        /* Send an async layoutreturn so we don't deadlock */
1304                        pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
1305                }
1306        } else
1307                spin_unlock(&inode->i_lock);
1308}
1309
1310/*
1311 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
1312 * when the layout segment list is empty.
1313 *
1314 * Note that a pnfs_layout_hdr can exist with an empty layout segment
1315 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
1316 * deviceid is marked invalid.
1317 */
1318int
1319_pnfs_return_layout(struct inode *ino)
1320{
1321        struct pnfs_layout_hdr *lo = NULL;
1322        struct nfs_inode *nfsi = NFS_I(ino);
1323        struct pnfs_layout_range range = {
1324                .iomode         = IOMODE_ANY,
1325                .offset         = 0,
1326                .length         = NFS4_MAX_UINT64,
1327        };
1328        LIST_HEAD(tmp_list);
1329        const struct cred *cred;
1330        nfs4_stateid stateid;
1331        int status = 0;
1332        bool send, valid_layout;
1333
1334        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
1335
1336        spin_lock(&ino->i_lock);
1337        lo = nfsi->layout;
1338        if (!lo) {
1339                spin_unlock(&ino->i_lock);
1340                dprintk("NFS: %s no layout to return\n", __func__);
1341                goto out;
1342        }
1343        /* Reference matched in nfs4_layoutreturn_release */
1344        pnfs_get_layout_hdr(lo);
1345        /* Is there an outstanding layoutreturn ? */
1346        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
1347                spin_unlock(&ino->i_lock);
1348                if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
1349                                        TASK_UNINTERRUPTIBLE))
1350                        goto out_put_layout_hdr;
1351                spin_lock(&ino->i_lock);
1352        }
1353        valid_layout = pnfs_layout_is_valid(lo);
1354        pnfs_clear_layoutcommit(ino, &tmp_list);
1355        pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
1356
1357        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
1358                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
1359
1360        /* Don't send a LAYOUTRETURN if list was initially empty */
1361        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
1362                        !valid_layout) {
1363                spin_unlock(&ino->i_lock);
1364                dprintk("NFS: %s no layout segments to return\n", __func__);
1365                goto out_wait_layoutreturn;
1366        }
1367
1368        send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
1369        spin_unlock(&ino->i_lock);
1370        if (send)
1371                status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
1372out_wait_layoutreturn:
1373        wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
1374out_put_layout_hdr:
1375        pnfs_free_lseg_list(&tmp_list);
1376        pnfs_put_layout_hdr(lo);
1377out:
1378        dprintk("<-- %s status: %d\n", __func__, status);
1379        return status;
1380}
1381
1382int
1383pnfs_commit_and_return_layout(struct inode *inode)
1384{
1385        struct pnfs_layout_hdr *lo;
1386        int ret;
1387
1388        spin_lock(&inode->i_lock);
1389        lo = NFS_I(inode)->layout;
1390        if (lo == NULL) {
1391                spin_unlock(&inode->i_lock);
1392                return 0;
1393        }
1394        pnfs_get_layout_hdr(lo);
1395        /* Block new layoutgets and read/write to ds */
1396        lo->plh_block_lgets++;
1397        spin_unlock(&inode->i_lock);
1398        filemap_fdatawait(inode->i_mapping);
1399        ret = pnfs_layoutcommit_inode(inode, true);
1400        if (ret == 0)
1401                ret = _pnfs_return_layout(inode);
1402        spin_lock(&inode->i_lock);
1403        lo->plh_block_lgets--;
1404        spin_unlock(&inode->i_lock);
1405        pnfs_put_layout_hdr(lo);
1406        return ret;
1407}
1408
1409bool pnfs_roc(struct inode *ino,
1410                struct nfs4_layoutreturn_args *args,
1411                struct nfs4_layoutreturn_res *res,
1412                const struct cred *cred)
1413{
1414        struct nfs_inode *nfsi = NFS_I(ino);
1415        struct nfs_open_context *ctx;
1416        struct nfs4_state *state;
1417        struct pnfs_layout_hdr *lo;
1418        struct pnfs_layout_segment *lseg, *next;
1419        const struct cred *lc_cred;
1420        nfs4_stateid stateid;
1421        enum pnfs_iomode iomode = 0;
1422        bool layoutreturn = false, roc = false;
1423        bool skip_read = false;
1424
1425        if (!nfs_have_layout(ino))
1426                return false;
1427retry:
1428        rcu_read_lock();
1429        spin_lock(&ino->i_lock);
1430        lo = nfsi->layout;
1431        if (!lo || !pnfs_layout_is_valid(lo) ||
1432            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1433                lo = NULL;
1434                goto out_noroc;
1435        }
1436        pnfs_get_layout_hdr(lo);
1437        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
1438                spin_unlock(&ino->i_lock);
1439                rcu_read_unlock();
1440                wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
1441                                TASK_UNINTERRUPTIBLE);
1442                pnfs_put_layout_hdr(lo);
1443                goto retry;
1444        }
1445
1446        /* no roc if we hold a delegation */
1447        if (nfs4_check_delegation(ino, FMODE_READ)) {
1448                if (nfs4_check_delegation(ino, FMODE_WRITE))
1449                        goto out_noroc;
1450                skip_read = true;
1451        }
1452
1453        list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
1454                state = ctx->state;
1455                if (state == NULL)
1456                        continue;
1457                /* Don't return layout if there is open file state */
1458                if (state->state & FMODE_WRITE)
1459                        goto out_noroc;
1460                if (state->state & FMODE_READ)
1461                        skip_read = true;
1462        }
1463
1464
1465        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
1466                if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
1467                        continue;
1468                /* If we are sending layoutreturn, invalidate all valid lsegs */
1469                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1470                        continue;
1471                /*
1472                 * Note: mark lseg for return so pnfs_layout_remove_lseg
1473                 * doesn't invalidate the layout for us.
1474                 */
1475                set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
1476                if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
1477                        continue;
1478                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
1479        }
1480
1481        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1482                goto out_noroc;
1483
1484        /* ROC in two conditions:
1485         * 1. there are ROC lsegs
1486         * 2. we don't send layoutreturn
1487         */
1488        /* lo ref dropped in pnfs_roc_release() */
1489        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
1490        /* If the creds don't match, we can't compound the layoutreturn */
1491        if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
1492                goto out_noroc;
1493
1494        roc = layoutreturn;
1495        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
1496        res->lrs_present = 0;
1497        layoutreturn = false;
1498        put_cred(lc_cred);
1499
1500out_noroc:
1501        spin_unlock(&ino->i_lock);
1502        rcu_read_unlock();
1503        pnfs_layoutcommit_inode(ino, true);
1504        if (roc) {
1505                struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
1506                if (ld->prepare_layoutreturn)
1507                        ld->prepare_layoutreturn(args);
1508                pnfs_put_layout_hdr(lo);
1509                return true;
1510        }
1511        if (layoutreturn)
1512                pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
1513        pnfs_put_layout_hdr(lo);
1514        return false;
1515}
1516
1517int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
1518                  struct nfs4_layoutreturn_res **respp, int *ret)
1519{
1520        struct nfs4_layoutreturn_args *arg = *argpp;
1521        int retval = -EAGAIN;
1522
1523        if (!arg)
1524                return 0;
1525        /* Handle Layoutreturn errors */
1526        switch (*ret) {
1527        case 0:
1528                retval = 0;
1529                break;
1530        case -NFS4ERR_NOMATCHING_LAYOUT:
1531                /* Was there an RPC level error? If not, retry */
1532                if (task->tk_rpc_status == 0)
1533                        break;
1534                /* If the call was not sent, let caller handle it */
1535                if (!RPC_WAS_SENT(task))
1536                        return 0;
1537                /*
1538                 * Otherwise, assume the call succeeded and
1539                 * that we need to release the layout
1540                 */
1541                *ret = 0;
1542                (*respp)->lrs_present = 0;
1543                retval = 0;
1544                break;
1545        case -NFS4ERR_DELAY:
1546                /* Let the caller handle the retry */
1547                *ret = -NFS4ERR_NOMATCHING_LAYOUT;
1548                return 0;
1549        case -NFS4ERR_OLD_STATEID:
1550                if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
1551                                                     &arg->range, arg->inode))
1552                        break;
1553                *ret = -NFS4ERR_NOMATCHING_LAYOUT;
1554                return -EAGAIN;
1555        }
1556        *argpp = NULL;
1557        *respp = NULL;
1558        return retval;
1559}
1560
1561void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
1562                struct nfs4_layoutreturn_res *res,
1563                int ret)
1564{
1565        struct pnfs_layout_hdr *lo = args->layout;
1566        struct inode *inode = args->inode;
1567        const nfs4_stateid *res_stateid = NULL;
1568        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
1569
1570        switch (ret) {
1571        case -NFS4ERR_NOMATCHING_LAYOUT:
1572                spin_lock(&inode->i_lock);
1573                if (pnfs_layout_is_valid(lo) &&
1574                    nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
1575                        pnfs_set_plh_return_info(lo, args->range.iomode, 0);
1576                pnfs_clear_layoutreturn_waitbit(lo);
1577                spin_unlock(&inode->i_lock);
1578                break;
1579        case 0:
1580                if (res->lrs_present)
1581                        res_stateid = &res->stateid;
1582                /* Fallthrough */
1583        default:
1584                pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
1585                                             res_stateid);
1586        }
1587        trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
1588        if (ld_private && ld_private->ops && ld_private->ops->free)
1589                ld_private->ops->free(ld_private);
1590        pnfs_put_layout_hdr(lo);
1591}
1592
1593bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
1594{
1595        struct nfs_inode *nfsi = NFS_I(ino);
1596        struct pnfs_layout_hdr *lo;
1597        bool sleep = false;
1598
1599        /* We might not have grabbed a reference to lo, so we need to
1600         * check it under i_lock. */
1601        spin_lock(&ino->i_lock);
1602        lo = nfsi->layout;
1603        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1604                rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1605                sleep = true;
1606        }
1607        spin_unlock(&ino->i_lock);
1608        return sleep;
1609}
1610
1611/*
1612 * Compare two layout segments for sorting into layout cache.
1613 * We want to preferentially return RW over RO layouts, so ensure those
1614 * are seen first.
1615 */
1616static s64
1617pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
1618           const struct pnfs_layout_range *l2)
1619{
1620        s64 d;
1621
1622        /* high offset > low offset */
1623        d = l1->offset - l2->offset;
1624        if (d)
1625                return d;
1626
1627        /* short length > long length */
1628        d = l2->length - l1->length;
1629        if (d)
1630                return d;
1631
1632        /* read > read/write */
1633        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1634}
1635
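/*
 * Illustrative example (hypothetical values, not from the original source):
 * given two segments that both start at offset 0 with length NFS4_MAX_UINT64,
 * one IOMODE_RW and one IOMODE_READ, pnfs_lseg_range_cmp() orders the RW
 * segment first, so a later pnfs_find_lseg() walk over plh_segs sees it
 * before the read-only one.
 */
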
1636static bool
1637pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
1638                const struct pnfs_layout_range *l2)
1639{
1640        return pnfs_lseg_range_cmp(l1, l2) > 0;
1641}
1642
1643static bool
1644pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
1645                struct pnfs_layout_segment *old)
1646{
1647        return false;
1648}
1649
1650void
1651pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1652                   struct pnfs_layout_segment *lseg,
1653                   bool (*is_after)(const struct pnfs_layout_range *,
1654                           const struct pnfs_layout_range *),
1655                   bool (*do_merge)(struct pnfs_layout_segment *,
1656                           struct pnfs_layout_segment *),
1657                   struct list_head *free_me)
1658{
1659        struct pnfs_layout_segment *lp, *tmp;
1660
1661        dprintk("%s:Begin\n", __func__);
1662
1663        list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
1664                if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
1665                        continue;
1666                if (do_merge(lseg, lp)) {
1667                        mark_lseg_invalid(lp, free_me);
1668                        continue;
1669                }
1670                if (is_after(&lseg->pls_range, &lp->pls_range))
1671                        continue;
1672                list_add_tail(&lseg->pls_list, &lp->pls_list);
1673                dprintk("%s: inserted lseg %p "
1674                        "iomode %d offset %llu length %llu before "
1675                        "lp %p iomode %d offset %llu length %llu\n",
1676                        __func__, lseg, lseg->pls_range.iomode,
1677                        lseg->pls_range.offset, lseg->pls_range.length,
1678                        lp, lp->pls_range.iomode, lp->pls_range.offset,
1679                        lp->pls_range.length);
1680                goto out;
1681        }
1682        list_add_tail(&lseg->pls_list, &lo->plh_segs);
1683        dprintk("%s: inserted lseg %p "
1684                "iomode %d offset %llu length %llu at tail\n",
1685                __func__, lseg, lseg->pls_range.iomode,
1686                lseg->pls_range.offset, lseg->pls_range.length);
1687out:
1688        pnfs_get_layout_hdr(lo);
1689
1690        dprintk("%s:Return\n", __func__);
1691}
1692EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
1693
1694static void
1695pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1696                   struct pnfs_layout_segment *lseg,
1697                   struct list_head *free_me)
1698{
1699        struct inode *inode = lo->plh_inode;
1700        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
1701
1702        if (ld->add_lseg != NULL)
1703                ld->add_lseg(lo, lseg, free_me);
1704        else
1705                pnfs_generic_layout_insert_lseg(lo, lseg,
1706                                pnfs_lseg_range_is_after,
1707                                pnfs_lseg_no_merge,
1708                                free_me);
1709}
1710
1711static struct pnfs_layout_hdr *
1712alloc_init_layout_hdr(struct inode *ino,
1713                      struct nfs_open_context *ctx,
1714                      gfp_t gfp_flags)
1715{
1716        struct pnfs_layout_hdr *lo;
1717
1718        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1719        if (!lo)
1720                return NULL;
1721        refcount_set(&lo->plh_refcount, 1);
1722        INIT_LIST_HEAD(&lo->plh_layouts);
1723        INIT_LIST_HEAD(&lo->plh_segs);
1724        INIT_LIST_HEAD(&lo->plh_return_segs);
1725        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1726        lo->plh_inode = ino;
1727        lo->plh_lc_cred = get_cred(ctx->cred);
1728        lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1729        return lo;
1730}
1731
1732static struct pnfs_layout_hdr *
1733pnfs_find_alloc_layout(struct inode *ino,
1734                       struct nfs_open_context *ctx,
1735                       gfp_t gfp_flags)
1736        __releases(&ino->i_lock)
1737        __acquires(&ino->i_lock)
1738{
1739        struct nfs_inode *nfsi = NFS_I(ino);
1740        struct pnfs_layout_hdr *new = NULL;
1741
1742        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1743
1744        if (nfsi->layout != NULL)
1745                goto out_existing;
1746        spin_unlock(&ino->i_lock);
1747        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1748        spin_lock(&ino->i_lock);
1749
1750        if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1751                nfsi->layout = new;
1752                return new;
1753        } else if (new != NULL)
1754                pnfs_free_layout_hdr(new);
1755out_existing:
1756        pnfs_get_layout_hdr(nfsi->layout);
1757        return nfsi->layout;
1758}
1759
1760/*
1761 * iomode matching rules:
1762 * iomode       lseg    strict match
1763 *                      iomode
1764 * -----        -----   ------ -----
1765 * ANY          READ    N/A    true
1766 * ANY          RW      N/A    true
1767 * RW           READ    N/A    false
1768 * RW           RW      N/A    true
1769 * READ         READ    N/A    true
1770 * READ         RW      true   false
1771 * READ         RW      false  true
1772 */
1773static bool
1774pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1775                 const struct pnfs_layout_range *range,
1776                 bool strict_iomode)
1777{
1778        struct pnfs_layout_range range1;
1779
1780        if ((range->iomode == IOMODE_RW &&
1781             ls_range->iomode != IOMODE_RW) ||
1782            (range->iomode != ls_range->iomode &&
1783             strict_iomode) ||
1784            !pnfs_lseg_range_intersecting(ls_range, range))
1785                return false;
1786
1787        /* range1 covers only the first byte in the range */
1788        range1 = *range;
1789        range1.length = 1;
1790        return pnfs_lseg_range_contained(ls_range, &range1);
1791}
1792
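/*
 * A worked example of the table above (hypothetical request, not from the
 * original source): a READ request for bytes 0-4095 matches a cached
 * IOMODE_RW segment covering offset 0, length NFS4_MAX_UINT64 when
 * strict_iomode is false, because only the first byte of the request must be
 * contained in the segment.  With strict_iomode set, the same request
 * matches only an IOMODE_READ segment.
 */
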
1793/*
1794 * lookup range in layout
1795 */
1796static struct pnfs_layout_segment *
1797pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1798                struct pnfs_layout_range *range,
1799                bool strict_iomode)
1800{
1801        struct pnfs_layout_segment *lseg, *ret = NULL;
1802
1803        dprintk("%s:Begin\n", __func__);
1804
1805        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1806                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1807                    pnfs_lseg_range_match(&lseg->pls_range, range,
1808                                          strict_iomode)) {
1809                        ret = pnfs_get_lseg(lseg);
1810                        break;
1811                }
1812        }
1813
1814        dprintk("%s:Return lseg %p ref %d\n",
1815                __func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
1816        return ret;
1817}
1818
1819/*
1820 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1821 * to the MDS or over pNFS
1822 *
1823 * The nfs_inode read_io and write_io fields are cumulative counters reset
1824 * when there are no layout segments. Note that in pnfs_update_layout iomode
1825 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1826 * WRITE request.
1827 *
1828 * A return of true means use MDS I/O.
1829 *
1830 * From rfc 5661:
1831 * If a file's size is smaller than the file size threshold, data accesses
1832 * SHOULD be sent to the metadata server.  If an I/O request has a length that
1833 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1834 * server.  If both file size and I/O size are provided, the client SHOULD
1835 * reach or exceed  both thresholds before sending its read or write
1836 * requests to the data server.
1837 */
1838static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1839                                     struct inode *ino, int iomode)
1840{
1841        struct nfs4_threshold *t = ctx->mdsthreshold;
1842        struct nfs_inode *nfsi = NFS_I(ino);
1843        loff_t fsize = i_size_read(ino);
1844        bool size = false, size_set = false, io = false, io_set = false, ret = false;
1845
1846        if (t == NULL)
1847                return ret;
1848
1849        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1850                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1851
1852        switch (iomode) {
1853        case IOMODE_READ:
1854                if (t->bm & THRESHOLD_RD) {
1855                        dprintk("%s fsize %llu\n", __func__, fsize);
1856                        size_set = true;
1857                        if (fsize < t->rd_sz)
1858                                size = true;
1859                }
1860                if (t->bm & THRESHOLD_RD_IO) {
1861                        dprintk("%s nfsi->read_io %llu\n", __func__,
1862                                nfsi->read_io);
1863                        io_set = true;
1864                        if (nfsi->read_io < t->rd_io_sz)
1865                                io = true;
1866                }
1867                break;
1868        case IOMODE_RW:
1869                if (t->bm & THRESHOLD_WR) {
1870                        dprintk("%s fsize %llu\n", __func__, fsize);
1871                        size_set = true;
1872                        if (fsize < t->wr_sz)
1873                                size = true;
1874                }
1875                if (t->bm & THRESHOLD_WR_IO) {
1876                        dprintk("%s nfsi->write_io %llu\n", __func__,
1877                                nfsi->write_io);
1878                        io_set = true;
1879                        if (nfsi->write_io < t->wr_io_sz)
1880                                io = true;
1881                }
1882                break;
1883        }
1884        if (size_set && io_set) {
1885                if (size && io)
1886                        ret = true;
1887        } else if (size || io)
1888                ret = true;
1889
1890        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1891        return ret;
1892}
1893
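/*
 * Hypothetical numbers to illustrate the threshold logic above (not from the
 * original source): with both THRESHOLD_RD and THRESHOLD_RD_IO set,
 * rd_sz = 1 MB and rd_io_sz = 64 KB, a read of a 512 KB file after 32 KB of
 * accumulated read I/O returns true (send the I/O to the MDS); once either
 * the file size or the cumulative read_io counter reaches its threshold,
 * the function returns false and the I/O may go via pNFS.
 */
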
1894static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1895{
1896        /*
1897         * Send layoutcommit first, since a pending layoutcommit holds an
1898         * lseg reference that can hold up the layoutreturn.
1899         */
1900        pnfs_layoutcommit_inode(lo->plh_inode, false);
1901        return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1902                                   nfs_wait_bit_killable,
1903                                   TASK_KILLABLE);
1904}
1905
1906static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
1907{
1908        atomic_inc(&lo->plh_outstanding);
1909}
1910
1911static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
1912{
1913        if (atomic_dec_and_test(&lo->plh_outstanding))
1914                wake_up_var(&lo->plh_outstanding);
1915}
1916
1917static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
1918{
1919        return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
1920}
1921
1922static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1923{
1924        unsigned long *bitlock = &lo->plh_flags;
1925
1926        clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1927        smp_mb__after_atomic();
1928        wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1929}
1930
1931static void _add_to_server_list(struct pnfs_layout_hdr *lo,
1932                                struct nfs_server *server)
1933{
1934        if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
1935                struct nfs_client *clp = server->nfs_client;
1936
1937                /* The lo must be on the server's layouts list if there is any
1938                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
1939                 */
1940                spin_lock(&clp->cl_lock);
1941                list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
1942                spin_unlock(&clp->cl_lock);
1943        }
1944}
1945
1946/*
1947 * Layout segment is retrieved from the server if not cached.
1948 * The appropriate layout segment is referenced and returned to the caller.
1949 */
1950struct pnfs_layout_segment *
1951pnfs_update_layout(struct inode *ino,
1952                   struct nfs_open_context *ctx,
1953                   loff_t pos,
1954                   u64 count,
1955                   enum pnfs_iomode iomode,
1956                   bool strict_iomode,
1957                   gfp_t gfp_flags)
1958{
1959        struct pnfs_layout_range arg = {
1960                .iomode = iomode,
1961                .offset = pos,
1962                .length = count,
1963        };
1964        unsigned pg_offset;
1965        struct nfs_server *server = NFS_SERVER(ino);
1966        struct nfs_client *clp = server->nfs_client;
1967        struct pnfs_layout_hdr *lo = NULL;
1968        struct pnfs_layout_segment *lseg = NULL;
1969        struct nfs4_layoutget *lgp;
1970        nfs4_stateid stateid;
1971        long timeout = 0;
1972        unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
1973        bool first;
1974
1975        if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
1976                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1977                                 PNFS_UPDATE_LAYOUT_NO_PNFS);
1978                goto out;
1979        }
1980
1981        if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
1982                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1983                                 PNFS_UPDATE_LAYOUT_MDSTHRESH);
1984                goto out;
1985        }
1986
1987lookup_again:
1988        lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
1989        if (IS_ERR(lseg))
1990                goto out;
1991        first = false;
1992        spin_lock(&ino->i_lock);
1993        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1994        if (lo == NULL) {
1995                spin_unlock(&ino->i_lock);
1996                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1997                                 PNFS_UPDATE_LAYOUT_NOMEM);
1998                goto out;
1999        }
2000
2001        /* Do we even need to bother with this? */
2002        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
2003                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2004                                 PNFS_UPDATE_LAYOUT_BULK_RECALL);
2005                dprintk("%s matches recall, use MDS\n", __func__);
2006                goto out_unlock;
2007        }
2008
2009        /* if LAYOUTGET already failed once, we don't try again */
2010        if (pnfs_layout_io_test_failed(lo, iomode)) {
2011                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2012                                 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
2013                goto out_unlock;
2014        }
2015
2016        /*
2017         * If the layout segment list is empty, but there are outstanding
2018         * layoutget calls, then they might be subject to a layoutrecall.
2019         */
2020        if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
2021            atomic_read(&lo->plh_outstanding) != 0) {
2022                spin_unlock(&ino->i_lock);
2023                lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
2024                                        !atomic_read(&lo->plh_outstanding)));
2025                if (IS_ERR(lseg))
2026                        goto out_put_layout_hdr;
2027                pnfs_put_layout_hdr(lo);
2028                goto lookup_again;
2029        }
2030
2031        /*
2032         * Because we free lsegs when sending LAYOUTRETURN, we need to wait
2033         * for LAYOUTRETURN.
2034         */
2035        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
2036                spin_unlock(&ino->i_lock);
2037                dprintk("%s wait for layoutreturn\n", __func__);
2038                lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
2039                if (!IS_ERR(lseg)) {
2040                        pnfs_put_layout_hdr(lo);
2041                        dprintk("%s retrying\n", __func__);
2042                        trace_pnfs_update_layout(ino, pos, count, iomode, lo,
2043                                                 lseg,
2044                                                 PNFS_UPDATE_LAYOUT_RETRY);
2045                        goto lookup_again;
2046                }
2047                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2048                                         PNFS_UPDATE_LAYOUT_RETURN);
2049                goto out_put_layout_hdr;
2050        }
2051
2052        lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
2053        if (lseg) {
2054                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2055                                PNFS_UPDATE_LAYOUT_FOUND_CACHED);
2056                goto out_unlock;
2057        }
2058
2059        /*
2060         * Choose a stateid for the LAYOUTGET. If we don't have a layout
2061         * stateid, or it has been invalidated, then we must use the open
2062         * stateid.
2063         */
2064        if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
2065                int status;
2066
2067                /*
2068                 * The first layoutget for the file. Need to serialize per
2069                 * RFC 5661 Errata 3208.
2070                 */
2071                if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
2072                                     &lo->plh_flags)) {
2073                        spin_unlock(&ino->i_lock);
2074                        lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
2075                                                NFS_LAYOUT_FIRST_LAYOUTGET,
2076                                                TASK_KILLABLE));
2077                        if (IS_ERR(lseg))
2078                                goto out_put_layout_hdr;
2079                        pnfs_put_layout_hdr(lo);
2080                        dprintk("%s retrying\n", __func__);
2081                        goto lookup_again;
2082                }
2083
2084                spin_unlock(&ino->i_lock);
2085                first = true;
2086                status = nfs4_select_rw_stateid(ctx->state,
2087                                        iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
2088                                        NULL, &stateid, NULL);
2089                if (status != 0) {
2090                        lseg = ERR_PTR(status);
2091                        trace_pnfs_update_layout(ino, pos, count,
2092                                        iomode, lo, lseg,
2093                                        PNFS_UPDATE_LAYOUT_INVALID_OPEN);
2094                        nfs4_schedule_stateid_recovery(server, ctx->state);
2095                        pnfs_clear_first_layoutget(lo);
2096                        pnfs_put_layout_hdr(lo);
2097                        goto lookup_again;
2098                }
2099                spin_lock(&ino->i_lock);
2100        } else {
2101                nfs4_stateid_copy(&stateid, &lo->plh_stateid);
2102        }
2103
2104        if (pnfs_layoutgets_blocked(lo)) {
2105                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2106                                PNFS_UPDATE_LAYOUT_BLOCKED);
2107                goto out_unlock;
2108        }
2109        nfs_layoutget_begin(lo);
2110        spin_unlock(&ino->i_lock);
2111
2112        _add_to_server_list(lo, server);
2113
2114        pg_offset = arg.offset & ~PAGE_MASK;
2115        if (pg_offset) {
2116                arg.offset -= pg_offset;
2117                arg.length += pg_offset;
2118        }
2119        if (arg.length != NFS4_MAX_UINT64)
2120                arg.length = PAGE_ALIGN(arg.length);
2121
2122        lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
2123        if (!lgp) {
2124                trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
2125                                         PNFS_UPDATE_LAYOUT_NOMEM);
2126                nfs_layoutget_end(lo);
2127                goto out_put_layout_hdr;
2128        }
2129
2130        lgp->lo = lo;
2131        pnfs_get_layout_hdr(lo);
2132
2133        lseg = nfs4_proc_layoutget(lgp, &timeout);
2134        trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2135                                 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
2136        nfs_layoutget_end(lo);
2137        if (IS_ERR(lseg)) {
2138                switch(PTR_ERR(lseg)) {
2139                case -EBUSY:
2140                        if (time_after(jiffies, giveup))
2141                                lseg = NULL;
2142                        break;
2143                case -ERECALLCONFLICT:
2144                case -EAGAIN:
2145                        break;
2146                default:
2147                        if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
2148                                pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2149                                lseg = NULL;
2150                        }
2151                        goto out_put_layout_hdr;
2152                }
2153                if (lseg) {
2154                        if (first)
2155                                pnfs_clear_first_layoutget(lo);
2156                        trace_pnfs_update_layout(ino, pos, count,
2157                                iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
2158                        pnfs_put_layout_hdr(lo);
2159                        goto lookup_again;
2160                }
2161        } else {
2162                pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2163        }
2164
2165out_put_layout_hdr:
2166        if (first)
2167                pnfs_clear_first_layoutget(lo);
2168        trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
2169                                 PNFS_UPDATE_LAYOUT_EXIT);
2170        pnfs_put_layout_hdr(lo);
2171out:
2172        dprintk("%s: inode %s/%llu pNFS layout segment %s for "
2173                        "(%s, offset: %llu, length: %llu)\n",
2174                        __func__, ino->i_sb->s_id,
2175                        (unsigned long long)NFS_FILEID(ino),
2176                        IS_ERR_OR_NULL(lseg) ? "not found" : "found",
2177                        iomode==IOMODE_RW ?  "read/write" : "read-only",
2178                        (unsigned long long)pos,
2179                        (unsigned long long)count);
2180        return lseg;
2181out_unlock:
2182        spin_unlock(&ino->i_lock);
2183        goto out_put_layout_hdr;
2184}
2185EXPORT_SYMBOL_GPL(pnfs_update_layout);
2186
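/*
 * Typical caller pattern (see pnfs_generic_pg_init_read/_write later in this
 * file): pass the request's open context, offset and size with IOMODE_READ or
 * IOMODE_RW, propagate an IS_ERR() result into pgio->pg_error, and treat a
 * NULL result as "no layout segment available, fall back to I/O through the
 * MDS".
 */
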
2187static bool
2188pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
2189{
2190        switch (range->iomode) {
2191        case IOMODE_READ:
2192        case IOMODE_RW:
2193                break;
2194        default:
2195                return false;
2196        }
2197        if (range->offset == NFS4_MAX_UINT64)
2198                return false;
2199        if (range->length == 0)
2200                return false;
2201        if (range->length != NFS4_MAX_UINT64 &&
2202            range->length > NFS4_MAX_UINT64 - range->offset)
2203                return false;
2204        return true;
2205}
2206
2207static struct pnfs_layout_hdr *
2208_pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
2209{
2210        struct pnfs_layout_hdr *lo;
2211
2212        spin_lock(&ino->i_lock);
2213        lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
2214        if (!lo)
2215                goto out_unlock;
2216        if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
2217                goto out_unlock;
2218        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
2219                goto out_unlock;
2220        if (pnfs_layoutgets_blocked(lo))
2221                goto out_unlock;
2222        if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
2223                goto out_unlock;
2224        nfs_layoutget_begin(lo);
2225        spin_unlock(&ino->i_lock);
2226        _add_to_server_list(lo, NFS_SERVER(ino));
2227        return lo;
2228
2229out_unlock:
2230        spin_unlock(&ino->i_lock);
2231        pnfs_put_layout_hdr(lo);
2232        return NULL;
2233}
2234
2235static void _lgopen_prepare_attached(struct nfs4_opendata *data,
2236                                     struct nfs_open_context *ctx)
2237{
2238        struct inode *ino = data->dentry->d_inode;
2239        struct pnfs_layout_range rng = {
2240                .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2241                          IOMODE_RW: IOMODE_READ,
2242                .offset = 0,
2243                .length = NFS4_MAX_UINT64,
2244        };
2245        struct nfs4_layoutget *lgp;
2246        struct pnfs_layout_hdr *lo;
2247
2248        /* Heuristic: don't send layoutget if we have cached data */
2249        if (rng.iomode == IOMODE_READ &&
2250           (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
2251                return;
2252
2253        lo = _pnfs_grab_empty_layout(ino, ctx);
2254        if (!lo)
2255                return;
2256        lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
2257                                             &rng, GFP_KERNEL);
2258        if (!lgp) {
2259                pnfs_clear_first_layoutget(lo);
2260                nfs_layoutget_end(lo);
2261                pnfs_put_layout_hdr(lo);
2262                return;
2263        }
2264        lgp->lo = lo;
2265        data->lgp = lgp;
2266        data->o_arg.lg_args = &lgp->args;
2267        data->o_res.lg_res = &lgp->res;
2268}
2269
2270static void _lgopen_prepare_floating(struct nfs4_opendata *data,
2271                                     struct nfs_open_context *ctx)
2272{
2273        struct inode *ino = data->dentry->d_inode;
2274        struct pnfs_layout_range rng = {
2275                .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2276                          IOMODE_RW: IOMODE_READ,
2277                .offset = 0,
2278                .length = NFS4_MAX_UINT64,
2279        };
2280        struct nfs4_layoutget *lgp;
2281
2282        lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
2283                                             &rng, GFP_KERNEL);
2284        if (!lgp)
2285                return;
2286        data->lgp = lgp;
2287        data->o_arg.lg_args = &lgp->args;
2288        data->o_res.lg_res = &lgp->res;
2289}
2290
2291void pnfs_lgopen_prepare(struct nfs4_opendata *data,
2292                         struct nfs_open_context *ctx)
2293{
2294        struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
2295
2296        if (!(pnfs_enabled_sb(server) &&
2297              server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
2298                return;
2299        /* Could check on max_ops, but currently hardcoded high enough */
2300        if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
2301                return;
2302        if (data->lgp)
2303                return;
2304        if (data->state)
2305                _lgopen_prepare_attached(data, ctx);
2306        else
2307                _lgopen_prepare_floating(data, ctx);
2308}
2309
2310void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
2311                       struct nfs_open_context *ctx)
2312{
2313        struct pnfs_layout_hdr *lo;
2314        struct pnfs_layout_segment *lseg;
2315        struct nfs_server *srv = NFS_SERVER(ino);
2316        u32 iomode;
2317
2318        if (!lgp)
2319                return;
2320        dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
2321        if (lgp->res.status) {
2322                switch (lgp->res.status) {
2323                default:
2324                        break;
2325                /*
2326                 * Halt lgopen attempts if the server doesn't recognise
2327                 * the "current stateid" value, the layout type, or the
2328                 * layoutget operation as being valid.
2329                 * Also if it complains about too many ops in the compound
2330                 * or about the request/reply being too big.
2331                 */
2332                case -NFS4ERR_BAD_STATEID:
2333                case -NFS4ERR_NOTSUPP:
2334                case -NFS4ERR_REP_TOO_BIG:
2335                case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
2336                case -NFS4ERR_REQ_TOO_BIG:
2337                case -NFS4ERR_TOO_MANY_OPS:
2338                case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
2339                        srv->caps &= ~NFS_CAP_LGOPEN;
2340                }
2341                return;
2342        }
2343        if (!lgp->lo) {
2344                lo = _pnfs_grab_empty_layout(ino, ctx);
2345                if (!lo)
2346                        return;
2347                lgp->lo = lo;
2348        } else
2349                lo = lgp->lo;
2350
2351        lseg = pnfs_layout_process(lgp);
2352        if (!IS_ERR(lseg)) {
2353                iomode = lgp->args.range.iomode;
2354                pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2355                pnfs_put_lseg(lseg);
2356        }
2357}
2358
2359void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
2360{
2361        if (lgp != NULL) {
2362                if (lgp->lo) {
2363                        pnfs_clear_first_layoutget(lgp->lo);
2364                        nfs_layoutget_end(lgp->lo);
2365                }
2366                pnfs_layoutget_free(lgp);
2367        }
2368}
2369
2370struct pnfs_layout_segment *
2371pnfs_layout_process(struct nfs4_layoutget *lgp)
2372{
2373        struct pnfs_layout_hdr *lo = lgp->lo;
2374        struct nfs4_layoutget_res *res = &lgp->res;
2375        struct pnfs_layout_segment *lseg;
2376        struct inode *ino = lo->plh_inode;
2377        LIST_HEAD(free_me);
2378
2379        if (!pnfs_sanity_check_layout_range(&res->range))
2380                return ERR_PTR(-EINVAL);
2381
2382        /* Inject layout blob into I/O device driver */
2383        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
2384        if (IS_ERR_OR_NULL(lseg)) {
2385                if (!lseg)
2386                        lseg = ERR_PTR(-ENOMEM);
2387
2388                dprintk("%s: Could not allocate layout: error %ld\n",
2389                       __func__, PTR_ERR(lseg));
2390                return lseg;
2391        }
2392
2393        pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
2394
2395        spin_lock(&ino->i_lock);
2396        if (pnfs_layoutgets_blocked(lo)) {
2397                dprintk("%s forget reply due to state\n", __func__);
2398                goto out_forget;
2399        }
2400
2401        if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
2402                goto out_forget;
2403
2404        if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
2405                /* existing state ID, make sure the sequence number matches. */
2406                if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
2407                        if (!pnfs_layout_is_valid(lo))
2408                                lo->plh_barrier = 0;
2409                        dprintk("%s forget reply due to sequence\n", __func__);
2410                        goto out_forget;
2411                }
2412                pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
2413        } else if (pnfs_layout_is_valid(lo)) {
2414                /*
2415                 * We got an entirely new state ID.  Mark all segments for the
2416                 * inode invalid, and retry the layoutget
2417                 */
2418                struct pnfs_layout_range range = {
2419                        .iomode = IOMODE_ANY,
2420                        .length = NFS4_MAX_UINT64,
2421                };
2422                pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0);
2423                goto out_forget;
2424        } else {
2425                /* We have a completely new layout */
2426                pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
2427        }
2428
2429        pnfs_get_lseg(lseg);
2430        pnfs_layout_insert_lseg(lo, lseg, &free_me);
2431
2432
2433        if (res->return_on_close)
2434                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
2435
2436        spin_unlock(&ino->i_lock);
2437        pnfs_free_lseg_list(&free_me);
2438        return lseg;
2439
2440out_forget:
2441        spin_unlock(&ino->i_lock);
2442        lseg->pls_layout = lo;
2443        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
2444        return ERR_PTR(-EAGAIN);
2445}
2446
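/*
 * Summary of the stateid handling above: a reply whose stateid "other" field
 * matches the cached layout stateid just updates the cached seqid (or is
 * forgotten if that seqid is blocked); an entirely new stateid while the
 * layout is still valid invalidates the cached lsegs and makes the caller
 * retry the layoutget; a new stateid on an invalid layout reinitialises the
 * layout state as a completely new layout.
 */
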
2447/**
2448 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
2449 * @lo: pointer to layout header
2450 * @tmp_list: list header to be used with pnfs_free_lseg_list()
2451 * @return_range: describe layout segment ranges to be returned
2452 * @seq: stateid seqid to match
2453 *
2454 * This function is mainly intended for use by layoutrecall. It attempts
2455 * to free matching layout segments immediately, or else to mark them for
2456 * return as soon as their reference counts drop to zero.
2457 *
2458 * Returns
2459 * - 0: a layoutreturn needs to be scheduled.
2460 * - EBUSY: there are layout segments that are still in use.
2461 * - ENOENT: there are no layout segments that need to be returned.
2462 */
2463int
2464pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
2465                                struct list_head *tmp_list,
2466                                const struct pnfs_layout_range *return_range,
2467                                u32 seq)
2468{
2469        struct pnfs_layout_segment *lseg, *next;
2470        int remaining = 0;
2471
2472        dprintk("%s:Begin lo %p\n", __func__, lo);
2473
2474        assert_spin_locked(&lo->plh_inode->i_lock);
2475
2476        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
2477                tmp_list = &lo->plh_return_segs;
2478
2479        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
2480                if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
2481                        dprintk("%s: marking lseg %p iomode %d "
2482                                "offset %llu length %llu\n", __func__,
2483                                lseg, lseg->pls_range.iomode,
2484                                lseg->pls_range.offset,
2485                                lseg->pls_range.length);
2486                        if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
2487                                tmp_list = &lo->plh_return_segs;
2488                        if (mark_lseg_invalid(lseg, tmp_list))
2489                                continue;
2490                        remaining++;
2491                        set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
2492                }
2493
2494        if (remaining) {
2495                pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2496                return -EBUSY;
2497        }
2498
2499        if (!list_empty(&lo->plh_return_segs)) {
2500                pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2501                return 0;
2502        }
2503
2504        return -ENOENT;
2505}
2506
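/*
 * Note how pnfs_mark_layout_for_return() below consumes these return values:
 * anything other than -EBUSY means no lseg is still in use, so the code
 * attempts to prepare and send a layoutreturn right away; -EBUSY instead
 * triggers nfs_commit_inode() so the busy segments can be released first.
 */
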
2507static void
2508pnfs_mark_layout_for_return(struct inode *inode,
2509                            const struct pnfs_layout_range *range)
2510{
2511        struct pnfs_layout_hdr *lo;
2512        bool return_now = false;
2513
2514        spin_lock(&inode->i_lock);
2515        lo = NFS_I(inode)->layout;
2516        if (!pnfs_layout_is_valid(lo)) {
2517                spin_unlock(&inode->i_lock);
2518                return;
2519        }
2520        pnfs_set_plh_return_info(lo, range->iomode, 0);
2521        /*
2522         * mark all matching lsegs so that we are sure to have no live
2523         * segments at hand when sending layoutreturn. See pnfs_put_lseg()
2524         * for how it works.
2525         */
2526        if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
2527                const struct cred *cred;
2528                nfs4_stateid stateid;
2529                enum pnfs_iomode iomode;
2530
2531                return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
2532                spin_unlock(&inode->i_lock);
2533                if (return_now)
2534                        pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
2535        } else {
2536                spin_unlock(&inode->i_lock);
2537                nfs_commit_inode(inode, 0);
2538        }
2539}
2540
2541void pnfs_error_mark_layout_for_return(struct inode *inode,
2542                                       struct pnfs_layout_segment *lseg)
2543{
2544        struct pnfs_layout_range range = {
2545                .iomode = lseg->pls_range.iomode,
2546                .offset = 0,
2547                .length = NFS4_MAX_UINT64,
2548        };
2549
2550        pnfs_mark_layout_for_return(inode, &range);
2551}
2552EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
2553
2554static bool
2555pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
2556{
2557        return pnfs_layout_is_valid(lo) &&
2558                !test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
2559                !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
2560}
2561
2562static struct pnfs_layout_segment *
2563pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
2564                     const struct pnfs_layout_range *range,
2565                     enum pnfs_iomode iomode)
2566{
2567        struct pnfs_layout_segment *lseg;
2568
2569        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
2570                if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
2571                        continue;
2572                if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
2573                        continue;
2574                if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
2575                        continue;
2576                if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
2577                        return lseg;
2578        }
2579        return NULL;
2580}
2581
2582/* Find open file states whose mode matches that of the range */
2583static bool
2584pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
2585                                 const struct pnfs_layout_range *range)
2586{
2587        struct list_head *head;
2588        struct nfs_open_context *ctx;
2589        fmode_t mode = 0;
2590
2591        if (!pnfs_layout_can_be_returned(lo) ||
2592            !pnfs_find_first_lseg(lo, range, range->iomode))
2593                return false;
2594
2595        head = &NFS_I(lo->plh_inode)->open_files;
2596        list_for_each_entry_rcu(ctx, head, list) {
2597                if (ctx->state)
2598                        mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
2599        }
2600
2601        switch (range->iomode) {
2602        default:
2603                break;
2604        case IOMODE_READ:
2605                mode &= ~FMODE_WRITE;
2606                break;
2607        case IOMODE_RW:
2608                if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
2609                        mode &= ~FMODE_READ;
2610        }
2611        return mode == 0;
2612}
2613
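/*
 * For example (hypothetical situation, not from the original source): an
 * IOMODE_RW range on an inode whose only remaining open contexts are
 * read-only counts as unused here, provided a separate IOMODE_READ lseg can
 * still satisfy those readers; if no such READ lseg exists, the FMODE_READ
 * bit survives the masking above and the RW layout is kept.
 */
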
2614static int
2615pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
2616{
2617        const struct pnfs_layout_range *range = data;
2618        struct pnfs_layout_hdr *lo;
2619        struct inode *inode;
2620restart:
2621        rcu_read_lock();
2622        list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
2623                if (!pnfs_layout_can_be_returned(lo) ||
2624                    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
2625                        continue;
2626                inode = lo->plh_inode;
2627                spin_lock(&inode->i_lock);
2628                if (!pnfs_should_return_unused_layout(lo, range)) {
2629                        spin_unlock(&inode->i_lock);
2630                        continue;
2631                }
2632                spin_unlock(&inode->i_lock);
2633                inode = pnfs_grab_inode_layout_hdr(lo);
2634                if (!inode)
2635                        continue;
2636                rcu_read_unlock();
2637                pnfs_mark_layout_for_return(inode, range);
2638                iput(inode);
2639                cond_resched();
2640                goto restart;
2641        }
2642        rcu_read_unlock();
2643        return 0;
2644}
2645
2646void
2647pnfs_layout_return_unused_byclid(struct nfs_client *clp,
2648                                 enum pnfs_iomode iomode)
2649{
2650        struct pnfs_layout_range range = {
2651                .iomode = iomode,
2652                .offset = 0,
2653                .length = NFS4_MAX_UINT64,
2654        };
2655
2656        nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
2657                        &range);
2658}
2659
2660void
2661pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2662{
2663        if (pgio->pg_lseg == NULL ||
2664            test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
2665                return;
2666        pnfs_put_lseg(pgio->pg_lseg);
2667        pgio->pg_lseg = NULL;
2668}
2669EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2670
2671/*
2672 * Check for any intersection between the request and the pgio->pg_lseg,
2673 * and if none, put this pgio->pg_lseg away.
2674 */
2675void
2676pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2677{
2678        if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2679                pnfs_put_lseg(pgio->pg_lseg);
2680                pgio->pg_lseg = NULL;
2681        }
2682}
2683EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
2684
2685void
2686pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2687{
2688        u64 rd_size;
2689
2690        pnfs_generic_pg_check_layout(pgio);
2691        pnfs_generic_pg_check_range(pgio, req);
2692        if (pgio->pg_lseg == NULL) {
2693                if (pgio->pg_dreq == NULL)
2694                        rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
2695                else
2696                        rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
2697
2698                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2699                                                   nfs_req_openctx(req),
2700                                                   req_offset(req),
2701                                                   rd_size,
2702                                                   IOMODE_READ,
2703                                                   false,
2704                                                   GFP_KERNEL);
2705                if (IS_ERR(pgio->pg_lseg)) {
2706                        pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2707                        pgio->pg_lseg = NULL;
2708                        return;
2709                }
2710        }
2711        /* If no lseg, fall back to read through mds */
2712        if (pgio->pg_lseg == NULL)
2713                nfs_pageio_reset_read_mds(pgio);
2714
2715}
2716EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
2717
2718void
2719pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2720                           struct nfs_page *req, u64 wb_size)
2721{
2722        pnfs_generic_pg_check_layout(pgio);
2723        pnfs_generic_pg_check_range(pgio, req);
2724        if (pgio->pg_lseg == NULL) {
2725                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2726                                                   nfs_req_openctx(req),
2727                                                   req_offset(req),
2728                                                   wb_size,
2729                                                   IOMODE_RW,
2730                                                   false,
2731                                                   GFP_KERNEL);
2732                if (IS_ERR(pgio->pg_lseg)) {
2733                        pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2734                        pgio->pg_lseg = NULL;
2735                        return;
2736                }
2737        }
2738        /* If no lseg, fall back to write through mds */
2739        if (pgio->pg_lseg == NULL)
2740                nfs_pageio_reset_write_mds(pgio);
2741}
2742EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
2743
2744void
2745pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
2746{
2747        if (desc->pg_lseg) {
2748                pnfs_put_lseg(desc->pg_lseg);
2749                desc->pg_lseg = NULL;
2750        }
2751}
2752EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
2753
2754/*
2755 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
2756 * of bytes (maximum @req->wb_bytes) that can be coalesced.
2757 */
2758size_t
2759pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2760                     struct nfs_page *prev, struct nfs_page *req)
2761{
2762        unsigned int size;
2763        u64 seg_end, req_start, seg_left;
2764
2765        size = nfs_generic_pg_test(pgio, prev, req);
2766        if (!size)
2767                return 0;
2768
2769        /*
2770         * 'size' contains the number of bytes left in the current page (up
2771         * to the original size asked for in @req->wb_bytes).
2772         *
2773         * Calculate how many bytes are left in the layout segment
2774         * and if there are less bytes than 'size', return that instead.
2775         *
2776         * Please also note that 'seg_end' (from pnfs_end_offset()) is the
2777         * offset of the first byte that lies outside the pnfs_layout_range.
2778         *
2779         */
2780        if (pgio->pg_lseg) {
2781                seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2782                                     pgio->pg_lseg->pls_range.length);
2783                req_start = req_offset(req);
2784
2785                /* start of request is past the last byte of this segment */
2786                if (req_start >= seg_end)
2787                        return 0;
2788
2789                /* adjust 'size' iff there are fewer bytes left in the
2790                 * segment than what nfs_generic_pg_test returned */
2791                seg_left = seg_end - req_start;
2792                if (seg_left < size)
2793                        size = (unsigned int)seg_left;
2794        }
2795
2796        return size;
2797}
2798EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
2799
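/*
 * Worked example with hypothetical numbers (not from the original source):
 * if the current lseg covers offset 0 with length 1 MB and a request starts
 * at 1 MB - 2 KB, then seg_end is 1 MB and seg_left is 2 KB, so at most 2 KB
 * of the request can be coalesced even if nfs_generic_pg_test() allowed
 * more; a request starting at or beyond 1 MB cannot be coalesced at all
 * (0 is returned).
 */
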
2800int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
2801{
2802        struct nfs_pageio_descriptor pgio;
2803
2804        /* Resend all requests through the MDS */
2805        nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2806                              hdr->completion_ops);
2807        set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
2808        return nfs_pageio_resend(&pgio, hdr);
2809}
2810EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
2811
2812static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
2813{
2814
2815        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
2816        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2817            PNFS_LAYOUTRET_ON_ERROR) {
2818                pnfs_return_layout(hdr->inode);
2819        }
2820        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2821                hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
2822}
2823
2824/*
2825 * Called by non rpc-based layout drivers
2826 */
2827void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
2828{
2829        if (likely(!hdr->pnfs_error)) {
2830                pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2831                                hdr->mds_offset + hdr->res.count);
2832                hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2833        }
2834        trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
2835        if (unlikely(hdr->pnfs_error))
2836                pnfs_ld_handle_write_error(hdr);
2837        hdr->mds_ops->rpc_release(hdr);
2838}
2839EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
2840
2841static void
2842pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
2843                struct nfs_pgio_header *hdr)
2844{
2845        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2846
2847        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2848                list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2849                nfs_pageio_reset_write_mds(desc);
2850                mirror->pg_recoalesce = 1;
2851        }
2852        hdr->completion_ops->completion(hdr);
2853}
2854
2855static enum pnfs_try_status
2856pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
2857                        const struct rpc_call_ops *call_ops,
2858                        struct pnfs_layout_segment *lseg,
2859                        int how)
2860{
2861        struct inode *inode = hdr->inode;
2862        enum pnfs_try_status trypnfs;
2863        struct nfs_server *nfss = NFS_SERVER(inode);
2864
2865        hdr->mds_ops = call_ops;
2866
2867        dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
2868                inode->i_ino, hdr->args.count, hdr->args.offset, how);
2869        trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
2870        if (trypnfs != PNFS_NOT_ATTEMPTED)
2871                nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2872        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2873        return trypnfs;
2874}
2875
2876static void
2877pnfs_do_write(struct nfs_pageio_descriptor *desc,
2878              struct nfs_pgio_header *hdr, int how)
2879{
2880        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2881        struct pnfs_layout_segment *lseg = desc->pg_lseg;
2882        enum pnfs_try_status trypnfs;
2883
2884        trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
2885        switch (trypnfs) {
2886        case PNFS_NOT_ATTEMPTED:
2887                pnfs_write_through_mds(desc, hdr);
2888                break;
2889        case PNFS_ATTEMPTED:
2890                break;
2891        case PNFS_TRY_AGAIN:
2892                /* cleanup hdr and prepare to redo pnfs */
2893                if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2894                        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2895                        list_splice_init(&hdr->pages, &mirror->pg_list);
2896                        mirror->pg_recoalesce = 1;
2897                }
2898                hdr->mds_ops->rpc_release(hdr);
2899        }
2900}
2901
2902static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
2903{
2904        pnfs_put_lseg(hdr->lseg);
2905        nfs_pgio_header_free(hdr);
2906}
2907
2908int
2909pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
2910{
2911        struct nfs_pgio_header *hdr;
2912        int ret;
2913
2914        hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2915        if (!hdr) {
2916                desc->pg_error = -ENOMEM;
2917                return desc->pg_error;
2918        }
2919        nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
2920
2921        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2922        ret = nfs_generic_pgio(desc, hdr);
2923        if (!ret)
2924                pnfs_do_write(desc, hdr, desc->pg_ioflags);
2925
2926        return ret;
2927}
2928EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
2929
2930int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
2931{
2932        struct nfs_pageio_descriptor pgio;
2933
2934        /* Resend all requests through the MDS */
2935        nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2936        return nfs_pageio_resend(&pgio, hdr);
2937}
2938EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
2939
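/*
 * A layout driver reported a read error: return the layout if the driver
 * asks for it (PNFS_LAYOUTRET_ON_ERROR), then resend the I/O through the MDS.
 */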
2940static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
2941{
2942        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
2943        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2944            PNFS_LAYOUTRET_ON_ERROR) {
2945                pnfs_return_layout(hdr->inode);
2946        }
2947        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2948                hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
2949}
2950
2951/*
2952 * Called by non-RPC-based layout drivers
2953 */
2954void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
2955{
2956        if (likely(!hdr->pnfs_error))
2957                hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2958        trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
2959        if (unlikely(hdr->pnfs_error))
2960                pnfs_ld_handle_read_error(hdr);
2961        hdr->mds_ops->rpc_release(hdr);
2962}
2963EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
2964
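/*
 * Read-side counterpart of pnfs_write_through_mds(): requeue the pages on
 * the current mirror, reset the descriptor to MDS reads and mark it for
 * re-coalescing before completing the header.
 */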
2965static void
2966pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2967                struct nfs_pgio_header *hdr)
2968{
2969        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2970
2971        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2972                list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2973                nfs_pageio_reset_read_mds(desc);
2974                mirror->pg_recoalesce = 1;
2975        }
2976        hdr->completion_ops->completion(hdr);
2977}
2978
2979/*
2980 * Call the appropriate parallel I/O subsystem read function.
2981 */
2982static enum pnfs_try_status
2983pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2984                       const struct rpc_call_ops *call_ops,
2985                       struct pnfs_layout_segment *lseg)
2986{
2987        struct inode *inode = hdr->inode;
2988        struct nfs_server *nfss = NFS_SERVER(inode);
2989        enum pnfs_try_status trypnfs;
2990
2991        hdr->mds_ops = call_ops;
2992
2993        dprintk("%s: Reading ino:%lu %u@%llu\n",
2994                __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2995
2996        trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2997        if (trypnfs != PNFS_NOT_ATTEMPTED)
2998                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2999        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
3000        return trypnfs;
3001}
3002
3003/* Resend all requests through pnfs. */
3004void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
3005                           unsigned int mirror_idx)
3006{
3007        struct nfs_pageio_descriptor pgio;
3008
3009        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
3010                /* Prevent deadlocks with layoutreturn! */
3011                pnfs_put_lseg(hdr->lseg);
3012                hdr->lseg = NULL;
3013
3014                nfs_pageio_init_read(&pgio, hdr->inode, false,
3015                                        hdr->completion_ops);
3016                pgio.pg_mirror_idx = mirror_idx;
3017                hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
3018        }
3019}
3020EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
3021
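/*
 * Hand the read off to the layout driver.  On PNFS_NOT_ATTEMPTED fall back
 * to the MDS; on PNFS_TRY_AGAIN requeue the pages for another pNFS pass and
 * release the header.
 */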
3022static void
3023pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
3024{
3025        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
3026        struct pnfs_layout_segment *lseg = desc->pg_lseg;
3027        enum pnfs_try_status trypnfs;
3028
3029        trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
3030        switch (trypnfs) {
3031        case PNFS_NOT_ATTEMPTED:
3032                pnfs_read_through_mds(desc, hdr);
3033                break;
3034        case PNFS_ATTEMPTED:
3035                break;
3036        case PNFS_TRY_AGAIN:
3037                /* cleanup hdr and prepare to redo pnfs */
3038                if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
3039                        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
3040                        list_splice_init(&hdr->pages, &mirror->pg_list);
3041                        mirror->pg_recoalesce = 1;
3042                }
3043                hdr->mds_ops->rpc_release(hdr);
3044        }
3045}
3046
3047static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
3048{
3049        pnfs_put_lseg(hdr->lseg);
3050        nfs_pgio_header_free(hdr);
3051}
3052
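/*
 * pNFS ->pg_doio implementation for reads: allocate a pgio header, pin the
 * layout segment, coalesce the queued pages and hand them to pnfs_do_read().
 */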
3053int
3054pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
3055{
3056        struct nfs_pgio_header *hdr;
3057        int ret;
3058
3059        hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
3060        if (!hdr) {
3061                desc->pg_error = -ENOMEM;
3062                return desc->pg_error;
3063        }
3064        nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
3065        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
3066        ret = nfs_generic_pgio(desc, hdr);
3067        if (!ret)
3068                pnfs_do_read(desc, hdr);
3069        return ret;
3070}
3071EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
3072
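/*
 * Release the NFS_INO_LAYOUTCOMMITTING bit and wake up any waiters.
 */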
3073static void pnfs_clear_layoutcommitting(struct inode *inode)
3074{
3075        unsigned long *bitlock = &NFS_I(inode)->flags;
3076
3077        clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
3078        smp_mb__after_atomic();
3079        wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
3080}
3081
3082/*
3083 * There can be multiple RW segments.
3084 */
3085static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
3086{
3087        struct pnfs_layout_segment *lseg;
3088
3089        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
3090                if (lseg->pls_range.iomode == IOMODE_RW &&
3091                    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
3092                        list_add(&lseg->pls_lc_list, listp);
3093        }
3094}
3095
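/*
 * Drop the lseg references taken for this LAYOUTCOMMIT and allow the next
 * one to proceed.
 */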
3096static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
3097{
3098        struct pnfs_layout_segment *lseg, *tmp;
3099
3100        /* Matched by references in pnfs_set_layoutcommit */
3101        list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
3102                list_del_init(&lseg->pls_lc_list);
3103                pnfs_put_lseg(lseg);
3104        }
3105
3106        pnfs_clear_layoutcommitting(inode);
3107}
3108
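/*
 * Mark pNFS I/O in the lseg's iomode as failed for this layout, so that
 * subsequent I/O of that kind temporarily falls back to the MDS.
 */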
3109void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
3110{
3111        pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
3112}
3113EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
3114
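/*
 * Note that a LAYOUTCOMMIT is needed for this inode: record the last write
 * byte (plh_lwb), take a reference on the lseg until the commit completes,
 * and mark the inode dirty so that the commit gets sent.
 */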
3115void
3116pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
3117                loff_t end_pos)
3118{
3119        struct nfs_inode *nfsi = NFS_I(inode);
3120        bool mark_as_dirty = false;
3121
3122        spin_lock(&inode->i_lock);
3123        if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
3124                nfsi->layout->plh_lwb = end_pos;
3125                mark_as_dirty = true;
3126                dprintk("%s: Set layoutcommit for inode %lu ",
3127                        __func__, inode->i_ino);
3128        } else if (end_pos > nfsi->layout->plh_lwb)
3129                nfsi->layout->plh_lwb = end_pos;
3130        if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
3131                /* references matched in nfs4_layoutcommit_release */
3132                pnfs_get_lseg(lseg);
3133        }
3134        spin_unlock(&inode->i_lock);
3135        dprintk("%s: lseg %p end_pos %llu\n",
3136                __func__, lseg, nfsi->layout->plh_lwb);
3137
3138        /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
3139         * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
3140        if (mark_as_dirty)
3141                mark_inode_dirty_sync(inode);
3142}
3143EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
3144
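/*
 * LAYOUTCOMMIT completion: let the layout driver do its own cleanup, then
 * drop the lseg references gathered for this commit and clear the
 * LAYOUTCOMMITTING state.
 */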
3145void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
3146{
3147        struct nfs_server *nfss = NFS_SERVER(data->args.inode);
3148
3149        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
3150                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
3151        pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
3152}
3153
3154/*
3155 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
3156 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
3157 * data to disk to allow the server to recover the data if it crashes.
3158 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
3159 * is off, and a COMMIT is sent to a data server, or
3160 * if WRITEs to a data server return NFS_DATA_SYNC.
3161 */
3162int
3163pnfs_layoutcommit_inode(struct inode *inode, bool sync)
3164{
3165        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
3166        struct nfs4_layoutcommit_data *data;
3167        struct nfs_inode *nfsi = NFS_I(inode);
3168        loff_t end_pos;
3169        int status;
3170
3171        if (!pnfs_layoutcommit_outstanding(inode))
3172                return 0;
3173
3174        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
3175
3176        status = -EAGAIN;
3177        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
3178                if (!sync)
3179                        goto out;
3180                status = wait_on_bit_lock_action(&nfsi->flags,
3181                                NFS_INO_LAYOUTCOMMITTING,
3182                                nfs_wait_bit_killable,
3183                                TASK_KILLABLE);
3184                if (status)
3185                        goto out;
3186        }
3187
3188        status = -ENOMEM;
3189        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
3190        data = kzalloc(sizeof(*data), GFP_NOFS);
3191        if (!data)
3192                goto clear_layoutcommitting;
3193
3194        status = 0;
3195        spin_lock(&inode->i_lock);
3196        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
3197                goto out_unlock;
3198
3199        INIT_LIST_HEAD(&data->lseg_list);
3200        pnfs_list_write_lseg(inode, &data->lseg_list);
3201
3202        end_pos = nfsi->layout->plh_lwb;
3203
3204        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
3205        data->cred = get_cred(nfsi->layout->plh_lc_cred);
3206        spin_unlock(&inode->i_lock);
3207
3208        data->args.inode = inode;
3209        nfs_fattr_init(&data->fattr);
3210        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3211        data->res.fattr = &data->fattr;
3212        if (end_pos != 0)
3213                data->args.lastbytewritten = end_pos - 1;
3214        else
3215                data->args.lastbytewritten = U64_MAX;
3216        data->res.server = NFS_SERVER(inode);
3217
3218        if (ld->prepare_layoutcommit) {
3219                status = ld->prepare_layoutcommit(&data->args);
3220                if (status) {
3221                        put_cred(data->cred);
3222                        spin_lock(&inode->i_lock);
3223                        set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
3224                        if (end_pos > nfsi->layout->plh_lwb)
3225                                nfsi->layout->plh_lwb = end_pos;
3226                        goto out_unlock;
3227                }
3228        }
3229
3230
3231        status = nfs4_proc_layoutcommit(data, sync);
3232out:
3233        if (status)
3234                mark_inode_dirty_sync(inode);
3235        dprintk("<-- %s status %d\n", __func__, status);
3236        return status;
3237out_unlock:
3238        spin_unlock(&inode->i_lock);
3239        kfree(data);
3240clear_layoutcommitting:
3241        pnfs_clear_layoutcommitting(inode);
3242        goto out;
3243}
3244EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
3245
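/*
 * Default ->sync implementation for layout drivers: always flush any
 * pending LAYOUTCOMMIT synchronously, regardless of datasync.
 */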
3246int
3247pnfs_generic_sync(struct inode *inode, bool datasync)
3248{
3249        return pnfs_layoutcommit_inode(inode, true);
3250}
3251EXPORT_SYMBOL_GPL(pnfs_generic_sync);
3252
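/*
 * Allocate a zeroed nfs4_threshold used to hold the server's mdsthreshold
 * hints; returns NULL on allocation failure.
 */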
3253struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
3254{
3255        struct nfs4_threshold *thp;
3256
3257        thp = kzalloc(sizeof(*thp), GFP_NOFS);
3258        if (!thp) {
3259                dprintk("%s mdsthreshold allocation failed\n", __func__);
3260                return NULL;
3261        }
3262        return thp;
3263}
3264
3265#if IS_ENABLED(CONFIG_NFS_V4_2)
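/*
 * Send a LAYOUTSTATS report for the inode if the mount and the layout
 * driver support it.  NFS_INO_LAYOUTSTATS ensures at most one report is
 * outstanding per inode; note it is only cleared here on the error paths.
 */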
3266int
3267pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
3268{
3269        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
3270        struct nfs_server *server = NFS_SERVER(inode);
3271        struct nfs_inode *nfsi = NFS_I(inode);
3272        struct nfs42_layoutstat_data *data;
3273        struct pnfs_layout_hdr *hdr;
3274        int status = 0;
3275
3276        if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
3277                goto out;
3278
3279        if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
3280                goto out;
3281
3282        if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
3283                goto out;
3284
3285        spin_lock(&inode->i_lock);
3286        if (!NFS_I(inode)->layout) {
3287                spin_unlock(&inode->i_lock);
3288                goto out_clear_layoutstats;
3289        }
3290        hdr = NFS_I(inode)->layout;
3291        pnfs_get_layout_hdr(hdr);
3292        spin_unlock(&inode->i_lock);
3293
3294        data = kzalloc(sizeof(*data), gfp_flags);
3295        if (!data) {
3296                status = -ENOMEM;
3297                goto out_put;
3298        }
3299
3300        data->args.fh = NFS_FH(inode);
3301        data->args.inode = inode;
3302        status = ld->prepare_layoutstats(&data->args);
3303        if (status)
3304                goto out_free;
3305
3306        status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
3307
3308out:
3309        dprintk("%s returns %d\n", __func__, status);
3310        return status;
3311
3312out_free:
3313        kfree(data);
3314out_put:
3315        pnfs_put_layout_hdr(hdr);
3316out_clear_layoutstats:
3317        smp_mb__before_atomic();
3318        clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
3319        smp_mb__after_atomic();
3320        goto out;
3321}
3322EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
3323#endif
3324
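/* Tunable override of the layoutstats reporting interval; 0 selects the layout driver's default. */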
3325unsigned int layoutstats_timer;
3326module_param(layoutstats_timer, uint, 0644);
3327EXPORT_SYMBOL_GPL(layoutstats_timer);
3328