linux/fs/nfs/pnfs.c
   1/*
   2 *  pNFS functions to call and manage layout drivers.
   3 *
   4 *  Copyright (c) 2002 [year of first publication]
   5 *  The Regents of the University of Michigan
   6 *  All Rights Reserved
   7 *
   8 *  Dean Hildebrand <dhildebz@umich.edu>
   9 *
  10 *  Permission is granted to use, copy, create derivative works, and
  11 *  redistribute this software and such derivative works for any purpose,
  12 *  so long as the name of the University of Michigan is not used in
  13 *  any advertising or publicity pertaining to the use or distribution
  14 *  of this software without specific, written prior authorization. If
  15 *  the above copyright notice or any other identification of the
  16 *  University of Michigan is included in any copy of any portion of
  17 *  this software, then the disclaimer below must also be included.
  18 *
  19 *  This software is provided as is, without representation or warranty
  20 *  of any kind either express or implied, including without limitation
  21 *  the implied warranties of merchantability, fitness for a particular
  22 *  purpose, or noninfringement.  The Regents of the University of
  23 *  Michigan shall not be liable for any damages, including special,
  24 *  indirect, incidental, or consequential damages, with respect to any
  25 *  claim arising out of or in connection with the use of the software,
  26 *  even if it has been or is hereafter advised of the possibility of
  27 *  such damages.
  28 */
  29
  30#include <linux/nfs_fs.h>
  31#include <linux/nfs_page.h>
  32#include <linux/module.h>
  33#include "internal.h"
  34#include "pnfs.h"
  35#include "iostat.h"
  36
  37#define NFSDBG_FACILITY         NFSDBG_PNFS
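    /* How long to suspend LAYOUTGET for a failed iomode before allowing
     * a retry; see pnfs_layout_io_test_failed().
     */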
  38#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
  39
  40/* Locking:
  41 *
  42 * pnfs_spinlock:
  43 *      protects pnfs_modules_tbl.
  44 */
  45static DEFINE_SPINLOCK(pnfs_spinlock);
  46
  47/*
  48 * pnfs_modules_tbl holds all pnfs modules
  49 */
  50static LIST_HEAD(pnfs_modules_tbl);
  51
  52/* Return the registered pnfs layout driver module matching given id */
  53static struct pnfs_layoutdriver_type *
  54find_pnfs_driver_locked(u32 id)
  55{
  56        struct pnfs_layoutdriver_type *local;
  57
  58        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
  59                if (local->id == id)
  60                        goto out;
  61        local = NULL;
  62out:
  63        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
  64        return local;
  65}
  66
  67static struct pnfs_layoutdriver_type *
  68find_pnfs_driver(u32 id)
  69{
  70        struct pnfs_layoutdriver_type *local;
  71
  72        spin_lock(&pnfs_spinlock);
  73        local = find_pnfs_driver_locked(id);
  74        if (local != NULL && !try_module_get(local->owner)) {
  75                dprintk("%s: Could not grab reference on module\n", __func__);
  76                local = NULL;
  77        }
  78        spin_unlock(&pnfs_spinlock);
  79        return local;
  80}
  81
  82void
  83unset_pnfs_layoutdriver(struct nfs_server *nfss)
  84{
  85        if (nfss->pnfs_curr_ld) {
  86                if (nfss->pnfs_curr_ld->clear_layoutdriver)
  87                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
  88                /* Decrement the MDS count. Purge the deviceid cache if zero */
  89                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
  90                        nfs4_deviceid_purge_client(nfss->nfs_client);
  91                module_put(nfss->pnfs_curr_ld->owner);
  92        }
  93        nfss->pnfs_curr_ld = NULL;
  94}
  95
  96/*
  97 * Try to set the server's pnfs module to the pnfs layout type specified by id.
  98 * Currently only one pNFS layout driver per filesystem is supported.
  99 *
 100 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 101 */
 102void
 103set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
 104                      u32 id)
 105{
 106        struct pnfs_layoutdriver_type *ld_type = NULL;
 107
 108        if (id == 0)
 109                goto out_no_driver;
 110        if (!(server->nfs_client->cl_exchange_flags &
 111                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
 112                printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
 113                        __func__, id, server->nfs_client->cl_exchange_flags);
 114                goto out_no_driver;
 115        }
 116        ld_type = find_pnfs_driver(id);
 117        if (!ld_type) {
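                    /* Try to load a matching layout driver module on
                     * demand; e.g. id 1 (LAYOUT_NFSV4_1_FILES) resolves
                     * to the module alias "nfs-layouttype4-1" (assuming
                     * the usual LAYOUT_NFSV4_1_MODULE_PREFIX value).
                     */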
 118                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
 119                ld_type = find_pnfs_driver(id);
 120                if (!ld_type) {
 121                        dprintk("%s: No pNFS module found for %u.\n",
 122                                __func__, id);
 123                        goto out_no_driver;
 124                }
 125        }
 126        server->pnfs_curr_ld = ld_type;
 127        if (ld_type->set_layoutdriver
 128            && ld_type->set_layoutdriver(server, mntfh)) {
 129                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
 130                        "driver %u.\n", __func__, id);
 131                module_put(ld_type->owner);
 132                goto out_no_driver;
 133        }
 134        /* Bump the MDS count */
 135        atomic_inc(&server->nfs_client->cl_mds_count);
 136
 137        dprintk("%s: pNFS module for %u set\n", __func__, id);
 138        return;
 139
 140out_no_driver:
 141        dprintk("%s: Using NFSv4 I/O\n", __func__);
 142        server->pnfs_curr_ld = NULL;
 143}
 144
 145int
 146pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
 147{
 148        int status = -EINVAL;
 149        struct pnfs_layoutdriver_type *tmp;
 150
 151        if (ld_type->id == 0) {
 152                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
 153                return status;
 154        }
 155        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
 156                printk(KERN_ERR "NFS: %s Layout driver must provide "
 157                       "alloc_lseg and free_lseg.\n", __func__);
 158                return status;
 159        }
 160
 161        spin_lock(&pnfs_spinlock);
 162        tmp = find_pnfs_driver_locked(ld_type->id);
 163        if (!tmp) {
 164                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
 165                status = 0;
 166                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
 167                        ld_type->name);
 168        } else {
 169                printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
 170                        __func__, ld_type->id);
 171        }
 172        spin_unlock(&pnfs_spinlock);
 173
 174        return status;
 175}
 176EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
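
    /*
     * Registration normally happens from a layout driver's module init.
     * A minimal sketch (the example_* callbacks and the driver itself are
     * hypothetical; the fields follow struct pnfs_layoutdriver_type, and
     * alloc_lseg/free_lseg are mandatory per the check above):
     *
     *	static struct pnfs_layoutdriver_type example_layout_type = {
     *		.id		  = LAYOUT_NFSV4_1_FILES,
     *		.name		  = "LAYOUT_NFSV4_1_FILES",
     *		.owner		  = THIS_MODULE,
     *		.alloc_layout_hdr = example_alloc_layout_hdr,
     *		.free_layout_hdr  = example_free_layout_hdr,
     *		.alloc_lseg	  = example_alloc_lseg,
     *		.free_lseg	  = example_free_lseg,
     *	};
     *
     *	static int __init example_init(void)
     *	{
     *		return pnfs_register_layoutdriver(&example_layout_type);
     *	}
     *
     *	static void __exit example_exit(void)
     *	{
     *		pnfs_unregister_layoutdriver(&example_layout_type);
     *	}
     */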
 177
 178void
 179pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
 180{
 181        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
 182        spin_lock(&pnfs_spinlock);
 183        list_del(&ld_type->pnfs_tblid);
 184        spin_unlock(&pnfs_spinlock);
 185}
 186EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
 187
 188/*
 189 * pNFS client layout cache
 190 */
 191
 192/* Need to hold i_lock if caller does not already hold reference */
 193void
 194pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
 195{
 196        atomic_inc(&lo->plh_refcount);
 197}
 198
 199static struct pnfs_layout_hdr *
 200pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
 201{
 202        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
 203        return ld->alloc_layout_hdr(ino, gfp_flags);
 204}
 205
 206static void
 207pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
 208{
 209        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
 210        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 211
 212        if (!list_empty(&lo->plh_layouts)) {
 213                struct nfs_client *clp = server->nfs_client;
 214
 215                spin_lock(&clp->cl_lock);
 216                list_del_init(&lo->plh_layouts);
 217                spin_unlock(&clp->cl_lock);
 218        }
 219        put_rpccred(lo->plh_lc_cred);
 220        ld->free_layout_hdr(lo);
 221}
 222
 223static void
 224pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
 225{
 226        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
 227        dprintk("%s: freeing layout cache %p\n", __func__, lo);
 228        nfsi->layout = NULL;
 229        /* Reset MDS Threshold I/O counters */
 230        nfsi->write_io = 0;
 231        nfsi->read_io = 0;
 232}
 233
 234void
 235pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 236{
 237        struct inode *inode = lo->plh_inode;
 238
 239        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
 240                pnfs_detach_layout_hdr(lo);
 241                spin_unlock(&inode->i_lock);
 242                pnfs_free_layout_hdr(lo);
 243        }
 244}
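
    /*
     * Note that atomic_dec_and_lock() only takes i_lock when the refcount
     * is about to reach zero, so uncontended puts stay lock-free and only
     * the final put detaches and frees the header under the lock.
     */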
 245
 246static int
 247pnfs_iomode_to_fail_bit(u32 iomode)
 248{
 249        return iomode == IOMODE_RW ?
 250                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
 251}
 252
 253static void
 254pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 255{
 256        lo->plh_retry_timestamp = jiffies;
 257        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
 258                atomic_inc(&lo->plh_refcount);
 259}
 260
 261static void
 262pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 263{
 264        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
 265                atomic_dec(&lo->plh_refcount);
 266}
 267
 268static void
 269pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
 270{
 271        struct inode *inode = lo->plh_inode;
 272        struct pnfs_layout_range range = {
 273                .iomode = iomode,
 274                .offset = 0,
 275                .length = NFS4_MAX_UINT64,
 276        };
 277        LIST_HEAD(head);
 278
 279        spin_lock(&inode->i_lock);
 280        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
 281        pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
 282        spin_unlock(&inode->i_lock);
 283        pnfs_free_lseg_list(&head);
 284        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
 285                        iomode == IOMODE_RW ? "RW" : "READ");
 286}
 287
 288static bool
 289pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
 290{
 291        unsigned long start, end;
 292        int fail_bit = pnfs_iomode_to_fail_bit(iomode);
 293
 294        if (test_bit(fail_bit, &lo->plh_flags) == 0)
 295                return false;
 296        end = jiffies;
 297        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
 298        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
 299                /* It is time to retry the failed layoutgets */
 300                pnfs_layout_clear_fail_bit(lo, fail_bit);
 301                return false;
 302        }
 303        return true;
 304}
 305
 306static void
 307init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 308{
 309        INIT_LIST_HEAD(&lseg->pls_list);
 310        INIT_LIST_HEAD(&lseg->pls_lc_list);
 311        atomic_set(&lseg->pls_refcount, 1);
 312        smp_mb();
 313        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
 314        lseg->pls_layout = lo;
 315}
 316
 317static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
 318{
 319        struct inode *ino = lseg->pls_layout->plh_inode;
 320
 321        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
 322}
 323
 324static void
 325pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
 326                struct pnfs_layout_segment *lseg)
 327{
 328        struct inode *inode = lo->plh_inode;
 329
 330        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 331        list_del_init(&lseg->pls_list);
 332        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
 333        atomic_dec(&lo->plh_refcount);
 334        if (list_empty(&lo->plh_segs))
 335                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 336        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 337}
 338
 339void
 340pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 341{
 342        struct pnfs_layout_hdr *lo;
 343        struct inode *inode;
 344
 345        if (!lseg)
 346                return;
 347
 348        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
 349                atomic_read(&lseg->pls_refcount),
 350                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 351        lo = lseg->pls_layout;
 352        inode = lo->plh_inode;
 353        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
 354                pnfs_get_layout_hdr(lo);
 355                pnfs_layout_remove_lseg(lo, lseg);
 356                spin_unlock(&inode->i_lock);
 357                pnfs_free_lseg(lseg);
 358                pnfs_put_layout_hdr(lo);
 359        }
 360}
 361EXPORT_SYMBOL_GPL(pnfs_put_lseg);
 362
 363static inline u64
 364end_offset(u64 start, u64 len)
 365{
 366        u64 end;
 367
 368        end = start + len;
 369        return end >= start ? end : NFS4_MAX_UINT64;
 370}
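
    /*
     * end_offset() saturates rather than wrapping: for example,
     * end_offset(0, 100) is 100, while end_offset(NFS4_MAX_UINT64 - 1, 2)
     * would overflow and is clamped to NFS4_MAX_UINT64, which the code
     * below treats as an unbounded range.
     */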
 371
 372/*
 373 * is l2 fully contained in l1?
 374 *   start1                             end1
 375 *   [----------------------------------)
 376 *           start2           end2
 377 *           [----------------)
 378 */
 379static inline int
 380lo_seg_contained(struct pnfs_layout_range *l1,
 381                 struct pnfs_layout_range *l2)
 382{
 383        u64 start1 = l1->offset;
 384        u64 end1 = end_offset(start1, l1->length);
 385        u64 start2 = l2->offset;
 386        u64 end2 = end_offset(start2, l2->length);
 387
 388        return (start1 <= start2) && (end1 >= end2);
 389}
 390
 391/*
  392 * do l1 and l2 intersect?
 393 *   start1                             end1
 394 *   [----------------------------------)
 395 *                              start2           end2
 396 *                              [----------------)
 397 */
 398static inline int
 399lo_seg_intersecting(struct pnfs_layout_range *l1,
 400                    struct pnfs_layout_range *l2)
 401{
 402        u64 start1 = l1->offset;
 403        u64 end1 = end_offset(start1, l1->length);
 404        u64 start2 = l2->offset;
 405        u64 end2 = end_offset(start2, l2->length);
 406
 407        return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
 408               (end2 == NFS4_MAX_UINT64 || end2 > start1);
 409}
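
    /*
     * The ranges are half-open, so e.g. [0, 100) and [100, 200) do not
     * intersect; an end of NFS4_MAX_UINT64 (from a wrapped or all-ones
     * length) is treated as extending to EOF.
     */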
 410
 411static bool
 412should_free_lseg(struct pnfs_layout_range *lseg_range,
 413                 struct pnfs_layout_range *recall_range)
 414{
 415        return (recall_range->iomode == IOMODE_ANY ||
 416                lseg_range->iomode == recall_range->iomode) &&
 417               lo_seg_intersecting(lseg_range, recall_range);
 418}
 419
 420/* Returns 1 if lseg is removed from list, 0 otherwise */
 421static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
 422                             struct list_head *tmp_list)
 423{
 424        int rv = 0;
 425
 426        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
 427                /* Remove the reference keeping the lseg in the
 428                 * list.  It will now be removed when all
 429                 * outstanding io is finished.
 430                 */
 431                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
 432                        atomic_read(&lseg->pls_refcount));
 433                if (atomic_dec_and_test(&lseg->pls_refcount)) {
 434                        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
 435                        list_add(&lseg->pls_list, tmp_list);
 436                        rv = 1;
 437                }
 438        }
 439        return rv;
 440}
 441
  442/* Returns the number of matching invalid lsegs remaining in the list
  443 * after the call.
  444 */
 445int
 446pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 447                            struct list_head *tmp_list,
 448                            struct pnfs_layout_range *recall_range)
 449{
 450        struct pnfs_layout_segment *lseg, *next;
 451        int invalid = 0, removed = 0;
 452
 453        dprintk("%s:Begin lo %p\n", __func__, lo);
 454
 455        if (list_empty(&lo->plh_segs))
 456                return 0;
 457        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
 458                if (!recall_range ||
 459                    should_free_lseg(&lseg->pls_range, recall_range)) {
 460                        dprintk("%s: freeing lseg %p iomode %d "
 461                                "offset %llu length %llu\n", __func__,
 462                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
 463                                lseg->pls_range.length);
 464                        invalid++;
 465                        removed += mark_lseg_invalid(lseg, tmp_list);
 466                }
 467        dprintk("%s:Return %i\n", __func__, invalid - removed);
 468        return invalid - removed;
 469}
 470
 471/* note free_me must contain lsegs from a single layout_hdr */
 472void
 473pnfs_free_lseg_list(struct list_head *free_me)
 474{
 475        struct pnfs_layout_segment *lseg, *tmp;
 476
 477        if (list_empty(free_me))
 478                return;
 479
 480        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
 481                list_del(&lseg->pls_list);
 482                pnfs_free_lseg(lseg);
 483        }
 484}
 485
 486void
 487pnfs_destroy_layout(struct nfs_inode *nfsi)
 488{
 489        struct pnfs_layout_hdr *lo;
 490        LIST_HEAD(tmp_list);
 491
 492        spin_lock(&nfsi->vfs_inode.i_lock);
 493        lo = nfsi->layout;
 494        if (lo) {
 495                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
 496                pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
 497                pnfs_get_layout_hdr(lo);
 498                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
 499                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
 500                spin_unlock(&nfsi->vfs_inode.i_lock);
 501                pnfs_free_lseg_list(&tmp_list);
 502                pnfs_put_layout_hdr(lo);
 503        } else
 504                spin_unlock(&nfsi->vfs_inode.i_lock);
 505}
 506EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 507
 508/*
  509 * Called by the state manager to remove all layouts established under an
 510 * expired lease.
 511 */
 512void
 513pnfs_destroy_all_layouts(struct nfs_client *clp)
 514{
 515        struct nfs_server *server;
 516        struct pnfs_layout_hdr *lo;
 517        LIST_HEAD(tmp_list);
 518
 519        nfs4_deviceid_mark_client_invalid(clp);
 520        nfs4_deviceid_purge_client(clp);
 521
 522        spin_lock(&clp->cl_lock);
 523        rcu_read_lock();
 524        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 525                if (!list_empty(&server->layouts))
 526                        list_splice_init(&server->layouts, &tmp_list);
 527        }
 528        rcu_read_unlock();
 529        spin_unlock(&clp->cl_lock);
 530
 531        while (!list_empty(&tmp_list)) {
 532                lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
 533                                plh_layouts);
 534                dprintk("%s freeing layout for inode %lu\n", __func__,
 535                        lo->plh_inode->i_ino);
 536                list_del_init(&lo->plh_layouts);
 537                pnfs_destroy_layout(NFS_I(lo->plh_inode));
 538        }
 539}
 540
 541/*
 542 * Compare 2 layout stateid sequence ids, to see which is newer,
 543 * taking into account wraparound issues.
 544 */
 545static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
 546{
 547        return (s32)s1 - (s32)s2 > 0;
 548}
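
    /*
     * The signed subtraction makes this wraparound-safe: for example,
     * pnfs_seqid_is_newer(1, 0xffffffff) is true, since
     * (s32)1 - (s32)0xffffffff == 2 > 0.
     */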
 549
  550/* update lo->plh_stateid with new if it is more recent */
 551void
 552pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
 553                        bool update_barrier)
 554{
 555        u32 oldseq, newseq, new_barrier;
 556        int empty = list_empty(&lo->plh_segs);
 557
 558        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
 559        newseq = be32_to_cpu(new->seqid);
 560        if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
 561                nfs4_stateid_copy(&lo->plh_stateid, new);
 562                if (update_barrier) {
 563                        new_barrier = be32_to_cpu(new->seqid);
 564                } else {
 565                        /* Because of wraparound, we want to keep the barrier
 566                         * "close" to the current seqids.
 567                         */
 568                        new_barrier = newseq - atomic_read(&lo->plh_outstanding);
 569                }
 570                if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
 571                        lo->plh_barrier = new_barrier;
 572        }
 573}
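
    /*
     * Illustration of the barrier arithmetic above: with newseq = 5 and
     * two LAYOUTGETs outstanding, !update_barrier yields new_barrier = 3,
     * so pnfs_layout_stateid_blocked() rejects replies carrying seqid 3
     * or older while seqids 4 and 5 remain acceptable.
     */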
 574
 575static bool
 576pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
 577                const nfs4_stateid *stateid)
 578{
 579        u32 seqid = be32_to_cpu(stateid->seqid);
 580
 581        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
 582}
 583
 584/* lget is set to 1 if called from inside send_layoutget call chain */
 585static bool
 586pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
 587{
 588        return lo->plh_block_lgets ||
 589                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
 590                (list_empty(&lo->plh_segs) &&
 591                 (atomic_read(&lo->plh_outstanding) > lget));
 592}
 593
 594int
 595pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
 596                              struct nfs4_state *open_state)
 597{
 598        int status = 0;
 599
 600        dprintk("--> %s\n", __func__);
 601        spin_lock(&lo->plh_inode->i_lock);
 602        if (pnfs_layoutgets_blocked(lo, 1)) {
 603                status = -EAGAIN;
 604        } else if (list_empty(&lo->plh_segs)) {
 605                int seq;
 606
 607                do {
 608                        seq = read_seqbegin(&open_state->seqlock);
 609                        nfs4_stateid_copy(dst, &open_state->stateid);
 610                } while (read_seqretry(&open_state->seqlock, seq));
 611        } else
 612                nfs4_stateid_copy(dst, &lo->plh_stateid);
 613        spin_unlock(&lo->plh_inode->i_lock);
 614        dprintk("<-- %s\n", __func__);
 615        return status;
 616}
 617
  618/*
  619 * Get layout from server.
  620 *    for now, assume that whole file layouts are requested.
  621 *    arg->offset: 0
  622 *    arg->length: all ones
  623 */
 624static struct pnfs_layout_segment *
 625send_layoutget(struct pnfs_layout_hdr *lo,
 626           struct nfs_open_context *ctx,
 627           struct pnfs_layout_range *range,
 628           gfp_t gfp_flags)
 629{
 630        struct inode *ino = lo->plh_inode;
 631        struct nfs_server *server = NFS_SERVER(ino);
 632        struct nfs4_layoutget *lgp;
 633        struct pnfs_layout_segment *lseg;
 634
 635        dprintk("--> %s\n", __func__);
 636
 637        lgp = kzalloc(sizeof(*lgp), gfp_flags);
 638        if (lgp == NULL)
 639                return NULL;
 640
 641        lgp->args.minlength = PAGE_CACHE_SIZE;
 642        if (lgp->args.minlength > range->length)
 643                lgp->args.minlength = range->length;
 644        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 645        lgp->args.range = *range;
 646        lgp->args.type = server->pnfs_curr_ld->id;
 647        lgp->args.inode = ino;
 648        lgp->args.ctx = get_nfs_open_context(ctx);
 649        lgp->gfp_flags = gfp_flags;
 650
 651        /* Synchronously retrieve layout information from server and
 652         * store in lseg.
 653         */
 654        lseg = nfs4_proc_layoutget(lgp, gfp_flags);
 655        if (IS_ERR(lseg)) {
 656                switch (PTR_ERR(lseg)) {
 657                case -ENOMEM:
 658                case -ERESTARTSYS:
 659                        break;
 660                default:
 661                        /* remember that LAYOUTGET failed and suspend trying */
 662                        pnfs_layout_io_set_failed(lo, range->iomode);
 663                }
 664                return NULL;
 665        }
 666
 667        return lseg;
 668}
 669
 670/*
 671 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 672 * when the layout segment list is empty.
 673 *
 674 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 675 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 676 * deviceid is marked invalid.
 677 */
 678int
 679_pnfs_return_layout(struct inode *ino)
 680{
 681        struct pnfs_layout_hdr *lo = NULL;
 682        struct nfs_inode *nfsi = NFS_I(ino);
 683        LIST_HEAD(tmp_list);
 684        struct nfs4_layoutreturn *lrp;
 685        nfs4_stateid stateid;
 686        int status = 0, empty;
 687
 688        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
 689
 690        spin_lock(&ino->i_lock);
 691        lo = nfsi->layout;
 692        if (!lo) {
 693                spin_unlock(&ino->i_lock);
 694                dprintk("NFS: %s no layout to return\n", __func__);
 695                goto out;
 696        }
 697        stateid = nfsi->layout->plh_stateid;
 698        /* Reference matched in nfs4_layoutreturn_release */
 699        pnfs_get_layout_hdr(lo);
 700        empty = list_empty(&lo->plh_segs);
 701        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
 702        /* Don't send a LAYOUTRETURN if list was initially empty */
 703        if (empty) {
 704                spin_unlock(&ino->i_lock);
 705                pnfs_put_layout_hdr(lo);
 706                dprintk("NFS: %s no layout segments to return\n", __func__);
 707                goto out;
 708        }
 709        lo->plh_block_lgets++;
 710        spin_unlock(&ino->i_lock);
 711        pnfs_free_lseg_list(&tmp_list);
 712
 713        WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
 714
 715        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
 716        if (unlikely(lrp == NULL)) {
 717                status = -ENOMEM;
 718                spin_lock(&ino->i_lock);
 719                lo->plh_block_lgets--;
 720                spin_unlock(&ino->i_lock);
 721                pnfs_put_layout_hdr(lo);
 722                goto out;
 723        }
 724
 725        lrp->args.stateid = stateid;
 726        lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
 727        lrp->args.inode = ino;
 728        lrp->args.layout = lo;
 729        lrp->clp = NFS_SERVER(ino)->nfs_client;
 730
 731        status = nfs4_proc_layoutreturn(lrp);
 732out:
 733        dprintk("<-- %s status: %d\n", __func__, status);
 734        return status;
 735}
 736EXPORT_SYMBOL_GPL(_pnfs_return_layout);
 737
 738bool pnfs_roc(struct inode *ino)
 739{
 740        struct pnfs_layout_hdr *lo;
 741        struct pnfs_layout_segment *lseg, *tmp;
 742        LIST_HEAD(tmp_list);
 743        bool found = false;
 744
 745        spin_lock(&ino->i_lock);
 746        lo = NFS_I(ino)->layout;
 747        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
 748            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
 749                goto out_nolayout;
 750        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
 751                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
 752                        mark_lseg_invalid(lseg, &tmp_list);
 753                        found = true;
 754                }
 755        if (!found)
 756                goto out_nolayout;
 757        lo->plh_block_lgets++;
 758        pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
 759        spin_unlock(&ino->i_lock);
 760        pnfs_free_lseg_list(&tmp_list);
 761        return true;
 762
 763out_nolayout:
 764        spin_unlock(&ino->i_lock);
 765        return false;
 766}
 767
 768void pnfs_roc_release(struct inode *ino)
 769{
 770        struct pnfs_layout_hdr *lo;
 771
 772        spin_lock(&ino->i_lock);
 773        lo = NFS_I(ino)->layout;
 774        lo->plh_block_lgets--;
 775        if (atomic_dec_and_test(&lo->plh_refcount)) {
 776                pnfs_detach_layout_hdr(lo);
 777                spin_unlock(&ino->i_lock);
 778                pnfs_free_layout_hdr(lo);
 779        } else
 780                spin_unlock(&ino->i_lock);
 781}
 782
 783void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
 784{
 785        struct pnfs_layout_hdr *lo;
 786
 787        spin_lock(&ino->i_lock);
 788        lo = NFS_I(ino)->layout;
 789        if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
 790                lo->plh_barrier = barrier;
 791        spin_unlock(&ino->i_lock);
 792}
 793
 794bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
 795{
 796        struct nfs_inode *nfsi = NFS_I(ino);
 797        struct pnfs_layout_hdr *lo;
 798        struct pnfs_layout_segment *lseg;
 799        u32 current_seqid;
 800        bool found = false;
 801
 802        spin_lock(&ino->i_lock);
 803        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
 804                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
 805                        rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
 806                        found = true;
 807                        goto out;
 808                }
 809        lo = nfsi->layout;
 810        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
 811
 812        /* Since close does not return a layout stateid for use as
 813         * a barrier, we choose the worst-case barrier.
 814         */
 815        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
 816out:
 817        spin_unlock(&ino->i_lock);
 818        return found;
 819}
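
    /*
     * Worst case above: each of the plh_outstanding LAYOUTGETs may bump
     * the seqid by one, so current_seqid + plh_outstanding is the highest
     * seqid an in-flight reply could carry, and a barrier at that value
     * covers every reply that could still arrive.
     */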
 820
 821/*
 822 * Compare two layout segments for sorting into layout cache.
 823 * We want to preferentially return RW over RO layouts, so ensure those
 824 * are seen first.
 825 */
 826static s64
 827cmp_layout(struct pnfs_layout_range *l1,
 828           struct pnfs_layout_range *l2)
 829{
 830        s64 d;
 831
 832        /* higher offset sorts after lower offset */
 833        d = l1->offset - l2->offset;
 834        if (d)
 835                return d;
 836
 837        /* shorter length sorts after longer length */
 838        d = l2->length - l1->length;
 839        if (d)
 840                return d;
 841
 842        /* READ sorts after RW */
 843        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
 844}
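
    /*
     * With equal offset and length, the iomode term decides: an RW lseg
     * compares lower than a READ lseg ((0) - (1) == -1), so RW segments
     * land ahead of READ segments in the cache, as intended.
     */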
 845
 846static void
 847pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
 848                   struct pnfs_layout_segment *lseg)
 849{
 850        struct pnfs_layout_segment *lp;
 851
 852        dprintk("%s:Begin\n", __func__);
 853
 854        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
 855                if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
 856                        continue;
 857                list_add_tail(&lseg->pls_list, &lp->pls_list);
 858                dprintk("%s: inserted lseg %p "
 859                        "iomode %d offset %llu length %llu before "
 860                        "lp %p iomode %d offset %llu length %llu\n",
 861                        __func__, lseg, lseg->pls_range.iomode,
 862                        lseg->pls_range.offset, lseg->pls_range.length,
 863                        lp, lp->pls_range.iomode, lp->pls_range.offset,
 864                        lp->pls_range.length);
 865                goto out;
 866        }
 867        list_add_tail(&lseg->pls_list, &lo->plh_segs);
 868        dprintk("%s: inserted lseg %p "
 869                "iomode %d offset %llu length %llu at tail\n",
 870                __func__, lseg, lseg->pls_range.iomode,
 871                lseg->pls_range.offset, lseg->pls_range.length);
 872out:
 873        pnfs_get_layout_hdr(lo);
 874
 875        dprintk("%s:Return\n", __func__);
 876}
 877
 878static struct pnfs_layout_hdr *
 879alloc_init_layout_hdr(struct inode *ino,
 880                      struct nfs_open_context *ctx,
 881                      gfp_t gfp_flags)
 882{
 883        struct pnfs_layout_hdr *lo;
 884
 885        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
 886        if (!lo)
 887                return NULL;
 888        atomic_set(&lo->plh_refcount, 1);
 889        INIT_LIST_HEAD(&lo->plh_layouts);
 890        INIT_LIST_HEAD(&lo->plh_segs);
 891        INIT_LIST_HEAD(&lo->plh_bulk_recall);
 892        lo->plh_inode = ino;
 893        lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
 894        return lo;
 895}
 896
 897static struct pnfs_layout_hdr *
 898pnfs_find_alloc_layout(struct inode *ino,
 899                       struct nfs_open_context *ctx,
 900                       gfp_t gfp_flags)
 901{
 902        struct nfs_inode *nfsi = NFS_I(ino);
 903        struct pnfs_layout_hdr *new = NULL;
 904
 905        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
 906
 907        if (nfsi->layout != NULL)
 908                goto out_existing;
 909        spin_unlock(&ino->i_lock);
 910        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
 911        spin_lock(&ino->i_lock);
 912
 913        if (likely(nfsi->layout == NULL)) {     /* Won the race? */
 914                nfsi->layout = new;
 915                return new;
 916        } else if (new != NULL)
 917                pnfs_free_layout_hdr(new);
 918out_existing:
 919        pnfs_get_layout_hdr(nfsi->layout);
 920        return nfsi->layout;
 921}
 922
 923/*
 924 * iomode matching rules:
 925 * iomode       lseg    match
 926 * -----        -----   -----
 927 * ANY          READ    true
 928 * ANY          RW      true
 929 * RW           READ    false
 930 * RW           RW      true
 931 * READ         READ    true
 932 * READ         RW      true
 933 */
 934static int
 935is_matching_lseg(struct pnfs_layout_range *ls_range,
 936                 struct pnfs_layout_range *range)
 937{
 938        struct pnfs_layout_range range1;
 939
 940        if ((range->iomode == IOMODE_RW &&
 941             ls_range->iomode != IOMODE_RW) ||
 942            !lo_seg_intersecting(ls_range, range))
 943                return 0;
 944
 945        /* range1 covers only the first byte in the range */
 946        range1 = *range;
 947        range1.length = 1;
 948        return lo_seg_contained(ls_range, &range1);
 949}
 950
 951/*
 952 * lookup range in layout
 953 */
 954static struct pnfs_layout_segment *
 955pnfs_find_lseg(struct pnfs_layout_hdr *lo,
 956                struct pnfs_layout_range *range)
 957{
 958        struct pnfs_layout_segment *lseg, *ret = NULL;
 959
 960        dprintk("%s:Begin\n", __func__);
 961
 962        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
 963                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
 964                    is_matching_lseg(&lseg->pls_range, range)) {
 965                        ret = pnfs_get_lseg(lseg);
 966                        break;
 967                }
 968                if (lseg->pls_range.offset > range->offset)
 969                        break;
 970        }
 971
 972        dprintk("%s:Return lseg %p ref %d\n",
 973                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
 974        return ret;
 975}
 976
 977/*
 978 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 979 * to the MDS or over pNFS
 980 *
 981 * The nfs_inode read_io and write_io fields are cumulative counters reset
 982 * when there are no layout segments. Note that in pnfs_update_layout iomode
 983 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 984 * WRITE request.
 985 *
 986 * A return of true means use MDS I/O.
 987 *
 988 * From rfc 5661:
 989 * If a file's size is smaller than the file size threshold, data accesses
 990 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 991 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 992 * server.  If both file size and I/O size are provided, the client SHOULD
  993 *  reach or exceed both thresholds before sending its read or write
 994 * requests to the data server.
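     *
     *  For example (hypothetical thresholds): with rd_sz = 1 MB and
     *  rd_io_sz = 32 KB both set in the OPEN hint, reads go to the MDS
     *  only while the file is smaller than 1 MB and cumulative read_io
     *  is still below 32 KB; otherwise they go over pNFS.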
 995 */
 996static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
 997                                     struct inode *ino, int iomode)
 998{
 999        struct nfs4_threshold *t = ctx->mdsthreshold;
1000        struct nfs_inode *nfsi = NFS_I(ino);
1001        loff_t fsize = i_size_read(ino);
1002        bool size = false, size_set = false, io = false, io_set = false, ret = false;
1003
1004        if (t == NULL)
1005                return ret;
1006
1007        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1008                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1009
1010        switch (iomode) {
1011        case IOMODE_READ:
1012                if (t->bm & THRESHOLD_RD) {
1013                        dprintk("%s fsize %llu\n", __func__, fsize);
1014                        size_set = true;
1015                        if (fsize < t->rd_sz)
1016                                size = true;
1017                }
1018                if (t->bm & THRESHOLD_RD_IO) {
1019                        dprintk("%s nfsi->read_io %llu\n", __func__,
1020                                nfsi->read_io);
1021                        io_set = true;
1022                        if (nfsi->read_io < t->rd_io_sz)
1023                                io = true;
1024                }
1025                break;
1026        case IOMODE_RW:
1027                if (t->bm & THRESHOLD_WR) {
1028                        dprintk("%s fsize %llu\n", __func__, fsize);
1029                        size_set = true;
1030                        if (fsize < t->wr_sz)
1031                                size = true;
1032                }
1033                if (t->bm & THRESHOLD_WR_IO) {
1034                        dprintk("%s nfsi->write_io %llu\n", __func__,
1035                                nfsi->write_io);
1036                        io_set = true;
1037                        if (nfsi->write_io < t->wr_io_sz)
1038                                io = true;
1039                }
1040                break;
1041        }
1042        if (size_set && io_set) {
1043                if (size && io)
1044                        ret = true;
1045        } else if (size || io)
1046                ret = true;
1047
1048        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1049        return ret;
1050}
1051
1052/*
 1053 * Layout segment is retrieved from the server if not cached.
1054 * The appropriate layout segment is referenced and returned to the caller.
1055 */
1056struct pnfs_layout_segment *
1057pnfs_update_layout(struct inode *ino,
1058                   struct nfs_open_context *ctx,
1059                   loff_t pos,
1060                   u64 count,
1061                   enum pnfs_iomode iomode,
1062                   gfp_t gfp_flags)
1063{
1064        struct pnfs_layout_range arg = {
1065                .iomode = iomode,
1066                .offset = pos,
1067                .length = count,
1068        };
1069        unsigned pg_offset;
1070        struct nfs_server *server = NFS_SERVER(ino);
1071        struct nfs_client *clp = server->nfs_client;
1072        struct pnfs_layout_hdr *lo;
1073        struct pnfs_layout_segment *lseg = NULL;
1074        bool first = false;
1075
1076        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
1077                goto out;
1078
1079        if (pnfs_within_mdsthreshold(ctx, ino, iomode))
1080                goto out;
1081
1082        spin_lock(&ino->i_lock);
1083        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1084        if (lo == NULL) {
1085                spin_unlock(&ino->i_lock);
1086                goto out;
1087        }
1088
1089        /* Do we even need to bother with this? */
1090        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1091                dprintk("%s matches recall, use MDS\n", __func__);
1092                goto out_unlock;
1093        }
1094
1095        /* if LAYOUTGET already failed once we don't try again */
1096        if (pnfs_layout_io_test_failed(lo, iomode))
1097                goto out_unlock;
1098
1099        /* Check to see if the layout for the given range already exists */
1100        lseg = pnfs_find_lseg(lo, &arg);
1101        if (lseg)
1102                goto out_unlock;
1103
1104        if (pnfs_layoutgets_blocked(lo, 0))
1105                goto out_unlock;
1106        atomic_inc(&lo->plh_outstanding);
1107
1108        if (list_empty(&lo->plh_segs))
1109                first = true;
1110
1111        spin_unlock(&ino->i_lock);
1112        if (first) {
1113                /* The lo must be on the clp list if there is any
1114                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
1115                 */
1116                spin_lock(&clp->cl_lock);
1117                list_add_tail(&lo->plh_layouts, &server->layouts);
1118                spin_unlock(&clp->cl_lock);
1119        }
1120
1121        pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1122        if (pg_offset) {
1123                arg.offset -= pg_offset;
1124                arg.length += pg_offset;
1125        }
1126        if (arg.length != NFS4_MAX_UINT64)
1127                arg.length = PAGE_CACHE_ALIGN(arg.length);
1128
1129        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1130        atomic_dec(&lo->plh_outstanding);
1131out_put_layout_hdr:
1132        pnfs_put_layout_hdr(lo);
1133out:
1134        dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1135                        "(%s, offset: %llu, length: %llu)\n",
1136                        __func__, ino->i_sb->s_id,
1137                        (unsigned long long)NFS_FILEID(ino),
1138                        lseg == NULL ? "not found" : "found",
 1139                        iomode == IOMODE_RW ? "read/write" : "read-only",
1140                        (unsigned long long)pos,
1141                        (unsigned long long)count);
1142        return lseg;
1143out_unlock:
1144        spin_unlock(&ino->i_lock);
1145        goto out_put_layout_hdr;
1146}
1147EXPORT_SYMBOL_GPL(pnfs_update_layout);
1148
1149struct pnfs_layout_segment *
1150pnfs_layout_process(struct nfs4_layoutget *lgp)
1151{
1152        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1153        struct nfs4_layoutget_res *res = &lgp->res;
1154        struct pnfs_layout_segment *lseg;
1155        struct inode *ino = lo->plh_inode;
1156        int status = 0;
1157
1158        /* Inject layout blob into I/O device driver */
1159        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1160        if (!lseg || IS_ERR(lseg)) {
1161                if (!lseg)
1162                        status = -ENOMEM;
1163                else
1164                        status = PTR_ERR(lseg);
1165                dprintk("%s: Could not allocate layout: error %d\n",
1166                       __func__, status);
1167                goto out;
1168        }
1169
1170        spin_lock(&ino->i_lock);
1171        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1172                dprintk("%s forget reply due to recall\n", __func__);
1173                goto out_forget_reply;
1174        }
1175
1176        if (pnfs_layoutgets_blocked(lo, 1) ||
1177            pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1178                dprintk("%s forget reply due to state\n", __func__);
1179                goto out_forget_reply;
1180        }
1181
1182        /* Done processing layoutget. Set the layout stateid */
1183        pnfs_set_layout_stateid(lo, &res->stateid, false);
1184
1185        init_lseg(lo, lseg);
1186        lseg->pls_range = res->range;
1187        pnfs_get_lseg(lseg);
1188        pnfs_layout_insert_lseg(lo, lseg);
1189
1190        if (res->return_on_close) {
1191                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1192                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1193        }
1194
1195        spin_unlock(&ino->i_lock);
1196        return lseg;
1197out:
1198        return ERR_PTR(status);
1199
1200out_forget_reply:
1201        spin_unlock(&ino->i_lock);
1202        lseg->pls_layout = lo;
1203        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1204        goto out;
1205}
1206
1207void
1208pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1209{
1210        u64 rd_size = req->wb_bytes;
1211
1212        WARN_ON_ONCE(pgio->pg_lseg != NULL);
1213
1214        if (req->wb_offset != req->wb_pgbase) {
1215                nfs_pageio_reset_read_mds(pgio);
1216                return;
1217        }
1218
1219        if (pgio->pg_dreq == NULL)
1220                rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
1221        else
1222                rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1223
1224        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1225                                           req->wb_context,
1226                                           req_offset(req),
1227                                           rd_size,
1228                                           IOMODE_READ,
1229                                           GFP_KERNEL);
1230        /* If no lseg, fall back to read through mds */
1231        if (pgio->pg_lseg == NULL)
1232                nfs_pageio_reset_read_mds(pgio);
1233
1234}
1235EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1236
1237void
1238pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
1239                           struct nfs_page *req, u64 wb_size)
1240{
1241        WARN_ON_ONCE(pgio->pg_lseg != NULL);
1242
1243        if (req->wb_offset != req->wb_pgbase) {
1244                nfs_pageio_reset_write_mds(pgio);
1245                return;
1246        }
1247
1248        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1249                                           req->wb_context,
1250                                           req_offset(req),
1251                                           wb_size,
1252                                           IOMODE_RW,
1253                                           GFP_NOFS);
1254        /* If no lseg, fall back to write through mds */
1255        if (pgio->pg_lseg == NULL)
1256                nfs_pageio_reset_write_mds(pgio);
1257}
1258EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1259
1260void
1261pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1262                      const struct nfs_pgio_completion_ops *compl_ops)
1263{
1264        struct nfs_server *server = NFS_SERVER(inode);
1265        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1266
1267        if (ld == NULL)
1268                nfs_pageio_init_read(pgio, inode, compl_ops);
1269        else
1270                nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
1271}
1272
1273void
1274pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1275                       int ioflags,
1276                       const struct nfs_pgio_completion_ops *compl_ops)
1277{
1278        struct nfs_server *server = NFS_SERVER(inode);
1279        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1280
1281        if (ld == NULL)
1282                nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
1283        else
1284                nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
1285}
1286
1287bool
1288pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1289                     struct nfs_page *req)
1290{
1291        if (pgio->pg_lseg == NULL)
1292                return nfs_generic_pg_test(pgio, prev, req);
1293
1294        /*
1295         * Test if a nfs_page is fully contained in the pnfs_layout_range.
1296         * Note that this test makes several assumptions:
1297         * - that the previous nfs_page in the struct nfs_pageio_descriptor
1298         *   is known to lie within the range.
 1299         * - that the nfs_page being tested is known to be contiguous with
 1300         *   the previous nfs_page.
 1301         * - Layout ranges are page aligned, so we only have to test the
 1302         *   start offset of the request.
1303         *
1304         * Please also note that 'end_offset' is actually the offset of the
1305         * first byte that lies outside the pnfs_layout_range. FIXME?
1306         *
1307         */
1308        return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1309                                         pgio->pg_lseg->pls_range.length);
1310}
1311EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1312
1313int pnfs_write_done_resend_to_mds(struct inode *inode,
1314                                struct list_head *head,
1315                                const struct nfs_pgio_completion_ops *compl_ops)
1316{
1317        struct nfs_pageio_descriptor pgio;
1318        LIST_HEAD(failed);
1319
1320        /* Resend all requests through the MDS */
1321        nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
1322        while (!list_empty(head)) {
1323                struct nfs_page *req = nfs_list_entry(head->next);
1324
1325                nfs_list_remove_request(req);
1326                if (!nfs_pageio_add_request(&pgio, req))
1327                        nfs_list_add_request(req, &failed);
1328        }
1329        nfs_pageio_complete(&pgio);
1330
1331        if (!list_empty(&failed)) {
 1332                /* For some reason our attempt to resend pages failed.
 1333                 * Mark the overall send request as having failed, and let
1334                 * nfs_writeback_release_full deal with the error.
1335                 */
1336                list_move(&failed, head);
1337                return -EIO;
1338        }
1339        return 0;
1340}
1341EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1342
1343static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1344{
1345        struct nfs_pgio_header *hdr = data->header;
1346
1347        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1348        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1349            PNFS_LAYOUTRET_ON_ERROR) {
1350                clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1351                pnfs_return_layout(hdr->inode);
1352        }
1353        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1354                data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
1355                                                        &hdr->pages,
1356                                                        hdr->completion_ops);
1357}
1358
1359/*
 1360 * Called by non-RPC-based layout drivers
1361 */
1362void pnfs_ld_write_done(struct nfs_write_data *data)
1363{
1364        struct nfs_pgio_header *hdr = data->header;
1365
1366        if (!hdr->pnfs_error) {
1367                pnfs_set_layoutcommit(data);
1368                hdr->mds_ops->rpc_call_done(&data->task, data);
1369        } else
1370                pnfs_ld_handle_write_error(data);
1371        hdr->mds_ops->rpc_release(data);
1372}
1373EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1374
1375static void
1376pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1377                struct nfs_write_data *data)
1378{
1379        struct nfs_pgio_header *hdr = data->header;
1380
1381        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1382                list_splice_tail_init(&hdr->pages, &desc->pg_list);
1383                nfs_pageio_reset_write_mds(desc);
1384                desc->pg_recoalesce = 1;
1385        }
1386        nfs_writedata_release(data);
1387}
1388
1389static enum pnfs_try_status
1390pnfs_try_to_write_data(struct nfs_write_data *wdata,
1391                        const struct rpc_call_ops *call_ops,
1392                        struct pnfs_layout_segment *lseg,
1393                        int how)
1394{
1395        struct nfs_pgio_header *hdr = wdata->header;
1396        struct inode *inode = hdr->inode;
1397        enum pnfs_try_status trypnfs;
1398        struct nfs_server *nfss = NFS_SERVER(inode);
1399
1400        hdr->mds_ops = call_ops;
1401
1402        dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1403                inode->i_ino, wdata->args.count, wdata->args.offset, how);
1404        trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1405        if (trypnfs != PNFS_NOT_ATTEMPTED)
1406                nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1407        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1408        return trypnfs;
1409}
1410
1411static void
1412pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1413{
1414        struct nfs_write_data *data;
1415        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1416        struct pnfs_layout_segment *lseg = desc->pg_lseg;
1417
1418        desc->pg_lseg = NULL;
1419        while (!list_empty(head)) {
1420                enum pnfs_try_status trypnfs;
1421
1422                data = list_first_entry(head, struct nfs_write_data, list);
1423                list_del_init(&data->list);
1424
1425                trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1426                if (trypnfs == PNFS_NOT_ATTEMPTED)
1427                        pnfs_write_through_mds(desc, data);
1428        }
1429        pnfs_put_lseg(lseg);
1430}
1431
1432static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1433{
1434        pnfs_put_lseg(hdr->lseg);
1435        nfs_writehdr_free(hdr);
1436}
1437EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
1438
1439int
1440pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1441{
1442        struct nfs_write_header *whdr;
1443        struct nfs_pgio_header *hdr;
1444        int ret;
1445
1446        whdr = nfs_writehdr_alloc();
1447        if (!whdr) {
1448                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1449                pnfs_put_lseg(desc->pg_lseg);
1450                desc->pg_lseg = NULL;
1451                return -ENOMEM;
1452        }
1453        hdr = &whdr->header;
1454        nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1455        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1456        atomic_inc(&hdr->refcnt);
1457        ret = nfs_generic_flush(desc, hdr);
1458        if (ret != 0) {
1459                pnfs_put_lseg(desc->pg_lseg);
1460                desc->pg_lseg = NULL;
1461        } else
1462                pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
1463        if (atomic_dec_and_test(&hdr->refcnt))
1464                hdr->completion_ops->completion(hdr);
1465        return ret;
1466}
1467EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1468
1469int pnfs_read_done_resend_to_mds(struct inode *inode,
1470                                struct list_head *head,
1471                                const struct nfs_pgio_completion_ops *compl_ops)
1472{
1473        struct nfs_pageio_descriptor pgio;
1474        LIST_HEAD(failed);
1475
1476        /* Resend all requests through the MDS */
1477        nfs_pageio_init_read(&pgio, inode, compl_ops);
1478        while (!list_empty(head)) {
1479                struct nfs_page *req = nfs_list_entry(head->next);
1480
1481                nfs_list_remove_request(req);
1482                if (!nfs_pageio_add_request(&pgio, req))
1483                        nfs_list_add_request(req, &failed);
1484        }
1485        nfs_pageio_complete(&pgio);
1486
1487        if (!list_empty(&failed)) {
1488                list_move(&failed, head);
1489                return -EIO;
1490        }
1491        return 0;
1492}
1493EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1494
static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
                clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
}

/*
 * Called by non-RPC-based layout drivers.
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (likely(!hdr->pnfs_error)) {
                __nfs4_read_done_cb(data);
                hdr->mds_ops->rpc_call_done(&data->task, data);
        } else
                pnfs_ld_handle_read_error(data);
        hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

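/*
 * The layout driver declined this read: splice the pages back onto the
 * descriptor and reset it to plain MDS I/O, so the pages are recoalesced
 * and resent without pNFS.
 */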
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_read_mds(desc);
                desc->pg_recoalesce = 1;
        }
        nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
                       const struct rpc_call_ops *call_ops,
                       struct pnfs_layout_segment *lseg)
{
        struct nfs_pgio_header *hdr = rdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_server *nfss = NFS_SERVER(inode);
        enum pnfs_try_status trypnfs;

        hdr->mds_ops = call_ops;

        dprintk("%s: Reading ino:%lu %u@%llu\n",
                __func__, inode->i_ino, rdata->args.count, rdata->args.offset);

        trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
}

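/*
 * Issue each read on @head through the layout driver, falling back to
 * the MDS for any request the driver does not attempt.  Consumes the
 * descriptor's layout segment reference.
 */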
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
        struct nfs_read_data *data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;

        desc->pg_lseg = NULL;
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;

                data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);

                trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
                if (trypnfs == PNFS_NOT_ATTEMPTED)
                        pnfs_read_through_mds(desc, data);
        }
        pnfs_put_lseg(lseg);
}

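/*
 * Drop the layout segment reference held by the header, then free the
 * read header itself.
 */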
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
        pnfs_put_lseg(hdr->lseg);
        nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

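/*
 * Read-side counterpart of pnfs_generic_pg_writepages(): allocate a
 * read header that takes over the descriptor's layout segment
 * reference, build the READ requests with nfs_generic_pagein(), and
 * issue them through the layout driver via pnfs_do_multiple_reads().
 */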
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_read_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;

        rhdr = nfs_readhdr_alloc();
        if (!rhdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return -ENOMEM;
        }
        hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pagein(desc, hdr);
        if (ret != 0) {
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_multiple_reads(desc, &hdr->rpc_list);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW layout segments, so gather every segment
 * that still needs a LAYOUTCOMMIT onto @listp.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
        struct pnfs_layout_segment *lseg;

        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
                if (lseg->pls_range.iomode == IOMODE_RW &&
                    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        list_add(&lseg->pls_lc_list, listp);
        }
}

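/*
 * Mark the segment's layout as failed for this segment's I/O mode, so
 * that subsequent I/O of that type falls back to the MDS.
 */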
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
        pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

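/*
 * Record that a pNFS write succeeded and a LAYOUTCOMMIT is now needed:
 * flag the inode and the layout segment, extend the last write byte
 * (plh_lwb), and mark the inode dirty so the commit is sent on the
 * next writeback.
 */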
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
        struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos = wdata->mds_offset + wdata->res.count;
        bool mark_as_dirty = false;

        spin_lock(&inode->i_lock);
        if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                mark_as_dirty = true;
                dprintk("%s: Set layoutcommit for inode %lu\n",
                        __func__, inode->i_ino);
        }
        if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
                /* references matched in nfs4_layoutcommit_release */
                pnfs_get_lseg(hdr->lseg);
        }
        if (end_pos > nfsi->layout->plh_lwb)
                nfsi->layout->plh_lwb = end_pos;
        spin_unlock(&inode->i_lock);
        dprintk("%s: lseg %p end_pos %llu\n",
                __func__, hdr->lseg, nfsi->layout->plh_lwb);

        /* If pnfs_layoutcommit_inode() runs between the i_lock sections,
         * the next layoutcommit will be a no-op because
         * NFS_INO_LAYOUTCOMMIT will not be set. */
        if (mark_as_dirty)
                mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

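/*
 * Give the layout driver a chance to clean up after a LAYOUTCOMMIT
 * reply, if it has registered a cleanup_layoutcommit method.
 */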
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);

        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off and either a COMMIT is sent to a data server, or WRITEs to a
 * data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
        int status = 0;

        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return 0;

        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data) {
                status = -ENOMEM;
                goto out;
        }

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                goto out_free;

        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync) {
                        status = -EAGAIN;
                        goto out_free;
                }
                status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
                                        nfs_wait_bit_killable, TASK_KILLABLE);
                if (status)
                        goto out_free;
        }

        INIT_LIST_HEAD(&data->lseg_list);
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
                spin_unlock(&inode->i_lock);
                wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
                goto out_free;
        }

        pnfs_list_write_lseg(inode, &data->lseg_list);

        end_pos = nfsi->layout->plh_lwb;
        nfsi->layout->plh_lwb = 0;

        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
        spin_unlock(&inode->i_lock);

        data->args.inode = inode;
        data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
        nfs_fattr_init(&data->fattr);
        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
        data->res.fattr = &data->fattr;
        data->args.lastbytewritten = end_pos - 1;
        data->res.server = NFS_SERVER(inode);

        status = nfs4_proc_layoutcommit(data, sync);
out:
        if (status)
                mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
out_free:
        kfree(data);
        goto out;
}

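/*
 * Allocate a zeroed nfs4_threshold, used to hold the server's
 * mdsthreshold hints that decide when I/O should go to the MDS
 * instead of through a layout.
 */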
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
        struct nfs4_threshold *thp;

        thp = kzalloc(sizeof(*thp), GFP_NOFS);
        if (!thp) {
                dprintk("%s mdsthreshold allocation failed\n", __func__);
                return NULL;
        }
        return thp;
}
