linux/fs/nfs/flexfilelayout/flexfilelayoutdev.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Device operations for the pnfs nfs4 flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

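/*
 * Data server connection timeout and retransmit defaults. Both are
 * exported as module parameters (see the bottom of this file) and are
 * passed to nfs4_pnfs_ds_connect() when establishing DS connections.
 */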
static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
        if (!IS_ERR_OR_NULL(mirror_ds))
                nfs4_put_deviceid_node(&mirror_ds->id_node);
}

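/*
 * Final teardown of a deviceid node: drop the reference on the data
 * server, free the per-version array, and release the node itself once
 * an RCU grace period has elapsed.
 */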
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
        nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
        nfs4_pnfs_ds_put(mirror_ds->ds);
        kfree(mirror_ds->ds_versions);
        kfree_rcu(mirror_ds, id_node.rcu);
}

/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
                            gfp_t gfp_flags)
{
        struct xdr_stream stream;
        struct xdr_buf buf;
        struct page *scratch;
        struct list_head dsaddrs;
        struct nfs4_pnfs_ds_addr *da;
        struct nfs4_ff_layout_ds *new_ds = NULL;
        struct nfs4_ff_ds_version *ds_versions = NULL;
        u32 mp_count;
        u32 version_count;
        __be32 *p;
        int i, ret = -ENOMEM;

        /* set up xdr stream */
        scratch = alloc_page(gfp_flags);
        if (!scratch)
                goto out_err;

        new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
        if (!new_ds)
                goto out_scratch;

        nfs4_init_deviceid_node(&new_ds->id_node,
                                server,
                                &pdev->dev_id);
        INIT_LIST_HEAD(&dsaddrs);

        xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
        xdr_set_scratch_page(&stream, scratch);

        /* multipath count */
        p = xdr_inline_decode(&stream, 4);
        if (unlikely(!p))
                goto out_err_drain_dsaddrs;
        mp_count = be32_to_cpup(p);
        dprintk("%s: multipath ds count %d\n", __func__, mp_count);

        for (i = 0; i < mp_count; i++) {
                /* multipath ds */
                da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
                                            &stream, gfp_flags);
                if (da)
                        list_add_tail(&da->da_node, &dsaddrs);
        }
        if (list_empty(&dsaddrs)) {
                dprintk("%s: no suitable DS addresses found\n",
                        __func__);
                ret = -ENOMEDIUM;
                goto out_err_drain_dsaddrs;
        }

        /* version count */
        p = xdr_inline_decode(&stream, 4);
        if (unlikely(!p))
                goto out_err_drain_dsaddrs;
        version_count = be32_to_cpup(p);
        dprintk("%s: version count %d\n", __func__, version_count);

        ds_versions = kcalloc(version_count,
                              sizeof(struct nfs4_ff_ds_version),
                              gfp_flags);
        if (!ds_versions)
                goto out_err_drain_dsaddrs;

        for (i = 0; i < version_count; i++) {
                /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
                 * tightly_coupled(4) */
                p = xdr_inline_decode(&stream, 20);
                if (unlikely(!p))
                        goto out_err_drain_dsaddrs;
                ds_versions[i].version = be32_to_cpup(p++);
                ds_versions[i].minor_version = be32_to_cpup(p++);
                ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
                ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
                ds_versions[i].tightly_coupled = be32_to_cpup(p);

                if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
                        ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
                if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
                        ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

                /*
                 * Check for a valid major/minor combination.
                 * Currently we support data servers that talk:
                 *   v3, v4.0, v4.1, v4.2
                 */
                if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
                        (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
                        dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
                                i, ds_versions[i].version,
                                ds_versions[i].minor_version);
                        ret = -EPROTONOSUPPORT;
                        goto out_err_drain_dsaddrs;
                }

                dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
                        __func__, i, ds_versions[i].version,
                        ds_versions[i].minor_version,
                        ds_versions[i].rsize,
                        ds_versions[i].wsize,
                        ds_versions[i].tightly_coupled);
        }

        new_ds->ds_versions = ds_versions;
        new_ds->ds_versions_cnt = version_count;

        new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
        if (!new_ds->ds)
                goto out_err_drain_dsaddrs;

        /* If DS was already in cache, free ds addrs */
        while (!list_empty(&dsaddrs)) {
                da = list_first_entry(&dsaddrs,
                                      struct nfs4_pnfs_ds_addr,
                                      da_node);
                list_del_init(&da->da_node);
                kfree(da->da_remotestr);
                kfree(da);
        }

        __free_page(scratch);
        return new_ds;

out_err_drain_dsaddrs:
        while (!list_empty(&dsaddrs)) {
                da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
                                      da_node);
                list_del_init(&da->da_node);
                kfree(da->da_remotestr);
                kfree(da);
        }

        kfree(ds_versions);
out_scratch:
        __free_page(scratch);
out_err:
        kfree(new_ds);

        dprintk("%s ERROR: returning %d\n", __func__, ret);
        return NULL;
}

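/*
 * Grow an existing error record so that it also covers the byte range
 * [offset, offset + length).
 */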
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
                            u64 offset, u64 length)
{
        u64 end;

        end = max_t(u64, pnfs_end_offset(err->offset, err->length),
                    pnfs_end_offset(offset, length));
        err->offset = min_t(u64, err->offset, offset);
        err->length = end - err->offset;
}

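/*
 * Compare two error records for the insertion sort in
 * ff_layout_add_ds_error_locked(): order by opnum, status, stateid and
 * deviceid, and treat records whose byte ranges overlap or touch as
 * equal so that they can be merged.
 */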
static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
                const struct nfs4_ff_layout_ds_err *e2)
{
        int ret;

        if (e1->opnum != e2->opnum)
                return e1->opnum < e2->opnum ? -1 : 1;
        if (e1->status != e2->status)
                return e1->status < e2->status ? -1 : 1;
        ret = memcmp(e1->stateid.data, e2->stateid.data,
                        sizeof(e1->stateid.data));
        if (ret != 0)
                return ret;
        ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
        if (ret != 0)
                return ret;
        if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
                return -1;
        if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
                return 1;
        /* If ranges overlap or are contiguous, they are the same */
        return 0;
}

static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
                              struct nfs4_ff_layout_ds_err *dserr)
{
        struct nfs4_ff_layout_ds_err *err, *tmp;
        struct list_head *head = &flo->error_list;
        int match;

        /* Do insertion sort w/ merges */
        list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
                match = ff_ds_error_match(err, dserr);
                if (match < 0)
                        continue;
                if (match > 0) {
                        /* Add entry "dserr" _before_ entry "err" */
                        head = &err->list;
                        break;
                }
                /* Entries match, so merge "err" into "dserr" */
                extend_ds_error(dserr, err->offset, err->length);
                list_replace(&err->list, &dserr->list);
                kfree(err);
                return;
        }

        list_add_tail(&dserr->list, head);
}

int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
                             struct nfs4_ff_layout_mirror *mirror, u64 offset,
                             u64 length, int status, enum nfs_opnum4 opnum,
                             gfp_t gfp_flags)
{
        struct nfs4_ff_layout_ds_err *dserr;

        if (status == 0)
                return 0;

        if (IS_ERR_OR_NULL(mirror->mirror_ds))
                return -EINVAL;

        dserr = kmalloc(sizeof(*dserr), gfp_flags);
        if (!dserr)
                return -ENOMEM;

        INIT_LIST_HEAD(&dserr->list);
        dserr->offset = offset;
        dserr->length = length;
        dserr->status = status;
        dserr->opnum = opnum;
        nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
        memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
               NFS4_DEVICEID4_SIZE);

        spin_lock(&flo->generic_hdr.plh_inode->i_lock);
        ff_layout_add_ds_error_locked(flo, dserr);
        spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
        return 0;
}

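/*
 * Look up the DS credential that was handed out with the layout for
 * this I/O mode (ro_cred for reads, rw_cred otherwise) and take a
 * reference to it under RCU. Returns NULL if no such credential is set.
 */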
static const struct cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
        const struct cred *cred, __rcu **pcred;

        if (iomode == IOMODE_READ)
                pcred = &mirror->ro_cred;
        else
                pcred = &mirror->rw_cred;

        rcu_read_lock();
        do {
                cred = rcu_dereference(*pcred);
                if (!cred)
                        break;

                cred = get_cred_rcu(cred);
        } while (!cred);
        rcu_read_unlock();
        return cred;
}

struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror)
{
        /* FIXME: For now assume there is only 1 version available for the DS */
        return &mirror->fh_versions[0];
}

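/*
 * Only NFSv4 data servers use the layout stateid handed out by the MDS;
 * for other versions the caller's stateid is left untouched.
 */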
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
                nfs4_stateid *stateid)
{
        if (nfs4_ff_layout_ds_version(mirror) == 4)
                nfs4_stateid_copy(stateid, &mirror->stateid);
}

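/*
 * Make sure the mirror has a deviceid node attached, looking it up (and
 * possibly fetching it from the MDS) on first use. Returns false if the
 * mirror is missing or its device could not be found.
 */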
static bool
ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
                         struct nfs4_ff_layout_mirror *mirror)
{
        if (mirror == NULL)
                goto outerr;
        if (mirror->mirror_ds == NULL) {
                struct nfs4_deviceid_node *node;
                struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

                node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
                                &mirror->devid, lo->plh_lc_cred,
                                GFP_KERNEL);
                if (node)
                        mirror_ds = FF_LAYOUT_MIRROR_DS(node);

                /* check for race with another call to this function */
                if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
                    mirror_ds != ERR_PTR(-ENODEV))
                        nfs4_put_deviceid_node(node);
        }

        if (IS_ERR(mirror->mirror_ds))
                goto outerr;

        return true;
outerr:
        return false;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @mirror: layout mirror describing the DS to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since we only need a single functioning mirror to satisfy a read, we don't
 * want to return the layout if there is one. For writes though, any down
 * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
 * between the two cases.
 *
 * Returns a pointer to a connected DS object on success or NULL on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
                          struct nfs4_ff_layout_mirror *mirror,
                          bool fail_return)
{
        struct nfs4_pnfs_ds *ds = NULL;
        struct inode *ino = lseg->pls_layout->plh_inode;
        struct nfs_server *s = NFS_SERVER(ino);
        unsigned int max_payload;
        int status;

        if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
                goto noconnect;

        ds = mirror->mirror_ds->ds;
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
        smp_rmb();
        if (ds->ds_clp)
                goto out;

        /* FIXME: For now we assume the server sent only one version of NFS
         * to use for the DS.
         */
        status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
                             dataserver_timeo, dataserver_retrans,
                             mirror->mirror_ds->ds_versions[0].version,
                             mirror->mirror_ds->ds_versions[0].minor_version);

        /* connect success, check rsize/wsize limit */
        if (!status) {
                max_payload =
                        nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
                                       NULL);
                if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
                        mirror->mirror_ds->ds_versions[0].rsize = max_payload;
                if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
                goto out;
        }
noconnect:
        ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
                                 mirror, lseg->pls_range.offset,
                                 lseg->pls_range.length, NFS4ERR_NXIO,
                                 OP_ILLEGAL, GFP_NOIO);
        ff_layout_send_layouterror(lseg);
        if (fail_return || !ff_layout_has_available_ds(lseg))
                pnfs_error_mark_layout_for_return(ino, lseg);
        ds = NULL;
out:
        return ds;
}

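/*
 * Pick the credential to use for DS I/O. For loosely coupled data
 * servers prefer the per-mirror credential from the layout, falling
 * back to the MDS credential; tightly coupled servers always use the
 * MDS credential.
 */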
const struct cred *
ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
                      const struct pnfs_layout_range *range,
                      const struct cred *mdscred)
{
        const struct cred *cred;

        if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
                cred = ff_layout_get_mirror_cred(mirror, range->iomode);
                if (!cred)
                        cred = get_cred(mdscred);
        } else {
                cred = get_cred(mdscred);
        }
        return cred;
}

/**
 * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
 * @mirror: pointer to the mirror
 * @ds_clp: nfs_client for the DS
 * @inode: pointer to inode
 *
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
                                 struct nfs_client *ds_clp, struct inode *inode)
{
        switch (mirror->mirror_ds->ds_versions[0].version) {
        case 3:
                /* For NFSv3 DS, flavor is set when creating DS connections */
                return ds_clp->cl_rpcclient;
        case 4:
                return nfs4_find_or_create_ds_client(ds_clp, inode);
        default:
                BUG();
        }
}

void ff_layout_free_ds_ioerr(struct list_head *head)
{
        struct nfs4_ff_layout_ds_err *err;

        while (!list_empty(head)) {
                err = list_first_entry(head,
                                struct nfs4_ff_layout_ds_err,
                                list);
                list_del(&err->list);
                kfree(err);
        }
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
        struct nfs4_ff_layout_ds_err *err;
        __be32 *p;

        list_for_each_entry(err, head, list) {
                /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
                 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
                 * + status(4) + opnum(4)
                 */
                p = xdr_reserve_space(xdr,
                                28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
                if (unlikely(!p))
                        return -ENOBUFS;
                p = xdr_encode_hyper(p, err->offset);
                p = xdr_encode_hyper(p, err->length);
                p = xdr_encode_opaque_fixed(p, &err->stateid,
                                            NFS4_STATEID_SIZE);
                /* Encode 1 error */
                *p++ = cpu_to_be32(1);
                p = xdr_encode_opaque_fixed(p, &err->deviceid,
                                            NFS4_DEVICEID4_SIZE);
                *p++ = cpu_to_be32(err->status);
                *p++ = cpu_to_be32(err->opnum);
                dprintk("%s: offset %llu length %llu status %d op %d\n",
                        __func__, err->offset, err->length, err->status,
                        err->opnum);
        }

        return 0;
}

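/*
 * Move up to @maxnum error records that intersect @range from the
 * layout's error list onto @head. Returns the number of records moved.
 */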
static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
                                      const struct pnfs_layout_range *range,
                                      struct list_head *head,
                                      unsigned int maxnum)
{
        struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
        struct inode *inode = lo->plh_inode;
        struct nfs4_ff_layout_ds_err *err, *n;
        unsigned int ret = 0;

        spin_lock(&inode->i_lock);
        list_for_each_entry_safe(err, n, &flo->error_list, list) {
                if (!pnfs_is_range_intersecting(err->offset,
                                pnfs_end_offset(err->offset, err->length),
                                range->offset,
                                pnfs_end_offset(range->offset, range->length)))
                        continue;
                if (!maxnum)
                        break;
                list_move(&err->list, head);
                maxnum--;
                ret++;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

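/*
 * Fetch at most @maxnum error records for @range. If the limit is hit,
 * any further matching records are drained and freed so that stale
 * errors are not left behind on the layout.
 */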
unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
                                      const struct pnfs_layout_range *range,
                                      struct list_head *head,
                                      unsigned int maxnum)
{
        unsigned int ret;

        ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
        /* If we're over the max, discard all remaining entries */
        if (ret == maxnum) {
                LIST_HEAD(discard);
                do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
                ff_layout_free_ds_ioerr(&discard);
        }
        return ret;
}

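/*
 * For reads, a single usable mirror is enough: return true as soon as
 * one mirror either has no deviceid attached yet or has a device that
 * is not marked unavailable.
 */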
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
        struct nfs4_ff_layout_mirror *mirror;
        struct nfs4_deviceid_node *devid;
        u32 idx;

        for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
                mirror = FF_LAYOUT_COMP(lseg, idx);
                if (mirror) {
                        if (!mirror->mirror_ds)
                                return true;
                        if (IS_ERR(mirror->mirror_ds))
                                continue;
                        devid = &mirror->mirror_ds->id_node;
                        if (!nfs4_test_deviceid_unavailable(devid))
                                return true;
                }
        }

        return false;
}

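/*
 * For writes, every mirror must be usable: return false if any mirror
 * is missing, failed to initialise, or has a device that is marked
 * unavailable.
 */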
static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
        struct nfs4_ff_layout_mirror *mirror;
        struct nfs4_deviceid_node *devid;
        u32 idx;

        for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
                mirror = FF_LAYOUT_COMP(lseg, idx);
                if (!mirror || IS_ERR(mirror->mirror_ds))
                        return false;
                if (!mirror->mirror_ds)
                        continue;
                devid = &mirror->mirror_ds->id_node;
                if (nfs4_test_deviceid_unavailable(devid))
                        return false;
        }

        return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
        if (lseg->pls_range.iomode == IOMODE_READ)
                return ff_read_layout_has_available_ds(lseg);
        /* Note: RW layout needs all mirrors available */
        return ff_rw_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
        return ff_layout_no_fallback_to_mds(lseg) ||
               ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
        return lseg->pls_range.iomode == IOMODE_RW &&
               ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
                        "retries a request before it attempts further "
                        "recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
                        "NFSv4.1 client waits for a response from a "
                        "data server before it retries an NFS request.");