// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
        return nfs_pgio_has_mirroring(desc) ?
                &desc->pg_mirrors[desc->pg_mirror_idx] :
                &desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr,
                       void (*release)(struct nfs_pgio_header *hdr))
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        hdr->req = nfs_list_entry(mirror->pg_list.next);
        hdr->inode = desc->pg_inode;
        hdr->cred = nfs_req_openctx(hdr->req)->cred;
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = mirror->pg_count;
        hdr->io_completion = desc->pg_io_completion;
        hdr->dreq = desc->pg_dreq;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        if (hdr->completion_ops->init_hdr)
                hdr->completion_ops->init_hdr(hdr);

        hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
        unsigned int new = pos - hdr->io_start;

        if (hdr->good_bytes > new) {
                hdr->good_bytes = new;
                clear_bit(NFS_IOHDR_EOF, &hdr->flags);
                if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
                        hdr->error = error;
        }
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
        return wait_var_event_killable(&l_ctx->io_count,
                                       !atomic_read(&l_ctx->io_count));
}
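
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that must drain all in-flight I/O on a lock context before proceeding
 * could do:
 *
 *	int err = nfs_iocounter_wait(l_ctx);
 *	if (err)
 *		return err;
 *
 * A nonzero return means the wait was interrupted by a fatal signal
 * (-ERESTARTSYS); on a zero return, l_ctx->io_count has reached zero.
 */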

/**
 * nfs_async_iocounter_wait - wait on an rpc_waitqueue for I/O to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
        struct inode *inode = d_inode(l_ctx->open_context->dentry);
        bool ret = false;

        if (atomic_read(&l_ctx->io_count) > 0) {
                rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
                ret = true;
        }

        if (atomic_read(&l_ctx->io_count) == 0) {
                rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
                ret = false;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
        struct nfs_page *head = req->wb_head;

        WARN_ON_ONCE(head != head->wb_head);

        if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
                return 0;

        set_bit(PG_CONTENDED1, &head->wb_flags);
        smp_mb__after_atomic();
        return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
                                TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
        struct nfs_page *head = req->wb_head;

        WARN_ON_ONCE(head != head->wb_head);

        smp_mb__before_atomic();
        clear_bit(PG_HEADLOCK, &head->wb_flags);
        smp_mb__after_atomic();
        if (!test_bit(PG_CONTENDED1, &head->wb_flags))
                return;
        wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}
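
/*
 * Usage sketch (illustrative, not part of the original file): walking the
 * circular wb_this_page list safely requires the group head lock:
 *
 *	nfs_page_group_lock(req);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		inspect(tmp);
 *	nfs_page_group_unlock(req);
 *
 * "inspect" is a placeholder for whatever per-subrequest work is needed.
 */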

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
        struct nfs_page *head = req->wb_head;
        struct nfs_page *tmp;

        WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
        WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

        tmp = req->wb_this_page;
        while (tmp != req) {
                if (!test_bit(bit, &tmp->wb_flags))
                        return false;
                tmp = tmp->wb_this_page;
        }

        /* true! reset all bits */
        tmp = req;
        do {
                clear_bit(bit, &tmp->wb_flags);
                tmp = tmp->wb_this_page;
        } while (tmp != req);

        return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
        bool ret;

        nfs_page_group_lock(req);
        ret = nfs_page_group_sync_on_bit_locked(req, bit);
        nfs_page_group_unlock(req);

        return ret;
}
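
/*
 * Illustrative sketch (not part of the original file): callers typically
 * use this to run an action exactly once, when the last member of a page
 * group reaches a given state. For example, a write completion path might
 * do:
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_WB_END))
 *		end_page_writeback(req->wb_page);
 *
 * Only the final request to set PG_WB_END sees "true", so the page-level
 * action runs a single time per group.
 */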

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
        struct inode *inode;
        WARN_ON_ONCE(prev == req);

        if (!prev) {
                /* a head request */
                req->wb_head = req;
                req->wb_this_page = req;
        } else {
                /* a subrequest */
                WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                req->wb_head = prev->wb_head;
                req->wb_this_page = prev->wb_this_page;
                prev->wb_this_page = req;

                /* All subrequests take a ref on the head request until
                 * nfs_page_group_destroy is called */
                kref_get(&req->wb_head->wb_kref);

                /* grab extra ref and bump the request count if head request
                 * has extra ref from the write/commit path to handle handoff
                 * between write and commit lists. */
                if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
                        inode = page_file_mapping(req->wb_page)->host;
                        set_bit(PG_INODE_REF, &req->wb_flags);
                        kref_get(&req->wb_kref);
                        atomic_long_inc(&NFS_I(inode)->nrequests);
                }
        }
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
        struct nfs_page *head = req->wb_head;
        struct nfs_page *tmp, *next;

        if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                goto out;

        tmp = req;
        do {
                next = tmp->wb_this_page;
                /* unlink and free */
                tmp->wb_this_page = tmp;
                tmp->wb_head = tmp;
                nfs_free_request(tmp);
                tmp = next;
        } while (tmp != req);
out:
        /* subrequests must release the ref on the head request */
        if (head != req)
                nfs_release_request(head);
}

static struct nfs_page *
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
                   unsigned int pgbase, unsigned int offset,
                   unsigned int count)
{
        struct nfs_page         *req;
        struct nfs_open_context *ctx = l_ctx->open_context;

        if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
                return ERR_PTR(-EBADF);
        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        req->wb_lock_context = l_ctx;
        refcount_inc(&l_ctx->count);
        atomic_inc(&l_ctx->io_count);

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. */
        req->wb_page    = page;
        if (page) {
                req->wb_index = page_index(page);
                get_page(page);
        }
        req->wb_offset  = offset;
        req->wb_pgbase  = pgbase;
        req->wb_bytes   = count;
        kref_init(&req->wb_kref);
        req->wb_nio = 0;
        return req;
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to read from or write to
 * @offset: starting offset within the page
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
        struct nfs_page *ret;

        if (IS_ERR(l_ctx))
                return ERR_CAST(l_ctx);
        ret = __nfs_create_request(l_ctx, page, offset, offset, count);
        if (!IS_ERR(ret))
                nfs_page_group_init(ret, NULL);
        nfs_put_lock_context(l_ctx);
        return ret;
}
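
/*
 * Illustrative sketch (not part of the original file): a read path might
 * build a request for a locked page and later drop its reference:
 *
 *	req = nfs_create_request(ctx, page, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...queue the request for I/O...
 *	nfs_release_request(req);
 *
 * "ctx" and "page" stand in for a caller's open context and locked page.
 */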

static struct nfs_page *
nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
                  unsigned int pgbase, unsigned int offset,
                  unsigned int count)
{
        struct nfs_page *ret;

        ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
                        pgbase, offset, count);
        if (!IS_ERR(ret)) {
                nfs_lock_request(ret);
                ret->wb_index = req->wb_index;
                nfs_page_group_init(ret, last);
                ret->wb_nio = req->wb_nio;
        }
        return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_atomic();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_atomic();
        if (!test_bit(PG_CONTENDED2, &req->wb_flags))
                return;
        wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
        nfs_unlock_request(req);
        nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: the request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;
        struct nfs_open_context *ctx;

        if (page != NULL) {
                put_page(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                if (atomic_dec_and_test(&l_ctx->io_count)) {
                        wake_up_var(&l_ctx->io_count);
                        ctx = l_ctx->open_context;
                        if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
                                rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
                }
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
}

/**
 * nfs_free_request - Free an NFS read/write request and its resources
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
        WARN_ON_ONCE(req->wb_this_page != req);

        /* extra debug: make sure no sync bits are still set */
        WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

        /* Release struct file and open context */
        nfs_clear_request(req);
        nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible: it only returns once PG_BUSY is clear.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        if (!test_bit(PG_BUSY, &req->wb_flags))
                return 0;
        set_bit(PG_CONTENDED2, &req->wb_flags);
        smp_mb__after_atomic();
        return wait_on_bit_io(&req->wb_flags, PG_BUSY,
                              TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *prev, struct nfs_page *req)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        if (mirror->pg_count > mirror->pg_bsize) {
                /* should never happen */
                WARN_ON_ONCE(1);
                return 0;
        }

        /*
         * Limit the request size so that we can still allocate a page array
         * for it without upsetting the slab allocator.
         */
        if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
                        sizeof(struct page *) > PAGE_SIZE)
                return 0;

        return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
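
/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * the page-array check above caps an I/O at the point where the pointer
 * array itself would exceed one page, i.e.
 *
 *	PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512 pages = 2 MiB
 *
 * so a request that would push the coalesced size past 2 MiB is refused
 * here and started as a separate RPC instead.
 */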

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
        struct nfs_pgio_header *hdr = ops->rw_alloc_header();

        if (hdr) {
                INIT_LIST_HEAD(&hdr->pages);
                hdr->rw_ops = ops;
        }
        return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
        if (hdr->args.context)
                put_nfs_open_context(hdr->args.context);
        if (hdr->page_array.pagevec != hdr->page_array.page_array)
                kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
        nfs_pgio_data_destroy(hdr);
        hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read or write
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
                              unsigned int count,
                              int how, struct nfs_commit_info *cinfo)
{
        struct nfs_page *req = hdr->req;

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with hdr->commit et al. */

        hdr->args.fh     = NFS_FH(hdr->inode);
        hdr->args.offset = req_offset(req);
        /* pnfs_set_layoutcommit needs this */
        hdr->mds_offset = hdr->args.offset;
        hdr->args.pgbase = req->wb_pgbase;
        hdr->args.pages  = hdr->page_array.pagevec;
        hdr->args.count  = count;
        hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
        hdr->args.lock_context = req->wb_lock_context;
        hdr->args.stable  = NFS_UNSTABLE;
        switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
        case 0:
                break;
        case FLUSH_COND_STABLE:
                if (nfs_reqs_to_commit(cinfo))
                        break;
                /* fall through */
        default:
                hdr->args.stable = NFS_FILE_SYNC;
        }

        hdr->res.fattr   = &hdr->fattr;
        hdr->res.count   = 0;
        hdr->res.eof     = 0;
        hdr->res.verf    = &hdr->verf;
        nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_header *hdr = calldata;
        int err;
        err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
        if (err)
                rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
                      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
                      const struct rpc_call_ops *call_ops, int how, int flags)
{
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &hdr->args,
                .rpc_resp = &hdr->res,
                .rpc_cred = cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = clnt,
                .task = &hdr->task,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = hdr,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | flags,
        };
        int ret = 0;

        hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

        dprintk("NFS: initiated pgio call "
                "(req %s/%llu, %u bytes @ offset %llu)\n",
                hdr->inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(hdr->inode),
                hdr->args.count,
                (unsigned long long)hdr->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task)) {
                ret = PTR_ERR(task);
                goto out;
        }
        if (how & FLUSH_SYNC) {
                ret = rpc_wait_for_completion_task(task);
                if (ret == 0)
                        ret = task->tk_status;
        }
        rpc_put_task(task);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
        struct nfs_pgio_header *hdr = calldata;
        hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
                                   unsigned int bsize)
{
        INIT_LIST_HEAD(&mirror->pg_list);
        mirror->pg_bytes_written = 0;
        mirror->pg_count = 0;
        mirror->pg_bsize = bsize;
        mirror->pg_base = 0;
        mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
                     const struct nfs_pgio_completion_ops *compl_ops,
                     const struct nfs_rw_ops *rw_ops,
                     size_t bsize,
                     int io_flags)
{
        desc->pg_moreio = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
        desc->pg_completion_ops = compl_ops;
        desc->pg_rw_ops = rw_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
        desc->pg_io_completion = NULL;
        desc->pg_dreq = NULL;
        desc->pg_bsize = bsize;

        desc->pg_mirror_count = 1;
        desc->pg_mirror_idx = 0;

        desc->pg_mirrors_dynamic = NULL;
        desc->pg_mirrors = desc->pg_mirrors_static;
        nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
        desc->pg_maxretrans = 0;
}
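
/*
 * Typical lifecycle sketch (illustrative, not part of the original file):
 * read and write paths initialise a descriptor, feed it locked requests,
 * and then flush whatever is left coalesced:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;		(pgio.pg_error holds the reason)
 *	nfs_pageio_complete(&pgio);
 *
 * In-tree callers use wrappers such as nfs_pageio_init_read() and
 * nfs_pageio_init_write() rather than calling nfs_pageio_init() directly.
 */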

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_header *hdr = calldata;
        struct inode *inode = hdr->inode;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__,
                task->tk_pid, task->tk_status);

        if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
                return;
        if (task->tk_status < 0)
                nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
        else
                hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
                     struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        struct nfs_page         *req;
        struct page             **pages,
                                *last_page;
        struct list_head *head = &mirror->pg_list;
        struct nfs_commit_info cinfo;
        struct nfs_page_array *pg_array = &hdr->page_array;
        unsigned int pagecount, pageused;
        gfp_t gfp_flags = GFP_KERNEL;

        pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
        pg_array->npages = pagecount;

        if (pagecount <= ARRAY_SIZE(pg_array->page_array))
                pg_array->pagevec = pg_array->page_array;
        else {
                pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
                if (!pg_array->pagevec) {
                        pg_array->npages = 0;
                        nfs_pgio_error(hdr);
                        desc->pg_error = -ENOMEM;
                        return desc->pg_error;
                }
        }

        nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
        pages = hdr->page_array.pagevec;
        last_page = NULL;
        pageused = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_move_request(req, &hdr->pages);

                if (!last_page || last_page != req->wb_page) {
                        pageused++;
                        if (pageused > pagecount)
                                break;
                        *pages++ = last_page = req->wb_page;
                }
        }
        if (WARN_ON_ONCE(pageused != pagecount)) {
                nfs_pgio_error(hdr);
                desc->pg_error = -EINVAL;
                return desc->pg_error;
        }

        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
                desc->pg_ioflags &= ~FLUSH_COND_STABLE;

        /* Set up the argument struct */
        nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
        desc->pg_rpc_callops = &nfs_pgio_common_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
        struct nfs_pgio_header *hdr;
        int ret;

        hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
        if (!hdr) {
                desc->pg_error = -ENOMEM;
                return desc->pg_error;
        }
        nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
        ret = nfs_generic_pgio(desc, hdr);
        if (ret == 0)
                ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
                                        hdr,
                                        hdr->cred,
                                        NFS_PROTO(hdr->inode),
                                        desc->pg_rpc_callops,
                                        desc->pg_ioflags, 0);
        return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
                unsigned int mirror_count)
{
        struct nfs_pgio_mirror *ret;
        unsigned int i;

        kfree(desc->pg_mirrors_dynamic);
        desc->pg_mirrors_dynamic = NULL;
        if (mirror_count == 1)
                return desc->pg_mirrors_static;
        ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
        if (ret != NULL) {
                for (i = 0; i < mirror_count; i++)
                        nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
                desc->pg_mirrors_dynamic = ret;
        }
        return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *                              by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
                                       struct nfs_page *req)
{
        unsigned int mirror_count = 1;

        if (pgio->pg_ops->pg_get_mirror_count)
                mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
        if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
                return;

        if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
                pgio->pg_error = -EINVAL;
                return;
        }

        pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
        if (pgio->pg_mirrors == NULL) {
                pgio->pg_error = -ENOMEM;
                pgio->pg_mirrors = pgio->pg_mirrors_static;
                mirror_count = 1;
        }
        pgio->pg_mirror_count = mirror_count;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_mirror_count = 1;
        pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_mirror_count = 1;
        pgio->pg_mirror_idx = 0;
        pgio->pg_mirrors = pgio->pg_mirrors_static;
        kfree(pgio->pg_mirrors_dynamic);
        pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
                const struct nfs_lock_context *l2)
{
        return l1->lockowner == l2->lockowner;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
{
        size_t size;
        struct file_lock_context *flctx;

        if (prev) {
                if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
                        return false;
                flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
                if (flctx != NULL &&
                    !(list_empty_careful(&flctx->flc_posix) &&
                      list_empty_careful(&flctx->flc_flock)) &&
                    !nfs_match_lock_context(req->wb_lock_context,
                                            prev->wb_lock_context))
                        return false;
                if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                        return false;
                if (req->wb_page == prev->wb_page) {
                        if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
                                return false;
                } else {
                        if (req->wb_pgbase != 0 ||
                            prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
                                return false;
                }
        }
        size = pgio->pg_ops->pg_test(pgio, prev, req);
        WARN_ON_ONCE(size > req->wb_bytes);
        if (size && size < req->wb_bytes)
                req->wb_bytes = size;
        return size > 0;
}
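
/*
 * Worked example (illustrative, assuming 4 KiB pages): a request covering
 * all of page N coalesces with one covering all of page N+1, because the
 * file offsets are contiguous, prev ends at PAGE_SIZE and req starts at
 * pgbase 0. Two requests on the same page, say bytes 0-511 and 512-1023,
 * coalesce because req->wb_pgbase picks up exactly where prev ended. A
 * gap in either sense fails the checks above and forces a separate RPC.
 */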

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        struct nfs_page *prev = NULL;

        if (mirror->pg_count != 0) {
                prev = nfs_list_entry(mirror->pg_list.prev);
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                if (desc->pg_error < 0)
                        return 0;
                mirror->pg_base = req->wb_pgbase;
        }

        if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
                if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
                        desc->pg_error = -ETIMEDOUT;
                else
                        desc->pg_error = -EIO;
                return 0;
        }

        if (!nfs_can_coalesce_requests(prev, req, desc))
                return 0;
        nfs_list_move_request(req, &mirror->pg_list);
        mirror->pg_count += req->wb_bytes;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        if (!list_empty(&mirror->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
                else
                        mirror->pg_bytes_written += mirror->pg_count;
        }
        if (list_empty(&mirror->pg_list)) {
                mirror->pg_count = 0;
                mirror->pg_base = 0;
        }
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
                struct nfs_page *req)
{
        LIST_HEAD(head);

        nfs_list_move_request(req, &head);
        desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

        struct nfs_page *subreq;
        unsigned int bytes_left = 0;
        unsigned int offset, pgbase;

        nfs_page_group_lock(req);

        subreq = req;
        bytes_left = subreq->wb_bytes;
        offset = subreq->wb_offset;
        pgbase = subreq->wb_pgbase;

        do {
                if (!nfs_pageio_do_add_request(desc, subreq)) {
                        /* make sure pg_test call(s) did nothing */
                        WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
                        WARN_ON_ONCE(subreq->wb_offset != offset);
                        WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

                        nfs_page_group_unlock(req);
                        desc->pg_moreio = 1;
                        nfs_pageio_doio(desc);
                        if (desc->pg_error < 0 || mirror->pg_recoalesce)
                                goto out_cleanup_subreq;
                        /* retry add_request for this subreq */
                        nfs_page_group_lock(req);
                        continue;
                }

                /* check for buggy pg_test call(s) */
                WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
                WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
                WARN_ON_ONCE(subreq->wb_bytes == 0);

                bytes_left -= subreq->wb_bytes;
                offset += subreq->wb_bytes;
                pgbase += subreq->wb_bytes;

                if (bytes_left) {
                        subreq = nfs_create_subreq(req, subreq, pgbase,
                                        offset, bytes_left);
                        if (IS_ERR(subreq))
                                goto err_ptr;
                }
        } while (bytes_left > 0);

        nfs_page_group_unlock(req);
        return 1;
err_ptr:
        desc->pg_error = PTR_ERR(subreq);
        nfs_page_group_unlock(req);
        return 0;
out_cleanup_subreq:
        if (req != subreq)
                nfs_pageio_cleanup_request(desc, subreq);
        return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
        LIST_HEAD(head);

        do {
                list_splice_init(&mirror->pg_list, &head);
                mirror->pg_bytes_written -= mirror->pg_count;
                mirror->pg_count = 0;
                mirror->pg_base = 0;
                mirror->pg_recoalesce = 0;

                while (!list_empty(&head)) {
                        struct nfs_page *req;

                        req = list_first_entry(&head, struct nfs_page, wb_list);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
                        if (desc->pg_error < 0) {
                                list_splice_tail(&head, &mirror->pg_list);
                                mirror->pg_recoalesce = 1;
                                return 0;
                        }
                        break;
                }
        } while (mirror->pg_recoalesce);
        return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
                struct nfs_page *req)
{
        int ret;

        do {
                ret = __nfs_pageio_add_request(desc, req);
                if (ret)
                        break;
                if (desc->pg_error < 0)
                        break;
                ret = nfs_do_recoalesce(desc);
        } while (ret);

        return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
        u32 midx;
        struct nfs_pgio_mirror *mirror;

        if (!desc->pg_error)
                return;

        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                mirror = &desc->pg_mirrors[midx];
                desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
                                desc->pg_error);
        }
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        u32 midx;
        unsigned int pgbase, offset, bytes;
        struct nfs_page *dupreq, *lastreq;

        pgbase = req->wb_pgbase;
        offset = req->wb_offset;
        bytes = req->wb_bytes;

        nfs_pageio_setup_mirroring(desc, req);
        if (desc->pg_error < 0)
                goto out_failed;

        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                if (midx) {
                        nfs_page_group_lock(req);

                        /* find the last request */
                        for (lastreq = req->wb_head;
                             lastreq->wb_this_page != req->wb_head;
                             lastreq = lastreq->wb_this_page)
                                ;

                        dupreq = nfs_create_subreq(req, lastreq,
                                        pgbase, offset, bytes);

                        nfs_page_group_unlock(req);
                        if (IS_ERR(dupreq)) {
                                desc->pg_error = PTR_ERR(dupreq);
                                goto out_failed;
                        }
                } else
                        dupreq = req;

                if (nfs_pgio_has_mirroring(desc))
                        desc->pg_mirror_idx = midx;
                if (!nfs_pageio_add_request_mirror(desc, dupreq))
                        goto out_cleanup_subreq;
        }

        return 1;

out_cleanup_subreq:
        if (req != dupreq)
                nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
        nfs_pageio_error_cleanup(desc);
        return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *                              nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
                                       u32 mirror_idx)
{
        struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
        u32 restore_idx = desc->pg_mirror_idx;

        if (nfs_pgio_has_mirroring(desc))
                desc->pg_mirror_idx = mirror_idx;
        for (;;) {
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0 || !mirror->pg_recoalesce)
                        break;
                if (!nfs_do_recoalesce(desc))
                        break;
        }
        desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move requests from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
                      struct nfs_pgio_header *hdr)
{
        LIST_HEAD(pages);

        desc->pg_io_completion = hdr->io_completion;
        desc->pg_dreq = hdr->dreq;
        list_splice_init(&hdr->pages, &pages);
        while (!list_empty(&pages)) {
                struct nfs_page *req = nfs_list_entry(pages.next);

                if (!nfs_pageio_add_request(desc, req))
                        break;
        }
        nfs_pageio_complete(desc);
        if (!list_empty(&pages)) {
                int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
                hdr->completion_ops->error_cleanup(&pages, err);
                nfs_set_pgio_error(hdr, err, hdr->io_start);
                return err;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
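
/*
 * Illustrative sketch (not part of the original file): a pNFS error path
 * that needs to re-drive an I/O through the MDS might resend a completed
 * header via a freshly initialised descriptor, roughly:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
 *	return nfs_pageio_resend(&pgio, hdr);
 *
 * This mirrors how the pnfs read/write resend paths reuse the generic
 * coalescing machinery after a layout-specific failure.
 */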

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        u32 midx;

        for (midx = 0; midx < desc->pg_mirror_count; midx++)
                nfs_pageio_complete_mirror(desc, midx);

        if (desc->pg_error < 0)
                nfs_pageio_error_cleanup(desc);
        if (desc->pg_ops->pg_cleanup)
                desc->pg_ops->pg_cleanup(desc);
        nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        struct nfs_pgio_mirror *mirror;
        struct nfs_page *prev;
        u32 midx;

        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                mirror = &desc->pg_mirrors[midx];
                if (!list_empty(&mirror->pg_list)) {
                        prev = nfs_list_entry(mirror->pg_list.prev);
                        if (index != prev->wb_index + 1) {
                                nfs_pageio_complete(desc);
                                break;
                        }
                }
        }
}

int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
        .rpc_call_prepare = nfs_pgio_prepare,
        .rpc_call_done = nfs_pgio_result,
        .rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_pgios,
};