/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p;
        p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->wb_list);
        }
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read/write
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must be able to sleep, as the allocation may block.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page         *req;

        for (;;) {
                /* try to allocate the request struct */
                req = nfs_page_alloc();
                if (req != NULL)
                        break;

                if (fatal_signal_pending(current))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}
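
/*
 * Usage sketch (not part of the original file): a typical caller creates a
 * request against a page it has already locked, checks for an allocation
 * error, and drops the creator's reference when it is done with the request.
 * The variables ctx, inode, page and count are assumed to be provided by the
 * caller; this is illustration only, not a verbatim copy of a real call site.
 */
#if 0	/* illustrative only */
        struct nfs_page *req;

        /* the page must already be locked by the caller */
        req = nfs_create_request(ctx, inode, page, 0, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* ... queue the request for read or write I/O ... */
        nfs_release_request(req);       /* drop the creator's reference when done */
#endif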

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock and tag
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);

        if (!nfs_lock_request_dontget(req))
                return 0;
        if (req->wb_page != NULL)
                radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
        return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        if (req->wb_page != NULL) {
                spin_lock(&inode->i_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
                nfs_unlock_request(req);
                spin_unlock(&inode->i_lock);
        } else
                nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
}

/* kref release callback: frees the request once its last reference is dropped */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release the page and drop the open context */
        nfs_clear_request(req);
        put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        int ret = 0;

        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_killable, TASK_KILLABLE);
out:
        return ret;
}
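
/*
 * Usage sketch (not part of the original file): as the comment above notes,
 * the caller must hold its own reference across the wait. A minimal pattern,
 * with hypothetical variable names and error handling, looks roughly like
 * this; the wait may return an error if a fatal signal is pending.
 */
#if 0	/* illustrative only */
        kref_get(&req->wb_kref);                /* pin the request */
        error = nfs_wait_on_request(req);       /* may fail on a fatal signal */
        nfs_release_request(req);               /* drop our reference */
        if (error < 0)
                return error;
#endif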

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_inode = inode;
        desc->pg_doio = doio;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
                                     struct nfs_page *req)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return 0;
        if (req->wb_context->lockowner != prev->wb_context->lockowner)
                return 0;
        if (req->wb_context->state != prev->wb_context->state)
                return 0;
        if (req->wb_index != (prev->wb_index + 1))
                return 0;
        if (req->wb_pgbase != 0)
                return 0;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return 0;
        return 1;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        size_t newlen = req->wb_bytes;

        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                /*
                 * FIXME: ideally we should be able to coalesce all requests
                 * that are not block boundary aligned, but currently this
                 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
                 * since nfs_flush_multi and nfs_pagein_multi assume you
                 * can have only one struct nfs_page.
                 */
                if (desc->pg_bsize < PAGE_SIZE)
                        return 0;
                newlen += desc->pg_count;
                if (newlen > desc->pg_bsize)
                        return 0;
                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req))
                        return 0;
        } else
                desc->pg_base = req->wb_pgbase;
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count = newlen;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_doio(desc->pg_inode,
                                          &desc->pg_list,
                                          nfs_page_array_len(desc->pg_base,
                                                             desc->pg_count),
                                          desc->pg_count,
                                          desc->pg_ioflags);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
        }
        return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
                if (index != prev->wb_index + 1)
                        nfs_pageio_doio(desc);
        }
}
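
/*
 * Usage sketch (not part of the original file): the coalescing machinery
 * above is normally driven as "init, add requests one by one, complete".
 * The names example_doio and example_flush, the use of the server wsize as
 * the block size, and the zero io_flags value are assumptions made purely
 * for illustration; the real read and write paths supply their own callbacks.
 */
#if 0	/* illustrative only */
static int example_doio(struct inode *inode, struct list_head *head,
                        unsigned int npages, size_t count, int how)
{
        /* issue a single RPC covering every request queued on 'head' */
        return 0;
}

static void example_flush(struct inode *inode, struct list_head *requests)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;

        nfs_pageio_init(&desc, inode, example_doio, NFS_SERVER(inode)->wsize, 0);
        list_for_each_entry_safe(req, tmp, requests, wb_list) {
                /* flush anything queued that is not contiguous with 'req' */
                nfs_pageio_cond_complete(&desc, req->wb_index);
                if (!nfs_pageio_add_request(&desc, req))
                        break;          /* the failure is recorded in desc.pg_error */
        }
        nfs_pageio_complete(&desc);     /* send whatever is still queued */
}
#endif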

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
                struct list_head *dst, pgoff_t idx_start,
                unsigned int npages, int tag)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        pgoff_t idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES, tag);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (nfs_set_page_tag_locked(req)) {
                                kref_get(&req->wb_kref);
                                nfs_list_remove_request(req);
                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                                req->wb_index, tag);
                                nfs_list_add_request(req, dst);
                                res++;
                                if (res == INT_MAX)
                                        goto out;
                        }
                }
                /* for latency reduction */
                cond_resched_lock(&nfsi->vfs_inode.i_lock);
        }
out:
        return res;
}
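
/*
 * Usage sketch (not part of the original file): nfs_scan_list() must be
 * called under the inode's i_lock, and every request it moves to the
 * destination list comes back tag-locked with an extra reference, which
 * nfs_clear_page_tag_locked() drops again. The local variables and the
 * choice of NFS_PAGE_TAG_COMMIT as the tag are examples only.
 */
#if 0	/* illustrative only */
        struct nfs_inode *nfsi = NFS_I(inode);
        LIST_HEAD(head);
        struct nfs_page *req;
        int found;

        spin_lock(&inode->i_lock);
        found = nfs_scan_list(nfsi, &head, 0, 0, NFS_PAGE_TAG_COMMIT);
        spin_unlock(&inode->i_lock);

        while (!list_empty(&head)) {
                req = nfs_list_entry(head.next);
                nfs_list_remove_request(req);
                /* ... process the request ... */
                nfs_clear_page_tag_locked(req); /* unlock and drop the scan's reference */
        }
#endif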

int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}