linux/fs/nfs/read.c
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

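/*
 * Allocate a zeroed read header from the read slab cache. Paired with
 * nfs_readhdr_free() below; both are wired up through nfs_rw_read_ops.
 */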
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
        return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

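/*
 * The page lies entirely beyond the end of file: zero it, mark it
 * up to date and unlock it without issuing any I/O.
 */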
static
int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

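/*
 * Initialize a pageio descriptor for reads. If the server has a pNFS
 * layout driver and @force_mds is false, I/O is routed through the
 * layout driver's read ops; otherwise it goes to the MDS via the
 * generic nfs_pgio_rw_ops, with coalescing bounded by the server rsize.
 */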
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                              struct inode *inode, bool force_mds,
                              const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
                        server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

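/*
 * Fall back to plain MDS I/O: restore the generic pg_ops and reset the
 * mirror's block size to the server's rsize.
 */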
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        pgio->pg_ops = &nfs_pgio_rw_ops;

        /* read path should never have more than one mirror */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

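/*
 * Kick off an asynchronous read of a single page: wrap the page in an
 * nfs_page request, zero any tail beyond the known file length, and
 * push it through the pageio machinery.
 */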
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                       struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, page, NULL, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;

        return 0;
}

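/*
 * Drop a reference to a read request. Once every request in the page
 * group has reached PG_UNLOCKPAGE, push the page to fscache if it is
 * up to date, then unlock it.
 */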
static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *d_inode = req->wb_context->dentry->d_inode;

        dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,
                (long long)req_offset(req));

        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
                if (PageUptodate(req->wb_page))
                        nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

                unlock_page(req->wb_page);
        }
        nfs_release_request(req);
}

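/* Mark the page up to date once every request in its group has good data. */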
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
                SetPageUptodate(req->wb_page);
}

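/*
 * Per-header completion: walk the request list, zero any region that
 * lies past the server-reported EOF, propagate uptodate state to the
 * pages, and release each request.
 */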
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        /* note: regions of the page not covered by a
                         * request are zeroed in nfs_readpage_async /
                         * readpage_async_filler */
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
                                zero_user_segment(page, start, end);

                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
                                zero_user_segment(page, start, end);
                        }
                }
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
out:
        hdr->release(hdr);
}

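/*
 * Set up the RPC message for a read. Reads against a swapfile get
 * NFS_RPC_SWAPFLAGS so the RPC layer can keep making progress under
 * memory pressure.
 */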
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
                              struct rpc_message *msg,
                              const struct nfs_rpc_ops *rpc_ops,
                              struct rpc_task_setup *task_setup_data, int how)
{
        struct inode *inode = hdr->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

        task_setup_data->flags |= swap_flags;
        rpc_ops->read_setup(hdr, msg);
}

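/* Error path: release every request left on the list. */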
static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
                             struct nfs_pgio_header *hdr,
                             struct inode *inode)
{
        int status = NFS_PROTO(inode)->read_done(task, hdr);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

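/*
 * Handle a short read by advancing the request past the bytes the
 * server did return and restarting the RPC. Illustrative numbers: a
 * 16384-byte request that gets only 4096 bytes back is reissued at
 * offset + 4096 for the remaining 12288 bytes.
 */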
static void nfs_readpage_retry(struct rpc_task *task,
                               struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res  *resp = &hdr->res;

        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                return;
        }
        /* Yes, so retry the read at the end of the hdr */
        hdr->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

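/*
 * Post-RPC fixup: if the server reported EOF inside the span we asked
 * for, trim good_bytes to the EOF boundary and clear any error;
 * otherwise treat a short count as a retryable short read.
 */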
static void nfs_readpage_result(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
{
        if (hdr->res.eof) {
                loff_t bound;

                bound = hdr->args.offset + hdr->res.count;
                spin_lock(&hdr->lock);
                if (bound < hdr->io_start + hdr->good_bytes) {
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                        hdr->good_bytes = bound - hdr->io_start;
                }
                spin_unlock(&hdr->lock);
        } else if (hdr->res.count != hdr->args.count)
                nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -   The error flag is set for this page. This happens only when a
 *      previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page_file_mapping(page)->host;
        int             error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page_file_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_inc_stats(inode, NFSIOS_READPAGES);

        /*
         * Try to flush any pending writes to the file.
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

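/* Glue passed through read_cache_pages() to readpage_async_filler(). */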
struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

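/*
 * Readahead filler, called once per page by read_cache_pages(): build
 * a request for the page, zero any tail beyond the file length, and
 * add it to the pageio descriptor.
 */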
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, page, NULL, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                error = desc->pgio->pg_error;
                goto out_unlock;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
out_unlock:
        unlock_page(page);
        return error;
}

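/*
 * ->readpages() entry point. Pages are tried against fscache first;
 * whatever remains is batched through the pageio descriptor so that
 * contiguous pages can coalesce into larger on-the-wire READs, bounded
 * by the server's rsize.
 */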
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
                        inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;
        npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
                 PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

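/* Create the slab cache backing nfs_readhdr_alloc(); called at init time. */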
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_pgio_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

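/* Tear down the read header cache on shutdown. */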
void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
        .rw_mode                = FMODE_READ,
        .rw_alloc_header        = nfs_readhdr_alloc,
        .rw_free_header         = nfs_readhdr_free,
        .rw_done                = nfs_readpage_done,
        .rw_result              = nfs_readpage_result,
        .rw_initiate            = nfs_initiate_read,
};