linux/fs/nfs/blocklayout/blocklayout.c
/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>          /* struct bio */
#include <linux/buffer_head.h>  /* various write calls */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
        dprintk("PRINTPAGE page %p\n", page);
        dprintk("       PagePrivate %d\n", PagePrivate(page));
        dprintk("       PageUptodate %d\n", PageUptodate(page));
        dprintk("       PageError %d\n", PageError(page));
        dprintk("       PageDirty %d\n", PageDirty(page));
        dprintk("       PageReferenced %d\n", PageReferenced(page));
        dprintk("       PageLocked %d\n", PageLocked(page));
        dprintk("       PageWriteback %d\n", PageWriteback(page));
        dprintk("       PageMappedToDisk %d\n", PageMappedToDisk(page));
        dprintk("\n");
}

/* Given the block extent (be) covering isect, determine whether the page
 * data needs to be initialized: NONE_DATA extents are always holes, and
 * INVALID_DATA extents are holes until the sector has been marked
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
        if (be->be_state == PNFS_BLOCK_NONE_DATA)
                return 1;
        else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
                return 0;
        else
                return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
        return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data, int num_se);
        void *data;
        int bse_count;
};

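/* Reference counting, as used below: the submitting thread holds the
 * initial reference from alloc_parallel(), bl_submit_bio() takes one per
 * submitted bio, and each bio completion drops one.  Whichever
 * put_parallel() runs last fires pnfs_callback, so the completion work
 * runs exactly once, after the last bio has finished.
 */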
static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
                rv->bse_count = 0;
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data, p->bse_count);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write", bio->bi_iter.bi_size,
                        (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}

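/* The extent be maps the file range starting at sector be_f_offset onto
 * the volume starting at sector be_v_offset, so the disk sector for a
 * file-relative isect is isect - be_f_offset + be_v_offset.  With
 * illustrative numbers only: be_f_offset = 1024 and be_v_offset = 8192
 * map file sector 1030 to disk sector 8198.
 */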
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }

        if (bio) {
                bio->bi_iter.bi_sector = isect - be->be_f_offset +
                        be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}

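/* Add (part of) a page to a bio, allocating the bio on first use.
 * bio_add_page() returns the number of bytes actually added, and refuses
 * once the bio is full or would exceed the device's limits; in that case
 * the current bio is submitted and the page retried on a fresh one.
 */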
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par,
                                      unsigned int offset, int len)
{
        isect = isect + (offset >> SECTOR_SHIFT);
        dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
                npg, rw, (unsigned long long)isect, offset, len);
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, len, offset) < len) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par)
{
        return do_add_page_to_bio(bio, npg, rw, isect, page, be,
                                  end_io, par, 0, PAGE_CACHE_SIZE);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        if (!err)
                bio_for_each_segment_all(bvec, bio, i)
                        SetPageUptodate(bvec->bv_page);

        if (err) {
                struct nfs_pgio_data *rdata = par->data;
                struct nfs_pgio_header *header = rdata->header;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_pgio_data *rdata;
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        rdata = container_of(task, struct nfs_pgio_data, task);
        pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
        struct nfs_pgio_data *rdata = data;

        rdata->task.tk_status = rdata->header->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
}

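/* Read path: walk the request's pages, refreshing the covering extent
 * (and a possible copy-on-write source) whenever the previous one is used
 * up.  Pages over holes are zeroed locally without touching the device;
 * everything else is batched into bios that complete through
 * bl_end_io_read and, after the last bio, bl_end_par_io_read.
 */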
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_data *rdata)
{
        struct nfs_pgio_header *header = rdata->header;
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = rdata->args.offset;
        size_t bytes_left = rdata->args.count;
        unsigned int pg_offset, pg_len;
        struct page **pages = rdata->args.pages;
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
        const bool is_dio = (header->dreq != NULL);

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
               rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);

        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < rdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                             isect, &cow_read);
                        if (!be) {
                                header->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                        if (cow_read) {
                                sector_t cow_length = cow_read->be_length -
                                        (isect - cow_read->be_f_offset);
                                extent_length = min(extent_length, cow_length);
                        }
                }

                if (is_dio) {
                        pg_offset = f_offset & ~PAGE_CACHE_MASK;
                        if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
                                pg_len = PAGE_CACHE_SIZE - pg_offset;
                        else
                                pg_len = bytes_left;

                        f_offset += pg_len;
                        bytes_left -= pg_len;
                        isect += (pg_offset >> SECTOR_SHIFT);
                } else {
                        pg_offset = 0;
                        pg_len = PAGE_CACHE_SIZE;
                }

                hole = is_hole(be, isect);
                if (hole && !cow_read) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], pg_offset, pg_len);
                        print_page(pages[i]);
                        SetPageUptodate(pages[i]);
                } else {
                        struct pnfs_block_extent *be_read;

                        be_read = (hole && cow_read) ? cow_read : be;
                        bio = do_add_page_to_bio(bio, rdata->pages.npages - i,
                                                 READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par,
                                                 pg_offset, pg_len);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += (pg_len >> SECTOR_SHIFT);
                extent_length -= PAGE_CACHE_SECTORS;
        }
        if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
                rdata->res.eof = 1;
                rdata->res.count = header->inode->i_size - rdata->args.offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
        }
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(READ, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;

 use_mds:
        dprintk("Giving up and using normal NFS\n");
        return PNFS_NOT_ATTEMPTED;
}

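/* Round the written range out to page boundaries and walk the covering
 * extents; ranges within PNFS_BLOCK_INVALID_DATA extents are recorded,
 * using a previously reserved short extent, so that LAYOUTCOMMIT can
 * report them to the server.
 */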
static void mark_extents_written(struct pnfs_block_layout *bl,
                                 __u64 offset, __u32 count)
{
        sector_t isect, end;
        struct pnfs_block_extent *be;
        struct pnfs_block_short_extent *se;

        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
                return;
        isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
        end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
        end >>= SECTOR_SHIFT;
        while (isect < end) {
                sector_t len;
                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        se = bl_pop_one_short_extent(be->be_inval);
                        BUG_ON(!se);
                        bl_mark_for_commit(be, isect, len, se);
                }
                isect += len;
                bl_put_extent(be);
        }
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                /* This is the zeroing page we added */
                end_page_writeback(bvec->bv_page);
                page_cache_release(bvec->bv_page);
        }

        if (unlikely(err)) {
                struct nfs_pgio_data *data = par->data;
                struct nfs_pgio_header *header = data->header;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_pgio_data *data = par->data;
        struct nfs_pgio_header *header = data->header;

        if (!uptodate) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write;
 * it marks sectors as written and extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_pgio_data *wdata;
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_pgio_data, task);
        if (likely(!wdata->header->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
        struct nfs_pgio_data *wdata = data;

        if (unlikely(wdata->header->pnfs_error)) {
                bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
                                        num_se);
        }

        wdata->task.tk_status = wdata->header->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark the intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
        return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
        dprintk("%s enter be=%p\n", __func__, be);

        set_buffer_mapped(bh);
        bh->b_bdev = be->be_mdev;
        bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
            (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

        dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
                __func__, (unsigned long long)isect, (long)bh->b_blocknr,
                bh->b_size);
        return;
}

static void
bl_read_single_end_io(struct bio *bio, int error)
{
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;

        /* Only one page in bvec */
        unlock_page(page);
}

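/* Synchronously read one sector-aligned chunk into a throwaway "shadow"
 * page, then copy just the requested bytes into the real page.  The
 * shadow page's lock doubles as the completion signal:
 * bl_read_single_end_io unlocks it, and wait_on_page_locked() waits.
 */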
static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
                    unsigned int offset, unsigned int len)
{
        struct bio *bio;
        struct page *shadow_page;
        sector_t isect;
        char *kaddr, *kshadow_addr;
        int ret = 0;

        dprintk("%s: offset %u len %u\n", __func__, offset, len);

        shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (shadow_page == NULL)
                return -ENOMEM;

        bio = bio_alloc(GFP_NOIO, 1);
        if (bio == NULL) {
                /* don't leak the shadow page allocated above */
                __free_page(shadow_page);
                return -ENOMEM;
        }

        isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
                (offset / SECTOR_SIZE);

        bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
        bio->bi_bdev = be->be_mdev;
        bio->bi_end_io = bl_read_single_end_io;

        lock_page(shadow_page);
        if (bio_add_page(bio, shadow_page,
                         SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
                unlock_page(shadow_page);
                bio_put(bio);
                /* the shadow page must be freed on this path as well */
                __free_page(shadow_page);
                return -EIO;
        }

        submit_bio(READ, bio);
        wait_on_page_locked(shadow_page);
        if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
                ret = -EIO;
        } else {
                kaddr = kmap_atomic(page);
                kshadow_addr = kmap_atomic(shadow_page);
                memcpy(kaddr + offset, kshadow_addr + offset, len);
                kunmap_atomic(kshadow_addr);
                kunmap_atomic(kaddr);
        }
        __free_page(shadow_page);
        bio_put(bio);

        return ret;
}

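/* Pre-read the sector-aligned head and tail surrounding a dirty region,
 * so that a full-sector write will not clobber valid on-disk data.  With
 * no extent to read from, the surrounding bytes are zeroed instead.
 */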
static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
                          unsigned int dirty_offset, unsigned int dirty_len,
                          bool full_page)
{
        int ret = 0;
        unsigned int start, end;

        if (full_page) {
                start = 0;
                end = PAGE_CACHE_SIZE;
        } else {
                start = round_down(dirty_offset, SECTOR_SIZE);
                end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
        }

        dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
        if (!be) {
                zero_user_segments(page, start, dirty_offset,
                                   dirty_offset + dirty_len, end);
                if (start == 0 && end == PAGE_CACHE_SIZE &&
                    trylock_page(page)) {
                        SetPageUptodate(page);
                        unlock_page(page);
                }
                return ret;
        }

        if (start != dirty_offset)
                ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

        if (!ret && (dirty_offset + dirty_len < end))
                ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
                                          end - dirty_offset - dirty_len);

        return ret;
}

/* Given an unmapped page, zero it (or read it in for COW).  The page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
        struct buffer_head *bh = NULL;
        int ret = 0;
        sector_t isect;

        dprintk("%s enter, %p\n", __func__, page);
        BUG_ON(PageUptodate(page));
        if (!cow_read) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                goto cleanup;
        }

        bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
        if (!bh) {
                ret = -ENOMEM;
                goto cleanup;
        }

        isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
        map_block(bh, isect, cow_read);
        if (!bh_uptodate_or_lock(bh))
                ret = bh_submit_read(bh);
        if (ret)
                goto cleanup;
        SetPageUptodate(page);

cleanup:
        if (bh)
                free_buffer_head(bh);
        if (ret) {
                /* Need to mark layout with bad read...should now
                 * just use nfs4 for reads and writes.
                 */
                mark_bad_read();
        }
        return ret;
}

/* Find or create a zeroing page and mark it as under writeback.
 * Returns an ERR_PTR on error, NULL to indicate this page should be
 * skipped, or the page itself to indicate it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
                        struct pnfs_block_extent *cow_read)
{
        struct page *page;
        int locked = 0;
        page = find_get_page(inode->i_mapping, index);
        if (page)
                goto check_page;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (unlikely(!page)) {
                dprintk("%s oom\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
        locked = 1;

check_page:
        /* PageDirty: Other will write this out
         * PageWriteback: Other is writing this out
         * PageUptodate: It was read before
         */
        if (PageDirty(page) || PageWriteback(page)) {
                print_page(page);
                if (locked)
                        unlock_page(page);
                page_cache_release(page);
                return NULL;
        }

        if (!locked) {
                lock_page(page);
                locked = 1;
                goto check_page;
        }
        if (!PageUptodate(page)) {
                /* New page, readin or zero it */
                init_page_for_write(page, cow_read);
        }
        set_page_writeback(page);
        unlock_page(page);

        return page;
}

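/* Write path, in three phases: (1) if the write starts inside a
 * PNFS_BLOCK_INVALID_DATA extent, zero the uninitialized pages of the
 * enclosing block in front of it; (2) write the data pages themselves,
 * using synchronous read-modify-write for sector-unaligned edges; (3)
 * zero the pages trailing the final block, reusing the phase-1 loop via
 * the fill_invalid_ext label.
 */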
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
{
        struct nfs_pgio_header *header = wdata->header;
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, last_isect = 0, extent_length = 0;
        struct parallel_io *par = NULL;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
        unsigned int pg_offset, pg_len, saved_len;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
        u64 temp;
        int npg_per_block =
            NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

        if (header->dreq != NULL &&
            (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
             !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
                dprintk("pnfsblock: non-block-aligned DIO write, resending through MDS\n");
                goto out_mds;
        }
        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
         * We want to write each one; if an error occurs, set pnfs_error so
         * the write is redone through normal NFS.
         */
        par = alloc_parallel(wdata);
        if (!par)
                goto out_mds;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */

        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                goto out_mds;
        }

        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (likely(!bl_push_one_short_extent(be->be_inval)))
                        par->bse_count++;
                else
                        goto out_mds;
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
                                     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
                extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
                dprintk("%s need to zero %d pages\n", __func__, npg_zero);
                for (; npg_zero > 0; npg_zero--) {
                        if (bl_is_sector_init(be->be_inval, isect)) {
                                dprintk("isect %llu already init\n",
                                        (unsigned long long)isect);
                                goto next_page;
                        }
                        /* page ref released in bl_end_io_write_zero */
                        index = isect >> PAGE_CACHE_SECTOR_SHIFT;
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
                        page = bl_find_get_zeroing_page(header->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
                                header->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                       PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
                                header->pnfs_error = ret;
                                goto out;
                        }
                        if (likely(!bl_push_one_short_extent(be->be_inval)))
                                par->bse_count++;
                        else {
                                end_page_writeback(page);
                                page_cache_release(page);
                                header->pnfs_error = -ENOMEM;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
                        mark_extents_written(BLK_LSEG2EXT(header->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);

                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
                }
                if (last)
                        goto write_done;
        }
        bio = bl_submit_bio(WRITE, bio);

        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
        for (i = pg_index; i < wdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                             isect, &cow_read);
                        if (!be || !is_writable(be, isect)) {
                                header->pnfs_error = -EINVAL;
                                goto out;
                        }
                        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                                if (likely(!bl_push_one_short_extent(
                                                                be->be_inval)))
                                        par->bse_count++;
                                else {
                                        header->pnfs_error = -ENOMEM;
                                        goto out;
                                }
                        }
                        extent_length = be->be_length -
                            (isect - be->be_f_offset);
                }

                dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
                pg_offset = offset & ~PAGE_CACHE_MASK;
                if (pg_offset + count > PAGE_CACHE_SIZE)
                        pg_len = PAGE_CACHE_SIZE - pg_offset;
                else
                        pg_len = count;

                saved_len = pg_len;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
                    !bl_is_sector_init(be->be_inval, isect)) {
                        ret = bl_read_partial_page_sync(pages[i], cow_read,
                                                        pg_offset, pg_len, true);
                        if (ret) {
                                dprintk("%s bl_read_partial_page_sync fail %d\n",
                                        __func__, ret);
                                header->pnfs_error = ret;
                                goto out;
                        }

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                       PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                header->pnfs_error = ret;
                                goto out;
                        }

                        /* Expand to full page write */
                        pg_offset = 0;
                        pg_len = PAGE_CACHE_SIZE;
                } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
                           (pg_len & (SECTOR_SIZE - 1))) {
                        /* ahh, nasty case. We have to do sync full sector
                         * read-modify-write cycles.
                         */
                        unsigned int saved_offset = pg_offset;
                        ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
                                                        pg_len, false);
                        if (ret) {
                                dprintk("%s bl_read_partial_page_sync fail %d\n",
                                        __func__, ret);
                                header->pnfs_error = ret;
                                goto out;
                        }
                        pg_offset = round_down(pg_offset, SECTOR_SIZE);
                        pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
                                 - pg_offset;
                }

                bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par,
                                         pg_offset, pg_len);
                if (IS_ERR(bio)) {
                        header->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                offset += saved_len;
                count -= saved_len;
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        /* Last page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                bio = bl_submit_bio(WRITE, bio);
                temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
                npg_zero = npg_per_block - do_div(temp, npg_per_block);
                if (npg_zero < npg_per_block) {
                        last = 1;
                        goto fill_invalid_ext;
                }
        }

write_done:
        wdata->res.count = wdata->args.count;
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
out_mds:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        kfree(par);
        return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
        int i;
        struct pnfs_block_extent *be;

        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                while (!list_empty(&bl->bl_extents[i])) {
                        be = list_first_entry(&bl->bl_extents[i],
                                              struct pnfs_block_extent,
                                              be_node);
                        list_del(&be->be_node);
                        bl_put_extent(be);
                }
        }
        spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
        struct pnfs_inval_tracking *pos, *temp;
        struct pnfs_block_short_extent *se, *stemp;

        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }

        list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
                list_del(&se->bse_node);
                kfree(se);
        }
        return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

        dprintk("%s enter\n", __func__);
        release_extents(bl, NULL);
        release_inval_marks(&bl->bl_inval);
        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;
        spin_lock_init(&bl->bl_ext_lock);
        INIT_LIST_HEAD(&bl->bl_extents[0]);
        INIT_LIST_HEAD(&bl->bl_extents[1]);
        INIT_LIST_HEAD(&bl->bl_commit);
        INIT_LIST_HEAD(&bl->bl_committing);
        bl->bl_count = 0;
        bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
        BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
                       const struct nfs4_layoutcommit_args *arg)
{
        dprintk("%s enter\n", __func__);
        encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

        dprintk("%s enter\n", __func__);
        clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
        if (mid) {
                struct pnfs_block_dev *dev, *tmp;

                /* No need to take bm_lock as we are last user freeing bm_devlist */
                list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
                kfree(mid);
        }
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
                        struct nfs4_deviceid *d_id)
{
        struct pnfs_device *dev;
        struct pnfs_block_dev *rv;
        u32 max_resp_sz;
        int max_pages;
        struct page **pages = NULL;
        int i, rc;

        /*
         * Use the session max response size as the basis for setting
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = nfs_page_array_len(0, max_resp_sz);
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);

        dev = kmalloc(sizeof(*dev), GFP_NOFS);
        if (!dev) {
                dprintk("%s kmalloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
        if (pages == NULL) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i]) {
                        rv = ERR_PTR(-ENOMEM);
                        goto out_free;
                }
        }

        memcpy(&dev->dev_id, d_id, sizeof(*d_id));
        dev->layout_type = LAYOUT_BLOCK_VOLUME;
        dev->pages = pages;
        dev->pgbase = 0;
        dev->pglen = PAGE_SIZE * max_pages;
        dev->mincount = 0;
        dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

        dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
        rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
        dprintk("%s getdevice info returns %d\n", __func__, rc);
        if (rc) {
                rv = ERR_PTR(rc);
                goto out_free;
        }

        rv = nfs4_blk_decode_device(server, dev);
 out_free:
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
        kfree(pages);
        kfree(dev);
        return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        struct block_mount_id *b_mt_id = NULL;
        struct pnfs_devicelist *dlist = NULL;
        struct pnfs_block_dev *bdev;
        LIST_HEAD(block_disklist);
        int status, i;

        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
        if (!b_mt_id) {
                status = -ENOMEM;
                goto out_error;
        }
        /* Initialize nfs4 block layout mount id */
        spin_lock_init(&b_mt_id->bm_lock);
        INIT_LIST_HEAD(&b_mt_id->bm_devlist);

        dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
        if (!dlist) {
                status = -ENOMEM;
                goto out_error;
        }
        dlist->eof = 0;
        while (!dlist->eof) {
                status = nfs4_proc_getdevicelist(server, fh, dlist);
                if (status)
                        goto out_error;
                dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
                        __func__, dlist->num_devs, dlist->eof);
                for (i = 0; i < dlist->num_devs; i++) {
                        bdev = nfs4_blk_get_deviceinfo(server, fh,
                                                       &dlist->dev_id[i]);
                        if (IS_ERR(bdev)) {
                                status = PTR_ERR(bdev);
                                goto out_error;
                        }
                        spin_lock(&b_mt_id->bm_lock);
                        list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
                        spin_unlock(&b_mt_id->bm_lock);
                }
        }
        dprintk("%s SUCCESS\n", __func__);
        server->pnfs_ld_data = b_mt_id;

 out_return:
        kfree(dlist);
        return status;

 out_error:
        free_blk_mountid(b_mt_id);
        goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
        struct block_mount_id *b_mt_id = server->pnfs_ld_data;

        dprintk("%s enter\n", __func__);
        free_blk_mountid(b_mt_id);
        dprintk("%s RETURNS\n", __func__);
        return 0;
}

static bool
is_aligned_req(struct nfs_page *req, unsigned int alignment)
{
        return IS_ALIGNED(req->wb_offset, alignment) &&
               IS_ALIGNED(req->wb_bytes, alignment);
}

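/* The block layout addresses storage in 512-byte sectors, so direct I/O
 * that is not sector-aligned (for reads) or page-aligned (for writes)
 * cannot be expressed here and is redirected to the MDS instead.
 */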
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (pgio->pg_dreq != NULL &&
            !is_aligned_req(req, SECTOR_SIZE))
                nfs_pageio_reset_read_mds(pgio);
        else
                pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                struct nfs_page *req)
{
        if (pgio->pg_dreq != NULL &&
            !is_aligned_req(req, SECTOR_SIZE))
                return 0;

        return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t end;

        /* Optimize common case that writes from 0 to end of file */
        end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
        if (end != NFS_I(inode)->npages) {
                rcu_read_lock();
                end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
                rcu_read_unlock();
        }

        if (!end)
                return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
        else
                return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (pgio->pg_dreq != NULL &&
            !is_aligned_req(req, PAGE_CACHE_SIZE)) {
                nfs_pageio_reset_write_mds(pgio);
        } else {
                u64 wb_size;
                if (pgio->pg_dreq == NULL)
                        wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
                                                      req->wb_index);
                else
                        wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

                pnfs_generic_pg_init_write(pgio, req, wb_size);
        }
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                 struct nfs_page *req)
{
        if (pgio->pg_dreq != NULL &&
            !is_aligned_req(req, PAGE_CACHE_SIZE))
                return 0;

        return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = bl_pg_init_read,
        .pg_test = bl_pg_test_read,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = bl_pg_init_write,
        .pg_test = bl_pg_test_write,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id                             = LAYOUT_BLOCK_VOLUME,
        .name                           = "LAYOUT_BLOCK_VOLUME",
        .owner                          = THIS_MODULE,
        .read_pagelist                  = bl_read_pagelist,
        .write_pagelist                 = bl_write_pagelist,
        .alloc_layout_hdr               = bl_alloc_layout_hdr,
        .free_layout_hdr                = bl_free_layout_hdr,
        .alloc_lseg                     = bl_alloc_lseg,
        .free_lseg                      = bl_free_lseg,
        .encode_layoutcommit            = bl_encode_layoutcommit,
        .cleanup_layoutcommit           = bl_cleanup_layoutcommit,
        .set_layoutdriver               = bl_set_layoutdriver,
        .clear_layoutdriver             = bl_clear_layoutdriver,
        .pg_read_ops                    = &bl_pg_read_ops,
        .pg_write_ops                   = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall         = rpc_pipe_generic_upcall,
        .downcall       = bl_pipe_downcall,
        .destroy_msg    = bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
                                            struct rpc_pipe *pipe)
{
        struct dentry *dir, *dentry;

        dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
        if (dir == NULL)
                return ERR_PTR(-ENOENT);
        dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
        dput(dir);
        return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
                                          struct rpc_pipe *pipe)
{
        if (pipe->dentry)
                rpc_unlink(pipe->dentry);
}

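/* rpc_pipefs mount/umount notifier: create the "blocklayout" upcall pipe
 * dentry when an rpc_pipefs instance is mounted, and drop it on umount,
 * so nn->bl_device_pipe stays usable across pipefs lifetimes.
 */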
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
                           void *ptr)
{
        struct super_block *sb = ptr;
        struct net *net = sb->s_fs_info;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return 0;

        if (nn->bl_device_pipe == NULL) {
                module_put(THIS_MODULE);
                return 0;
        }

        switch (event) {
        case RPC_PIPEFS_MOUNT:
                dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
                if (IS_ERR(dentry)) {
                        ret = PTR_ERR(dentry);
                        break;
                }
                nn->bl_device_pipe->dentry = dentry;
                break;
        case RPC_PIPEFS_UMOUNT:
                if (nn->bl_device_pipe->dentry)
                        nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
                break;
        default:
                ret = -ENOTSUPP;
                break;
        }
        module_put(THIS_MODULE);
        return ret;
}

static struct notifier_block nfs4blocklayout_block = {
        .notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
                                                   struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;
        struct dentry *dentry;

        pipefs_sb = rpc_get_sb_net(net);
        if (!pipefs_sb)
                return NULL;
        dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
        rpc_put_sb_net(net);
        return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
                                           struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;

        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
                rpc_put_sb_net(net);
        }
}

static int nfs4blocklayout_net_init(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;

        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
                return PTR_ERR(nn->bl_device_pipe);
        dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(nn->bl_device_pipe);
                return PTR_ERR(dentry);
        }
        nn->bl_device_pipe->dentry = dentry;
        return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);

        nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
        rpc_destroy_pipe_data(nn->bl_device_pipe);
        nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
        .init = nfs4blocklayout_net_init,
        .exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
        if (ret)
                goto out_remove;
        ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
        if (ret)
                goto out_notifier;
out:
        return ret;

out_notifier:
        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
               __func__);

        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
        unregister_pernet_subsys(&nfs4blocklayout_net_ops);
        pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);