linux/fs/afs/write.c
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
        struct afs_writeback *front;
        struct afs_vnode *vnode = wb->vnode;

        list_del_init(&wb->link);
        if (!list_empty(&vnode->writebacks)) {
                /* if an fsync rises to the front of the queue then wake it
                 * up */
                front = list_entry(vnode->writebacks.next,
                                   struct afs_writeback, link);
                if (front->state == AFS_WBACK_SYNCING) {
                        _debug("wake up sync");
                        front->state = AFS_WBACK_COMPLETE;
                        wake_up(&front->waitq);
                }
        }
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
        _enter("");
        key_put(wb->key);
        kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
        struct afs_vnode *vnode = wb->vnode;

        _enter("{%d}", wb->usage);

        spin_lock(&vnode->writeback_lock);
        if (--wb->usage == 0)
                afs_unlink_writeback(wb);
        else
                wb = NULL;
        spin_unlock(&vnode->writeback_lock);
        if (wb)
                afs_free_writeback(wb);
}

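/*
 * A rough sketch of the writeback record this file manipulates (the real
 * definition lives in internal.h; only the fields used here are shown and
 * the exact layout may differ):
 *
 *	struct afs_writeback {
 *		struct list_head   link;		// position in vnode->writebacks
 *		struct afs_vnode   *vnode;		// file being written back
 *		struct key         *key;		// key the pages were dirtied under
 *		wait_queue_head_t  waitq;		// fsync markers sleep here
 *		pgoff_t            first, last;		// page range covered
 *		unsigned           offset_first;	// start offset within first page
 *		unsigned           to_last;		// end offset within last page
 *		int                usage;		// reference count
 *		...                state;		// PENDING / CONFLICTING / WRITING /
 *							//   SYNCING / COMPLETE
 *	};
 *
 * Each record describes one contiguous dirty byte range.  It is attached to
 * its pages via page_private() and holds roughly one reference per attached
 * page, plus one per transient holder such as an fsync marker.
 */
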
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
                      GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        atomic_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages[0] = page;
        get_page(page);

        ret = afs_vnode_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}

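/*
 * afs_fill_page() exists because a write covering only part of a page that
 * isn't yet uptodate must be handled as a read-modify-write: the
 * surrounding bytes have to be fetched from the server first, or they
 * would later be written back as garbage.  For example, a 100-byte write
 * at offset 200 into a fresh page triggers a whole-page fetch below before
 * the 100 bytes are copied in.
 */
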
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_writeback *candidate, *wb;
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = file->private_data;
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
        if (!candidate)
                return -ENOMEM;
        candidate->vnode = vnode;
        candidate->first = candidate->last = index;
        candidate->offset_first = from;
        candidate->to_last = to;
        INIT_LIST_HEAD(&candidate->link);
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                kfree(candidate);
                return -ENOMEM;
        }

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

        /* page won't leak in error case: it eventually gets cleaned off LRU */
        *pagep = page;

try_again:
        spin_lock(&vnode->writeback_lock);

        /* see if this page is already pending a writeback under a suitable key
         * - if so we can just join onto that one */
        wb = (struct afs_writeback *) page_private(page);
        if (wb) {
                if (wb->key == key && wb->state == AFS_WBACK_PENDING)
                        goto subsume_in_current_wb;
                goto flush_conflicting_wb;
        }

        if (index > 0) {
                /* see if we can find an already pending writeback that we can
                 * append this page to */
                list_for_each_entry(wb, &vnode->writebacks, link) {
                        if (wb->last == index - 1 && wb->key == key &&
                            wb->state == AFS_WBACK_PENDING)
                                goto append_to_previous_wb;
                }
        }

        list_add_tail(&candidate->link, &vnode->writebacks);
        candidate->key = key_get(key);
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) candidate);
        _leave(" = 0 [new]");
        return 0;

subsume_in_current_wb:
        _debug("subsume");
        ASSERTRANGE(wb->first, <=, index, <=, wb->last);
        if (index == wb->first && from < wb->offset_first)
                wb->offset_first = from;
        if (index == wb->last && to > wb->to_last)
                wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        kfree(candidate);
        _leave(" = 0 [sub]");
        return 0;

append_to_previous_wb:
        _debug("append into %lx-%lx", wb->first, wb->last);
        wb->usage++;
        wb->last++;
        wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) wb);
        kfree(candidate);
        _leave(" = 0 [app]");
        return 0;

        /* the page is currently bound to another context, so if it's dirty we
         * need to flush it before we can use the new context */
flush_conflicting_wb:
        _debug("flush conflict");
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
        if (clear_page_dirty_for_io(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
                        _leave(" = %d", ret);
                        return ret;
                }
        }

        /* the page holds a ref on the writeback record */
        afs_put_writeback(wb);
        set_page_private(page, 0);
        ClearPagePrivate(page);
        goto try_again;
}

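/*
 * Worked example of the coalescing above (a sketch, assuming 4KB pages):
 * two back-to-back write(2) calls under the same key, the first covering
 * bytes 100-4095 of page 0 and the second bytes 0-499 of page 1, end up in
 * a single record with first=0, offset_first=100, last=1, to_last=500.
 * The second call takes the append_to_previous_wb path because page 1 has
 * no private record attached and the existing record's 'last' is exactly
 * index - 1.
 */
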
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = file->private_data;
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->writeback_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->writeback_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0) {
                                /* must not return with the page still locked
                                 * and held: ->write_end is expected to unlock
                                 * and release it on all paths */
                                unlock_page(page);
                                put_page(page);
                                return ret;
                        }
                }
                SetPageUptodate(page);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        unlock_page(page);
        put_page(page);

        return copied;
}

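/*
 * Note on the i_size update above: the unlocked i_size_read() is only an
 * optimistic check; it is repeated under writeback_lock so that two racing
 * extenders cannot interleave between check and update and leave i_size
 * smaller than the data actually written.
 */
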
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                           pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv, 0);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        if (error)
                                SetPageError(page);
                        if (PageWriteback(page))
                                end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first < last);

        _leave("");
}

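/*
 * "Killing" a page abandons its dirty data: clearing PG_uptodate forces
 * the next read to refetch the page from the server, and PG_error is set
 * only for hard I/O failures (as opposed to permission failures, where a
 * retry under a different key might still succeed).
 */
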
/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *primary_page)
{
        struct page *pages[8], *page;
        unsigned long count;
        unsigned n, offset, to;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* find all consecutive lockable dirty pages, stopping when we find a
         * page that is not immediately lockable, is not dirty or is missing,
         * or we reach the end of the range */
        start = primary_page->index;
        if (start >= wb->last)
                goto no_more;
        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = wb->last - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
                                          start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (page->index > wb->last)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) ||
                            page_private(page) != (unsigned long) wb) {
                                unlock_page(page);
                                break;
                        }
                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= wb->last && count < 65536);

no_more:
        /* we now have a contiguous set of dirty pages, each with writeback set
         * and the dirty mark cleared; the first page is locked and must remain
         * so, all the rest are unlocked */
        first = primary_page->index;
        last = first + count - 1;

        offset = (first == wb->first) ? wb->offset_first : 0;
        to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

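        /* the store covers the byte range [first * PAGE_SIZE + offset,
         * last * PAGE_SIZE + to): 'offset' trims the head of the first page
         * and 'to' trims the tail of the last.  For example, first=2 with
         * offset=100 and last=4 with to=512 stores from byte 100 of page 2
         * up to (but not including) byte 512 of page 4. */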
        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_vnode_store_data(wb, first, last, offset, to);
        if (ret < 0) {
                switch (ret) {
                case -EDQUOT:
                case -ENOSPC:
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
                        break;
                case -EROFS:
                case -EIO:
                case -EREMOTEIO:
                case -EFBIG:
                case -ENOENT:
                case -ENOMEDIUM:
                case -ENXIO:
                        afs_kill_pages(wb->vnode, true, first, last);
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
                        break;
                case -EACCES:
                case -EPERM:
                case -ENOKEY:
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EKEYREVOKED:
                        afs_kill_pages(wb->vnode, false, first, last);
                        break;
                default:
                        break;
                }
        } else {
                ret = count;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct afs_writeback *wb;
        int ret;

        _enter("{%lx},", page->index);

        wb = (struct afs_writeback *) page_private(page);
        ASSERT(wb != NULL);

        ret = afs_write_back_from_locked_page(wb, page);
        unlock_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}

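/*
 * Note that afs_writepage() deliberately returns 0 even when the store
 * failed: the error has already been recorded against the mapping via
 * mapping_set_error(), from where the fsync path (file_write_and_wait_range()
 * in afs_fsync() below) reports it to the application, rather than having
 * the error bubble up through the background writeback machinery.
 */
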
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct afs_writeback *wb;
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
                n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
                                       1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                if (page->index > end) {
                        *_next = index;
                        put_page(page);
                        _leave(" = 0 [%lx]", *_next);
                        return 0;
                }

                /* at this point we hold neither mapping->tree_lock nor lock on
                 * the page itself: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled back from
                 * swapper_space to tmpfs file mapping
                 */
                lock_page(page);

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                wb = (struct afs_writeback *) page_private(page);
                ASSERT(wb != NULL);

                spin_lock(&wb->vnode->writeback_lock);
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}

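/*
 * In the range_cyclic case above, writeback resumes from wherever the last
 * pass stopped (mapping->writeback_index) and wraps around: with
 * writeback_index = 100, for example, pages 100..EOF are scanned first and
 * then 0..99, so every dirty page is eventually visited even if
 * nr_to_write keeps running out part-way through a pass.
 */
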
/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
        struct afs_writeback *wb = call->wb;
        struct pagevec pv;
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;
        bool free_wb;

        _enter("{%x:%u},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        ASSERT(wb != NULL);

        pagevec_init(&pv, 0);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(call->mapping, first, count,
                                              pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                spin_lock(&vnode->writeback_lock);
                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        end_page_writeback(page);
                        if (page_private(page) == (unsigned long) wb) {
                                set_page_private(page, 0);
                                ClearPagePrivate(page);
                                wb->usage--;
                        }
                }
                free_wb = false;
                if (wb->usage == 0) {
                        afs_unlink_writeback(wb);
                        free_wb = true;
                }
                spin_unlock(&vnode->writeback_lock);
                first += count;
                if (free_wb) {
                        afs_free_writeback(wb);
                        wb = NULL;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%x.%u},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
        struct address_space *mapping = vnode->vfs_inode.i_mapping;
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .range_cyclic   = 1,
        };
        int ret;

        _enter("");

        ret = mapping->a_ops->writepages(mapping, &wbc);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        _leave(" = %d", ret);
        return ret;
}

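/*
 * The writeback_control above requests a full synchronous pass: WB_SYNC_ALL,
 * an effectively unlimited nr_to_write, cyclic over the whole mapping.  The
 * __mark_inode_dirty() call afterwards looks odd but is presumably there to
 * keep the inode on the dirty list so that any pages the pass could not
 * write are revisited by the regular flusher threads.
 */
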
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_writeback *wb, *xwb;
        struct afs_vnode *vnode = AFS_FS_I(inode);
        int ret;

        _enter("{%x:%u},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = file_write_and_wait_range(file, start, end);
        if (ret)
                return ret;
        inode_lock(inode);

        /* use a writeback record as a marker in the queue - when this reaches
         * the front of the queue, all the outstanding writes are either
         * completed or rejected */
        wb = kzalloc(sizeof(*wb), GFP_KERNEL);
        if (!wb) {
                ret = -ENOMEM;
                goto out;
        }
        wb->vnode = vnode;
        wb->first = 0;
        wb->last = -1;
        wb->offset_first = 0;
        wb->to_last = PAGE_SIZE;
        wb->usage = 1;
        wb->state = AFS_WBACK_SYNCING;
        init_waitqueue_head(&wb->waitq);

        spin_lock(&vnode->writeback_lock);
        list_for_each_entry(xwb, &vnode->writebacks, link) {
                if (xwb->state == AFS_WBACK_PENDING)
                        xwb->state = AFS_WBACK_CONFLICTING;
        }
        list_add_tail(&wb->link, &vnode->writebacks);
        spin_unlock(&vnode->writeback_lock);

        /* push all the outstanding writebacks to the server */
        ret = afs_writeback_all(vnode);
        if (ret < 0) {
                afs_put_writeback(wb);
                _leave(" = %d [wb]", ret);
                goto out;
        }

        /* wait for the preceding writes to actually complete */
        ret = wait_event_interruptible(wb->waitq,
                                       wb->state == AFS_WBACK_COMPLETE ||
                                       vnode->writebacks.next == &wb->link);
        afs_put_writeback(wb);
        _leave(" = %d", ret);
out:
        inode_unlock(inode);
        return ret;
}

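/*
 * The wait condition above has two exits: afs_unlink_writeback() flips the
 * marker to AFS_WBACK_COMPLETE and wakes it when it reaches the front of
 * the queue as older records drain away, and the list check catches the
 * case where the marker is already at the front before anything wakes it.
 */
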
/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
        _enter("");

        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;

        return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

        _enter("{{%x:%u}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        /* wait for the page to be written to the cache before we allow it to
         * be modified */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, page);
#endif

        _leave(" = 0");
        return 0;
}