linux/fs/afs/write.c
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
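
/* Illustrative usage sketch (not part of the original file): a caller that
 * wants the whole of the page covering byte position 'pos' fetched from the
 * server would do something like:
 *
 *	ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
 *	if (ret == 0)
 *		SetPageUptodate(page);
 *
 * which is the pattern afs_write_begin() below uses for a partial write to a
 * page that isn't yet up to date.
 */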

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
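
	/* Illustrative example (hypothetical values): the dirty region
	 * [from, to) is packed into page->private as
	 * (to << AFS_PRIV_SHIFT) | from.  With 4KiB pages, a write covering
	 * bytes 0x100-0x2ff of a page would be recorded as:
	 *
	 *	priv = (0x300UL << AFS_PRIV_SHIFT) | 0x100;
	 *	f = priv & AFS_PRIV_MAX;	// 0x100
	 *	t = priv >> AFS_PRIV_SHIFT;	// 0x300
	 *
	 * The BUILD_BUG_ON() above guards against page sizes too large for
	 * both offsets to fit in an unsigned long together.
	 */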

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}
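
	/* Worked example (illustrative values): if the page already has bytes
	 * 0x200-0x2ff dirty (f = 0x200, t = 0x300) and the new write covers
	 * 0x280-0x37f (from = 0x280, to = 0x380), the ranges overlap and
	 * merge to f = 0x200, t = 0x380.  Had the new write been 0x400-0x47f
	 * with AFS_VNODE_NEW_CONTENT clear, the ranges would be disjoint and
	 * we'd take the flush_conflicting_write path instead.
	 */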

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}
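
	/* The unlocked i_size_read() above avoids taking wb_lock on the
	 * common path where the file isn't being extended; the re-read under
	 * the lock ensures two racing extenders can't move i_size backwards.
	 */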

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);
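
	/* Illustrative example (hypothetical numbers): the store covers bytes
	 * first * PAGE_SIZE + offset through last * PAGE_SIZE + to.  With
	 * 4KiB pages, first = 2, last = 4, offset = 0x100 and to = 0x300
	 * describes a write of (4 * 4096 + 0x300) - (2 * 4096 + 0x100) =
	 * 8704 bytes, which is also the quantity added to n_store_bytes on
	 * success below.
	 */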

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
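	/* For example (illustrative): if the primary page is index 5 and
	 * indices 6, 7 and 8 are also dirty, lockable and contiguous, the
	 * loop below extends the extent to cover pages 5-8 (count = 4), and a
	 * single FS.StoreData RPC then writes all four back at once.
	 */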
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);
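
	/* The count cap above bounds a single store extent to roughly 65536
	 * pages, i.e. about 256MiB with 4KiB pages (65536 * 4096 bytes).
	 */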

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");
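
	/* Illustrative note: for cyclic writeback, if writeback_index is,
	 * say, 5, we first write the region from page 5 to the end of the
	 * file and then wrap round to cover pages 0-4, so every dirty page
	 * eventually gets a turn regardless of where the last pass stopped.
	 */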

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
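	/* i.e. f = 0 and t = PAGE_SIZE, marking the whole page dirty, since a
	 * write through a shared mapping may touch any byte of it.
	 */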
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

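	/* Dispose of the unused keys outside the lock.  list_move() above
	 * parked them on a local "graveyard" list, a common kernel idiom that
	 * keeps the actual freeing work out of the wb_lock critical section.
	 */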
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}