linux/fs/afs/write.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
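 * - __set_page_dirty_nobuffers() tags the page dirty in the mapping's xarray,
 *   which is what later lets afs_writepages() find it via PAGECACHE_TAG_DIRTY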
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
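 * - a page lying entirely at or beyond the current EOF is simply zeroed as
 *   there is nothing to fetch from the server; otherwise the missing data is
 *   read in with a single-page afs_read request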
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
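
	/* The dirty bounds are packed into page->private as a pair of byte
	 * offsets: "from" in the low bits and "to" above AFS_PRIV_SHIFT (see
	 * internal.h).  For example, assuming a 64-bit build where
	 * AFS_PRIV_SHIFT is 32, a write covering bytes 0x100-0x2ff of a page
	 * is recorded as:
	 *
	 *	priv = ((unsigned long)0x300 << AFS_PRIV_SHIFT) | 0x100;
	 */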

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}
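
/*
 * For illustration only: the two address_space operations above are invoked
 * by the VFS, not by this file.  A buffered write(2) is driven by
 * generic_perform_write(), which loops over the affected pages doing,
 * roughly:
 *
 *	a_ops->write_begin(file, mapping, pos, bytes, 0, &page, &fsdata);
 *	copied = iov_iter_copy_from_user_atomic(page, iter, offset, bytes);
 *	a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 *
 * so a single write spanning three pages produces three begin/end pairs.
 */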

/*
 * kill all the pages in the given range
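 * - the pages are processed in batches of up to PAGEVEC_SIZE: each pass looks
 *   up the next run of pages, marks them in error and removes them from the
 *   page cache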
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
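 *
 * Iteration is thus cursor-like: the first call, made with *_wbk == NULL,
 * starts at the head of vnode->wb_keys; if the server rejects a key, the
 * caller hands it back in and the search resumes from the entry after it.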
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

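/*
 * Operation table for StoreData.  The operation core picks ->issue_yfs_rpc
 * or ->issue_afs_rpc according to whether the fileserver speaks the YFS
 * variant of the protocol and calls ->success once the reply has been
 * received and parsed.
 */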
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

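	/* Unless the file is being filled with fresh content locally, only
	 * the exact region the user dirtied may be stored, so a primary page
	 * whose dirty region stops short of the page end cannot be extended
	 * with the pages that follow it.
	 */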
	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
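 * - write errors are not returned to the caller here: they are recorded
 *   against the mapping by afs_write_back_from_locked_page() and surface
 *   later through fsync() or a subsequent write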
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

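	/* In the cyclic case, sweep from wherever the last pass left off to
	 * the end of the file, then wrap round and cover 0...start so that
	 * successive calls service the whole mapping.
	 */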
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  vnode->wb_lock is taken by this
 * function itself, so the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}
 940