linux/fs/afs/write.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}
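
/* Note on dirty tracking: for a dirty page, page->private packs the byte
 * range [from, to) that has been written, as extracted and rebuilt by the
 * afs_page_dirty_from(), afs_page_dirty_to() and afs_page_dirty() helpers
 * (see internal.h for the exact encoding).  Roughly, writing bytes
 * 0x100-0x2ff of a clean page records from = 0x100, to = 0x300, so that only
 * that slice need be sent to the server on writeback.
 */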

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
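
/* For example, a 100-byte write into a page that is not yet uptodate causes
 * afs_write_begin() below to fill the whole page first (len != PAGE_SIZE);
 * if the page lies entirely at or beyond the current i_size, afs_fill_page()
 * skips the fetch and just zeroes the requested bytes instead.
 */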

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}
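
/* Worked example for the merge check above: with an existing dirty range of
 * [f, t) = [0x100, 0x300) on the page, a new write of [from, to) =
 * [0x2c0, 0x400) overlaps and is accepted, afs_write_end() widening the
 * recorded range to [0x100, 0x400).  A disjoint write such as [0x800, 0x900)
 * instead takes the flush_conflicting_write path, unless
 * AFS_VNODE_NEW_CONTENT permits the gap between the two to be bridged.
 */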

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;
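
	/* Optimistic unlocked check first; if this write looks like it
	 * extends the file, recheck and update under cb_lock so that two
	 * racing writers cannot move i_size backwards (check, lock,
	 * re-check).  E.g. writes ending at 0x3000 and 0x5000 must leave
	 * i_size at 0x5000 whichever order they land in.
	 */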
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		priv = afs_page_dirty(from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
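
/* afs_kill_pages(), afs_redirty_pages() and afs_pages_written_back() below
 * all walk their range the same way: gather up to PAGEVEC_SIZE contiguous
 * pages at a time with find_get_pages_contig(), process the batch, release
 * it and advance 'first', looping until the whole range has been covered.
 */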

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
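
/* Typical usage, as in afs_store_data() below: start with *_wbk = NULL to
 * take the first key on vnode->wb_keys that still validates, and call again
 * with the previous key left in *_wbk to advance to the next one if the
 * server rejects it; the reference on the old key is dropped here.
 */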

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}
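
/* The byte count above follows the parameter convention of afs_store_data():
 * with 4KiB pages, first = 2, last = 4, first_offset = 512 and last_to = 1024
 * describes the byte span [2 * 4096 + 512, 4 * 4096 + 1024), i.e. 8704 bytes.
 */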

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(priv);
	to = afs_page_dirty_to(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
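
/* Note the error policy above: permission and key errors and anything
 * potentially transient leave the pages dirty (afs_redirty_pages()) so that
 * a later pass can retry, whereas errors that cannot succeed on retry
 * (-ENOENT, -EROFS, -EFBIG and the like) discard the pages with
 * afs_kill_pages() and record the failure against the mapping.
 */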

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
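
/* Because afs_write_back_from_locked_page() returns the number of pages it
 * wrote (not just 1 for the primary page), the wbc->nr_to_write accounting
 * above is decremented a batch at a time: a single StoreData RPC covering,
 * say, 32 contiguous dirty pages consumes 32 units of the writeback quota.
 */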

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
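
/* In the range_cyclic case above, writeback resumes where the previous pass
 * left off: e.g. with writeback_index = 100 the first region call covers
 * pages 100 onwards, a second call then wraps round to cover the pages
 * before that index, and writeback_index is updated for the next cycle.
 */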

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}
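
/* Note that afs_page_mkwrite() has to assume the whole page will be dirtied,
 * hence afs_page_dirty(0, PAGE_SIZE) above: writes through a shared mapping
 * are made by direct CPU stores, so there is no write_begin/write_end pair
 * to tell us which bytes actually changed.
 */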

/*
 * Prune the keys cached for writeback.  Unused keys are moved to a graveyard
 * list under vnode->wb_lock and then released outside the lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}