linux/fs/afs/write.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* handling of writes to regular files and writing back to the server
   3 *
   4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   5 * Written by David Howells (dhowells@redhat.com)
   6 */
   7
   8#include <linux/backing-dev.h>
   9#include <linux/slab.h>
  10#include <linux/fs.h>
  11#include <linux/pagemap.h>
  12#include <linux/writeback.h>
  13#include <linux/pagevec.h>
  14#include <linux/netfs.h>
  15#include <linux/fscache.h>
  16#include "internal.h"
  17
  18/*
  19 * mark a page as having been made dirty and thus needing writeback
  20 */
  21int afs_set_page_dirty(struct page *page)
  22{
  23        _enter("");
  24        return __set_page_dirty_nobuffers(page);
  25}
  26
  27/*
  28 * prepare to perform part of a write to a page
  29 */
  30int afs_write_begin(struct file *file, struct address_space *mapping,
  31                    loff_t pos, unsigned len, unsigned flags,
  32                    struct page **_page, void **fsdata)
  33{
  34        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
  35        struct page *page;
  36        unsigned long priv;
  37        unsigned f, from;
  38        unsigned t, to;
  39        pgoff_t index;
  40        int ret;
  41
  42        _enter("{%llx:%llu},%llx,%x",
  43               vnode->fid.vid, vnode->fid.vnode, pos, len);
  44
  45        /* Prefetch area to be written into the cache if we're caching this
  46         * file.  We need to do this before we get a lock on the page in case
  47         * there's more than one writer competing for the same cache block.
  48         */
  49        ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
  50                                &afs_req_ops, NULL);
  51        if (ret < 0)
  52                return ret;
  53
  54        index = page->index;
  55        from = pos - index * PAGE_SIZE;
  56        to = from + len;
  57
  58try_again:
  59        /* See if this page is already partially written in a way that we can
  60         * merge the new write with.
  61         */
  62        if (PagePrivate(page)) {
  63                priv = page_private(page);
  64                f = afs_page_dirty_from(page, priv);
  65                t = afs_page_dirty_to(page, priv);
  66                ASSERTCMP(f, <=, t);
  67
  68                if (PageWriteback(page)) {
  69                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
  70                        goto flush_conflicting_write;
  71                }
  72                /* If the file is being filled locally, allow inter-write
  73                 * spaces to be merged into writes.  If it's not, only write
  74                 * back what the user gives us.
  75                 */
  76                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
  77                    (to < f || from > t))
  78                        goto flush_conflicting_write;
  79        }
  80
  81        *_page = page;
  82        _leave(" = 0");
  83        return 0;
  84
  85        /* The previous write and this write aren't adjacent or overlapping, so
  86         * flush the page out.
  87         */
  88flush_conflicting_write:
  89        _debug("flush conflict");
  90        ret = write_one_page(page);
  91        if (ret < 0)
  92                goto error;
  93
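             /* write_one_page() unlocks the page when it completes, so retake
              * the lock before retrying the merge check above.
              */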
  94        ret = lock_page_killable(page);
  95        if (ret < 0)
  96                goto error;
  97        goto try_again;
  98
  99error:
 100        put_page(page);
 101        _leave(" = %d", ret);
 102        return ret;
 103}
 104
 105/*
 106 * finalise part of a write to a page
 107 */
 108int afs_write_end(struct file *file, struct address_space *mapping,
 109                  loff_t pos, unsigned len, unsigned copied,
 110                  struct page *page, void *fsdata)
 111{
 112        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 113        unsigned long priv;
 114        unsigned int f, from = pos & (thp_size(page) - 1);
 115        unsigned int t, to = from + copied;
 116        loff_t i_size, maybe_i_size;
 117
 118        _enter("{%llx:%llu},{%lx}",
 119               vnode->fid.vid, vnode->fid.vnode, page->index);
 120
 121        if (!PageUptodate(page)) {
 122                if (copied < len) {
 123                        copied = 0;
 124                        goto out;
 125                }
 126
 127                SetPageUptodate(page);
 128        }
 129
 130        if (copied == 0)
 131                goto out;
 132
 133        maybe_i_size = pos + copied;
 134
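             /* If this write extends the file, update i_size.  The lockless
              * check covers the common case; the size is rechecked under
              * cb_lock so we never overwrite a larger value set by a racing
              * extender.
              */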
 135        i_size = i_size_read(&vnode->vfs_inode);
 136        if (maybe_i_size > i_size) {
 137                write_seqlock(&vnode->cb_lock);
 138                i_size = i_size_read(&vnode->vfs_inode);
 139                if (maybe_i_size > i_size)
 140                        i_size_write(&vnode->vfs_inode, maybe_i_size);
 141                write_sequnlock(&vnode->cb_lock);
 142        }
 143
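             /* Record the dirty region in page->private, merging it with any
              * region already noted there, so that writeback need only send
              * the modified bytes to the server.
              */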
 144        if (PagePrivate(page)) {
 145                priv = page_private(page);
 146                f = afs_page_dirty_from(page, priv);
 147                t = afs_page_dirty_to(page, priv);
 148                if (from < f)
 149                        f = from;
 150                if (to > t)
 151                        t = to;
 152                priv = afs_page_dirty(page, f, t);
 153                set_page_private(page, priv);
 154                trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
 155        } else {
 156                priv = afs_page_dirty(page, from, to);
 157                attach_page_private(page, (void *)priv);
 158                trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
 159        }
 160
 161        if (set_page_dirty(page))
 162                _debug("dirtied %lx", page->index);
 163
 164out:
 165        unlock_page(page);
 166        put_page(page);
 167        return copied;
 168}
 169
 170/*
 171 * kill all the pages in the given range
 172 */
 173static void afs_kill_pages(struct address_space *mapping,
 174                           loff_t start, loff_t len)
 175{
 176        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
 177        struct pagevec pv;
 178        unsigned int loop, psize;
 179
 180        _enter("{%llx:%llu},%llx @%llx",
 181               vnode->fid.vid, vnode->fid.vnode, len, start);
 182
 183        pagevec_init(&pv);
 184
 185        do {
 186                _debug("kill %llx @%llx", len, start);
 187
 188                pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
 189                                              PAGEVEC_SIZE, pv.pages);
 190                if (pv.nr == 0)
 191                        break;
 192
 193                for (loop = 0; loop < pv.nr; loop++) {
 194                        struct page *page = pv.pages[loop];
 195
 196                        if (page->index * PAGE_SIZE >= start + len)
 197                                break;
 198
 199                        psize = thp_size(page);
 200                        start += psize;
 201                        len -= psize;
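                             /* Drop the page from the cache entirely so that a
                              * later read has to refetch it from the server.
                              */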
 202                        ClearPageUptodate(page);
 203                        end_page_writeback(page);
 204                        lock_page(page);
 205                        generic_error_remove_page(mapping, page);
 206                        unlock_page(page);
 207                }
 208
 209                __pagevec_release(&pv);
 210        } while (len > 0);
 211
 212        _leave("");
 213}
 214
 215/*
 216 * Redirty all the pages in a given range.
 217 */
 218static void afs_redirty_pages(struct writeback_control *wbc,
 219                              struct address_space *mapping,
 220                              loff_t start, loff_t len)
 221{
 222        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
 223        struct pagevec pv;
 224        unsigned int loop, psize;
 225
 226        _enter("{%llx:%llu},%llx @%llx",
 227               vnode->fid.vid, vnode->fid.vnode, len, start);
 228
 229        pagevec_init(&pv);
 230
 231        do {
 232                _debug("redirty %llx @%llx", len, start);
 233
 234                pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
 235                                              PAGEVEC_SIZE, pv.pages);
 236                if (pv.nr == 0)
 237                        break;
 238
 239                for (loop = 0; loop < pv.nr; loop++) {
 240                        struct page *page = pv.pages[loop];
 241
 242                        if (page->index * PAGE_SIZE >= start + len)
 243                                break;
 244
 245                        psize = thp_size(page);
 246                        start += psize;
 247                        len -= psize;
 248                        redirty_page_for_writepage(wbc, page);
 249                        end_page_writeback(page);
 250                }
 251
 252                __pagevec_release(&pv);
 253        } while (len > 0);
 254
 255        _leave("");
 256}
 257
 258/*
 259 * completion of write to server
 260 */
 261static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
 262{
 263        struct address_space *mapping = vnode->vfs_inode.i_mapping;
 264        struct page *page;
 265        pgoff_t end;
 266
 267        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
 268
 269        _enter("{%llx:%llu},{%x @%llx}",
 270               vnode->fid.vid, vnode->fid.vnode, len, start);
 271
 272        rcu_read_lock();
 273
 274        end = (start + len - 1) / PAGE_SIZE;
 275        xas_for_each(&xas, page, end) {
 276                if (!PageWriteback(page)) {
 277                        kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
 278                        ASSERT(PageWriteback(page));
 279                }
 280
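                     /* Drop the dirty-region record and end the writeback
                      * state on each page covered by the store.
                      */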
 281                trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
 282                detach_page_private(page);
 283                page_endio(page, true, 0);
 284        }
 285
 286        rcu_read_unlock();
 287
 288        afs_prune_wb_keys(vnode);
 289        _leave("");
 290}
 291
 292/*
 293 * Find a key to use for the writeback.  We cached the keys used to author the
 294 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 295 * and we need to start from there if it's set.
 296 */
 297static int afs_get_writeback_key(struct afs_vnode *vnode,
 298                                 struct afs_wb_key **_wbk)
 299{
 300        struct afs_wb_key *wbk = NULL;
 301        struct list_head *p;
 302        int ret = -ENOKEY, ret2;
 303
 304        spin_lock(&vnode->wb_lock);
 305        if (*_wbk)
 306                p = (*_wbk)->vnode_link.next;
 307        else
 308                p = vnode->wb_keys.next;
 309
 310        while (p != &vnode->wb_keys) {
 311                wbk = list_entry(p, struct afs_wb_key, vnode_link);
 312                _debug("wbk %u", key_serial(wbk->key));
 313                ret2 = key_validate(wbk->key);
 314                if (ret2 == 0) {
 315                        refcount_inc(&wbk->usage);
 316                        _debug("USE WB KEY %u", key_serial(wbk->key));
 317                        break;
 318                }
 319
 320                wbk = NULL;
 321                if (ret == -ENOKEY)
 322                        ret = ret2;
 323                p = p->next;
 324        }
 325
 326        spin_unlock(&vnode->wb_lock);
 327        if (*_wbk)
 328                afs_put_wb_key(*_wbk);
 329        *_wbk = wbk;
  330        return wbk ? 0 : ret;
 331}
 332
 333static void afs_store_data_success(struct afs_operation *op)
 334{
 335        struct afs_vnode *vnode = op->file[0].vnode;
 336
 337        op->ctime = op->file[0].scb.status.mtime_client;
 338        afs_vnode_commit_status(op, &op->file[0]);
 339        if (op->error == 0) {
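                     /* afs_launder_page() does its own page cleanup, so only
                      * complete writeback on the pages for ordinary stores.
                      */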
 340                if (!op->store.laundering)
 341                        afs_pages_written_back(vnode, op->store.pos, op->store.size);
 342                afs_stat_v(vnode, n_stores);
 343                atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
 344        }
 345}
 346
 347static const struct afs_operation_ops afs_store_data_operation = {
 348        .issue_afs_rpc  = afs_fs_store_data,
 349        .issue_yfs_rpc  = yfs_fs_store_data,
 350        .success        = afs_store_data_success,
 351};
 352
 353/*
 354 * write to a file
 355 */
 356static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
 357                          bool laundering)
 358{
 359        struct afs_operation *op;
 360        struct afs_wb_key *wbk = NULL;
 361        loff_t size = iov_iter_count(iter), i_size;
 362        int ret = -ENOKEY;
 363
 364        _enter("%s{%llx:%llu.%u},%llx,%llx",
 365               vnode->volume->name,
 366               vnode->fid.vid,
 367               vnode->fid.vnode,
 368               vnode->fid.unique,
 369               size, pos);
 370
 371        ret = afs_get_writeback_key(vnode, &wbk);
 372        if (ret) {
 373                _leave(" = %d [no keys]", ret);
 374                return ret;
 375        }
 376
 377        op = afs_alloc_operation(wbk->key, vnode->volume);
 378        if (IS_ERR(op)) {
 379                afs_put_wb_key(wbk);
 380                return -ENOMEM;
 381        }
 382
 383        i_size = i_size_read(&vnode->vfs_inode);
 384
 385        afs_op_set_vnode(op, 0, vnode);
 386        op->file[0].dv_delta = 1;
 387        op->file[0].modification = true;
 388        op->store.write_iter = iter;
 389        op->store.pos = pos;
 390        op->store.size = size;
 391        op->store.i_size = max(pos + size, i_size);
 392        op->store.laundering = laundering;
 393        op->mtime = vnode->vfs_inode.i_mtime;
 394        op->flags |= AFS_OPERATION_UNINTR;
 395        op->ops = &afs_store_data_operation;
 396
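             /* Try the store with each cached writeback key in turn until one
              * is accepted or we run out of usable keys.
              */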
 397try_next_key:
 398        afs_begin_vnode_operation(op);
 399        afs_wait_for_operation(op);
 400
 401        switch (op->error) {
 402        case -EACCES:
 403        case -EPERM:
 404        case -ENOKEY:
 405        case -EKEYEXPIRED:
 406        case -EKEYREJECTED:
 407        case -EKEYREVOKED:
 408                _debug("next");
 409
 410                ret = afs_get_writeback_key(vnode, &wbk);
 411                if (ret == 0) {
 412                        key_put(op->key);
 413                        op->key = key_get(wbk->key);
 414                        goto try_next_key;
 415                }
 416                break;
 417        }
 418
 419        afs_put_wb_key(wbk);
 420        _leave(" = %d", op->error);
 421        return afs_put_operation(op);
 422}
 423
 424/*
 425 * Extend the region to be written back to include subsequent contiguously
 426 * dirty pages if possible, but don't sleep while doing so.
 427 *
 428 * If this page holds new content, then we can include filler zeros in the
 429 * writeback.
 430 */
 431static void afs_extend_writeback(struct address_space *mapping,
 432                                 struct afs_vnode *vnode,
 433                                 long *_count,
 434                                 loff_t start,
 435                                 loff_t max_len,
 436                                 bool new_content,
 437                                 unsigned int *_len)
 438{
 439        struct pagevec pvec;
 440        struct page *page;
 441        unsigned long priv;
 442        unsigned int psize, filler = 0;
 443        unsigned int f, t;
 444        loff_t len = *_len;
 445        pgoff_t index = (start + len) / PAGE_SIZE;
 446        bool stop = true;
 447        unsigned int i;
 448
 449        XA_STATE(xas, &mapping->i_pages, index);
 450        pagevec_init(&pvec);
 451
 452        do {
 453                /* Firstly, we gather up a batch of contiguous dirty pages
 454                 * under the RCU read lock - but we can't clear the dirty flags
 455                 * there if any of those pages are mapped.
 456                 */
 457                rcu_read_lock();
 458
 459                xas_for_each(&xas, page, ULONG_MAX) {
 460                        stop = true;
 461                        if (xas_retry(&xas, page))
 462                                continue;
 463                        if (xa_is_value(page))
 464                                break;
 465                        if (page->index != index)
 466                                break;
 467
 468                        if (!page_cache_get_speculative(page)) {
 469                                xas_reset(&xas);
 470                                continue;
 471                        }
 472
 473                        /* Has the page moved or been split? */
 474                        if (unlikely(page != xas_reload(&xas)))
 475                                break;
 476
 477                        if (!trylock_page(page))
 478                                break;
 479                        if (!PageDirty(page) || PageWriteback(page)) {
 480                                unlock_page(page);
 481                                break;
 482                        }
 483
 484                        psize = thp_size(page);
 485                        priv = page_private(page);
 486                        f = afs_page_dirty_from(page, priv);
 487                        t = afs_page_dirty_to(page, priv);
 488                        if (f != 0 && !new_content) {
 489                                unlock_page(page);
 490                                break;
 491                        }
 492
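                             /* Extend the region over this page's dirty data.
                              * 'filler' is the clean tail of the previous page
                              * and is only counted once a further page extends
                              * the region beyond it.
                              */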
 493                        len += filler + t;
 494                        filler = psize - t;
 495                        if (len >= max_len || *_count <= 0)
 496                                stop = true;
 497                        else if (t == psize || new_content)
 498                                stop = false;
 499
 500                        index += thp_nr_pages(page);
 501                        if (!pagevec_add(&pvec, page))
 502                                break;
 503                        if (stop)
 504                                break;
 505                }
 506
 507                if (!stop)
 508                        xas_pause(&xas);
 509                rcu_read_unlock();
 510
 511                /* Now, if we obtained any pages, we can shift them to being
 512                 * writable and mark them for caching.
 513                 */
 514                if (!pagevec_count(&pvec))
 515                        break;
 516
 517                for (i = 0; i < pagevec_count(&pvec); i++) {
 518                        page = pvec.pages[i];
 519                        trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);
 520
 521                        if (!clear_page_dirty_for_io(page))
 522                                BUG();
 523                        if (test_set_page_writeback(page))
 524                                BUG();
 525
 526                        *_count -= thp_nr_pages(page);
 527                        unlock_page(page);
 528                }
 529
 530                pagevec_release(&pvec);
 531                cond_resched();
 532        } while (!stop);
 533
 534        *_len = len;
 535}
 536
 537/*
 538 * Synchronously write back the locked page and any subsequent non-locked dirty
 539 * pages.
 540 */
 541static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
 542                                               struct writeback_control *wbc,
 543                                               struct page *page,
 544                                               loff_t start, loff_t end)
 545{
 546        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
 547        struct iov_iter iter;
 548        unsigned long priv;
 549        unsigned int offset, to, len, max_len;
 550        loff_t i_size = i_size_read(&vnode->vfs_inode);
 551        bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
 552        long count = wbc->nr_to_write;
 553        int ret;
 554
 555        _enter(",%lx,%llx-%llx", page->index, start, end);
 556
 557        if (test_set_page_writeback(page))
 558                BUG();
 559
 560        count -= thp_nr_pages(page);
 561
 562        /* Find all consecutive lockable dirty pages that have contiguous
 563         * written regions, stopping when we find a page that is not
 564         * immediately lockable, is not dirty or is missing, or we reach the
 565         * end of the range.
 566         */
 567        priv = page_private(page);
 568        offset = afs_page_dirty_from(page, priv);
 569        to = afs_page_dirty_to(page, priv);
 570        trace_afs_page_dirty(vnode, tracepoint_string("store"), page);
 571
 572        len = to - offset;
 573        start += offset;
 574        if (start < i_size) {
 575                /* Trim the write to the EOF; the extra data is ignored.  Also
 576                 * put an upper limit on the size of a single storedata op.
 577                 */
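                     /* 65536 * 4096 bytes: cap each StoreData op at 256MiB. */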
 578                max_len = 65536 * 4096;
 579                max_len = min_t(unsigned long long, max_len, end - start + 1);
 580                max_len = min_t(unsigned long long, max_len, i_size - start);
 581
 582                if (len < max_len &&
 583                    (to == thp_size(page) || new_content))
 584                        afs_extend_writeback(mapping, vnode, &count,
 585                                             start, max_len, new_content, &len);
 586                len = min_t(loff_t, len, max_len);
 587        }
 588
 589        /* We now have a contiguous set of dirty pages, each with writeback
 590         * set; the first page is still locked at this point, but all the rest
 591         * have been unlocked.
 592         */
 593        unlock_page(page);
 594
 595        if (start < i_size) {
 596                _debug("write back %x @%llx [%llx]", len, start, i_size);
 597
 598                iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
 599                ret = afs_store_data(vnode, &iter, start, false);
 600        } else {
 601                _debug("write discard %x @%llx [%llx]", len, start, i_size);
 602
 603                /* The dirty region was entirely beyond the EOF. */
 604                afs_pages_written_back(vnode, start, len);
 605                ret = 0;
 606        }
 607
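             /* Permission and quota failures leave the pages dirty so they can
              * be retried later; unrecoverable errors discard them instead.
              */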
 608        switch (ret) {
 609        case 0:
 610                wbc->nr_to_write = count;
 611                ret = len;
 612                break;
 613
 614        default:
 615                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
 616                fallthrough;
 617        case -EACCES:
 618        case -EPERM:
 619        case -ENOKEY:
 620        case -EKEYEXPIRED:
 621        case -EKEYREJECTED:
 622        case -EKEYREVOKED:
 623                afs_redirty_pages(wbc, mapping, start, len);
 624                mapping_set_error(mapping, ret);
 625                break;
 626
 627        case -EDQUOT:
 628        case -ENOSPC:
 629                afs_redirty_pages(wbc, mapping, start, len);
 630                mapping_set_error(mapping, -ENOSPC);
 631                break;
 632
 633        case -EROFS:
 634        case -EIO:
 635        case -EREMOTEIO:
 636        case -EFBIG:
 637        case -ENOENT:
 638        case -ENOMEDIUM:
 639        case -ENXIO:
 640                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
 641                afs_kill_pages(mapping, start, len);
 642                mapping_set_error(mapping, ret);
 643                break;
 644        }
 645
 646        _leave(" = %d", ret);
 647        return ret;
 648}
 649
 650/*
 651 * write a page back to the server
 652 * - the caller locked the page for us
 653 */
 654int afs_writepage(struct page *page, struct writeback_control *wbc)
 655{
 656        ssize_t ret;
 657        loff_t start;
 658
 659        _enter("{%lx},", page->index);
 660
 661        start = page->index * PAGE_SIZE;
 662        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
 663                                              start, LLONG_MAX - start);
 664        if (ret < 0) {
 665                _leave(" = %zd", ret);
 666                return ret;
 667        }
 668
 669        _leave(" = 0");
 670        return 0;
 671}
 672
 673/*
 674 * write a region of pages back to the server
 675 */
 676static int afs_writepages_region(struct address_space *mapping,
 677                                 struct writeback_control *wbc,
 678                                 loff_t start, loff_t end, loff_t *_next)
 679{
 680        struct page *page;
 681        ssize_t ret;
 682        int n;
 683
 684        _enter("%llx,%llx,", start, end);
 685
 686        do {
 687                pgoff_t index = start / PAGE_SIZE;
 688
 689                n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
 690                                             PAGECACHE_TAG_DIRTY, 1, &page);
 691                if (!n)
 692                        break;
 693
 694                start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */
 695
 696                _debug("wback %lx", page->index);
 697
 698                /* At this point we hold neither the i_pages lock nor the
 699                 * page lock: the page may be truncated or invalidated
 700                 * (changing page->mapping to NULL), or even swizzled
 701                 * back from swapper_space to tmpfs file mapping
 702                 */
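                     /* A data-integrity sync must wait for the page lock; an
                      * opportunistic flush backs off rather than block.
                      */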
 703                if (wbc->sync_mode != WB_SYNC_NONE) {
 704                        ret = lock_page_killable(page);
 705                        if (ret < 0) {
 706                                put_page(page);
 707                                return ret;
 708                        }
 709                } else {
 710                        if (!trylock_page(page)) {
 711                                put_page(page);
 712                                return 0;
 713                        }
 714                }
 715
 716                if (page->mapping != mapping || !PageDirty(page)) {
 717                        start += thp_size(page);
 718                        unlock_page(page);
 719                        put_page(page);
 720                        continue;
 721                }
 722
 723                if (PageWriteback(page)) {
 724                        unlock_page(page);
 725                        if (wbc->sync_mode != WB_SYNC_NONE)
 726                                wait_on_page_writeback(page);
 727                        put_page(page);
 728                        continue;
 729                }
 730
 731                if (!clear_page_dirty_for_io(page))
 732                        BUG();
 733                ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
 734                put_page(page);
 735                if (ret < 0) {
 736                        _leave(" = %zd", ret);
 737                        return ret;
 738                }
 739
 740                start += ret;
 741
 742                cond_resched();
 743        } while (wbc->nr_to_write > 0);
 744
 745        *_next = start;
 746        _leave(" = 0 [%llx]", *_next);
 747        return 0;
 748}
 749
 750/*
 751 * write some of the pending data back to the server
 752 */
 753int afs_writepages(struct address_space *mapping,
 754                   struct writeback_control *wbc)
 755{
 756        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
 757        loff_t start, next;
 758        int ret;
 759
 760        _enter("");
 761
 762        /* We have to be careful as we can end up racing with setattr()
 763         * truncating the pagecache since the caller doesn't take a lock here
 764         * to prevent it.
 765         */
 766        if (wbc->sync_mode == WB_SYNC_ALL)
 767                down_read(&vnode->validate_lock);
 768        else if (!down_read_trylock(&vnode->validate_lock))
 769                return 0;
 770
 771        if (wbc->range_cyclic) {
 772                start = mapping->writeback_index * PAGE_SIZE;
 773                ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
 774                if (ret == 0) {
 775                        mapping->writeback_index = next / PAGE_SIZE;
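                             /* If the scan began part-way through the mapping,
                              * wrap round and write out the earlier pages too.
                              */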
 776                        if (start > 0 && wbc->nr_to_write > 0) {
 777                                ret = afs_writepages_region(mapping, wbc, 0,
 778                                                            start, &next);
 779                                if (ret == 0)
 780                                        mapping->writeback_index =
 781                                                next / PAGE_SIZE;
 782                        }
 783                }
 784        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
 785                ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
 786                if (wbc->nr_to_write > 0 && ret == 0)
 787                        mapping->writeback_index = next / PAGE_SIZE;
 788        } else {
 789                ret = afs_writepages_region(mapping, wbc,
 790                                            wbc->range_start, wbc->range_end, &next);
 791        }
 792
 793        up_read(&vnode->validate_lock);
 794        _leave(" = %d", ret);
 795        return ret;
 796}
 797
 798/*
 799 * write to an AFS file
 800 */
 801ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
 802{
 803        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
 804        ssize_t result;
 805        size_t count = iov_iter_count(from);
 806
 807        _enter("{%llx:%llu},{%zu},",
 808               vnode->fid.vid, vnode->fid.vnode, count);
 809
 810        if (IS_SWAPFILE(&vnode->vfs_inode)) {
 811                printk(KERN_INFO
 812                       "AFS: Attempt to write to active swap file!\n");
 813                return -EBUSY;
 814        }
 815
 816        if (!count)
 817                return 0;
 818
 819        result = generic_file_write_iter(iocb, from);
 820
 821        _leave(" = %zd", result);
 822        return result;
 823}
 824
 825/*
 826 * flush any dirty pages for this process, and check for write errors.
 827 * - the return status from this call provides a reliable indication of
 828 *   whether any write errors occurred for this process.
 829 */
 830int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 831{
 832        struct inode *inode = file_inode(file);
 833        struct afs_vnode *vnode = AFS_FS_I(inode);
 834
 835        _enter("{%llx:%llu},{n=%pD},%d",
 836               vnode->fid.vid, vnode->fid.vnode, file,
 837               datasync);
 838
 839        return file_write_and_wait_range(file, start, end);
 840}
 841
 842/*
 843 * notification that a previously read-only page is about to become writable
 844 * - if it returns an error, the caller will deliver a bus error signal
 845 */
 846vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 847{
 848        struct page *page = thp_head(vmf->page);
 849        struct file *file = vmf->vma->vm_file;
 850        struct inode *inode = file_inode(file);
 851        struct afs_vnode *vnode = AFS_FS_I(inode);
 852        unsigned long priv;
 853        vm_fault_t ret = VM_FAULT_RETRY;
 854
 855        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 856
 857        sb_start_pagefault(inode->i_sb);
 858
 859        /* Wait for the page to be written to the cache before we allow it to
 860         * be modified.  We then assume the entire page will need writing back.
 861         */
 862#ifdef CONFIG_AFS_FSCACHE
 863        if (PageFsCache(page) &&
 864            wait_on_page_fscache_killable(page) < 0)
 865                goto out;
 866#endif
 867
 868        if (wait_on_page_writeback_killable(page))
 869                goto out;
 870
 871        if (lock_page_killable(page) < 0)
 872                goto out;
 873
 874        /* We mustn't change page->private until writeback is complete as that
 875         * details the portion of the page we need to write back and we might
 876         * need to redirty the page if there's a problem.
 877         */
 878        if (wait_on_page_writeback_killable(page) < 0) {
 879                unlock_page(page);
 880                goto out;
 881        }
 882
 883        priv = afs_page_dirty(page, 0, thp_size(page));
 884        priv = afs_page_dirty_mmapped(priv);
 885        if (PagePrivate(page)) {
 886                set_page_private(page, priv);
 887                trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
 888        } else {
 889                attach_page_private(page, (void *)priv);
 890                trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
 891        }
 892        file_update_time(file);
 893
 894        ret = VM_FAULT_LOCKED;
 895out:
 896        sb_end_pagefault(inode->i_sb);
 897        return ret;
 898}
 899
 900/*
 901 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 902 */
 903void afs_prune_wb_keys(struct afs_vnode *vnode)
 904{
 905        LIST_HEAD(graveyard);
 906        struct afs_wb_key *wbk, *tmp;
 907
 908        /* Discard unused keys */
 909        spin_lock(&vnode->wb_lock);
 910
 911        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
 912            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
 913                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
 914                        if (refcount_read(&wbk->usage) == 1)
 915                                list_move(&wbk->vnode_link, &graveyard);
 916                }
 917        }
 918
 919        spin_unlock(&vnode->wb_lock);
 920
 921        while (!list_empty(&graveyard)) {
 922                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
 923                list_del(&wbk->vnode_link);
 924                afs_put_wb_key(wbk);
 925        }
 926}
 927
 928/*
 929 * Clean up a page during invalidation.
 930 */
 931int afs_launder_page(struct page *page)
 932{
 933        struct address_space *mapping = page->mapping;
 934        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
 935        struct iov_iter iter;
 936        struct bio_vec bv[1];
 937        unsigned long priv;
 938        unsigned int f, t;
 939        int ret = 0;
 940
 941        _enter("{%lx}", page->index);
 942
 943        priv = page_private(page);
 944        if (clear_page_dirty_for_io(page)) {
 945                f = 0;
 946                t = thp_size(page);
 947                if (PagePrivate(page)) {
 948                        f = afs_page_dirty_from(page, priv);
 949                        t = afs_page_dirty_to(page, priv);
 950                }
 951
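                     /* Write back just the region recorded in page->private,
                      * or the whole page if no region was recorded.
                      */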
 952                bv[0].bv_page = page;
 953                bv[0].bv_offset = f;
 954                bv[0].bv_len = t - f;
 955                iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
 956
 957                trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
 958                ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
 959                                     true);
 960        }
 961
 962        trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
 963        detach_page_private(page);
 964        wait_on_page_fscache(page);
 965        return ret;
 966}
 967