linux/fs/afs/write.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **_page, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        unsigned long priv;
        unsigned f, from;
        unsigned t, to;
        pgoff_t index;
        int ret;

        _enter("{%llx:%llu},%llx,%x",
               vnode->fid.vid, vnode->fid.vnode, pos, len);

        /* Prefetch area to be written into the cache if we're caching this
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
        ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
                                &afs_req_ops, NULL);
        if (ret < 0)
                return ret;

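        /* The page is returned to us locked and with a ref held on it. */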
        index = page->index;
        from = pos - index * PAGE_SIZE;
        to = from + len;

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = afs_page_dirty_from(page, priv);
                t = afs_page_dirty_to(page, priv);
                ASSERTCMP(f, <=, t);

                if (PageWriteback(page)) {
                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
                        goto flush_conflicting_write;
                }
                /* If the file is being filled locally, allow inter-write
                 * spaces to be merged into writes.  If it's not, only write
                 * back what the user gives us.
                 */
                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
                    (to < f || from > t))
                        goto flush_conflicting_write;
        }

        *_page = page;
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        _debug("flush conflict");
        ret = write_one_page(page);
        if (ret < 0)
                goto error;

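        /* write_one_page() unlocks the page, so we have to get the lock back
         * before we can retry the merge check.
         */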
        ret = lock_page_killable(page);
        if (ret < 0)
                goto error;
        goto try_again;

error:
        put_page(page);
        _leave(" = %d", ret);
        return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        unsigned long priv;
        unsigned int f, from = pos & (thp_size(page) - 1);
        unsigned int t, to = from + copied;
        loff_t i_size, maybe_i_size;

        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        if (!PageUptodate(page)) {
                if (copied < len) {
                        copied = 0;
                        goto out;
                }

                SetPageUptodate(page);
        }

        if (copied == 0)
                goto out;

        maybe_i_size = pos + copied;

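        /* Advance the file size if this write extends it, rechecking under
         * the vnode's callback lock so that we don't overwrite a larger size
         * set by a competing writer.
         */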
        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        afs_set_i_size(vnode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }

        if (PagePrivate(page)) {
                priv = page_private(page);
                f = afs_page_dirty_from(page, priv);
                t = afs_page_dirty_to(page, priv);
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
                priv = afs_page_dirty(page, f, t);
                set_page_private(page, priv);
                trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
        } else {
                priv = afs_page_dirty(page, from, to);
                attach_page_private(page, (void *)priv);
                trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
        }

        if (set_page_dirty(page))
                _debug("dirtied %lx", page->index);

out:
        unlock_page(page);
        put_page(page);
        return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           loff_t start, loff_t len)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned int loop, psize;

        _enter("{%llx:%llu},%llx @%llx",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        pagevec_init(&pv);

        do {
                _debug("kill %llx @%llx", len, start);

                pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
                                              PAGEVEC_SIZE, pv.pages);
                if (pv.nr == 0)
                        break;

                for (loop = 0; loop < pv.nr; loop++) {
                        struct page *page = pv.pages[loop];

                        if (page->index * PAGE_SIZE >= start + len)
                                break;

                        psize = thp_size(page);
                        start += psize;
                        len -= psize;
                        ClearPageUptodate(page);
                        end_page_writeback(page);
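                        /* The page must be locked before we can remove it
                         * from the pagecache.
                         */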
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }

                __pagevec_release(&pv);
        } while (len > 0);

        _leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              loff_t start, loff_t len)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned int loop, psize;

        _enter("{%llx:%llu},%llx @%llx",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        pagevec_init(&pv);

        do {
                _debug("redirty %llx @%llx", len, start);

                pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
                                              PAGEVEC_SIZE, pv.pages);
                if (pv.nr == 0)
                        break;

                for (loop = 0; loop < pv.nr; loop++) {
                        struct page *page = pv.pages[loop];

                        if (page->index * PAGE_SIZE >= start + len)
                                break;

                        psize = thp_size(page);
                        start += psize;
                        len -= psize;
                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                }

                __pagevec_release(&pv);
        } while (len > 0);

        _leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
        struct address_space *mapping = vnode->vfs_inode.i_mapping;
        struct page *page;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        _enter("{%llx:%llu},{%x @%llx}",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each(&xas, page, end) {
                if (!PageWriteback(page)) {
                        kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
                        ASSERT(PageWriteback(page));
                }

                trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
                detach_page_private(page);
                page_endio(page, true, 0);
        }

        rcu_read_unlock();

        afs_prune_wb_keys(vnode);
        _leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
                                 struct afs_wb_key **_wbk)
{
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        spin_lock(&vnode->wb_lock);
        if (*_wbk)
                p = (*_wbk)->vnode_link.next;
        else
                p = vnode->wb_keys.next;

        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0) {
                        refcount_inc(&wbk->usage);
                        _debug("USE WB KEY %u", key_serial(wbk->key));
                        break;
                }

                wbk = NULL;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        if (*_wbk)
                afs_put_wb_key(*_wbk);
        *_wbk = wbk;
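        /* Return 0 if we got a usable key, otherwise the first validation
         * error seen (or -ENOKEY if there were no keys to try).
         */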
        return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
        struct afs_vnode *vnode = op->file[0].vnode;

        op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        if (op->error == 0) {
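                /* afs_launder_page() cleans up its page itself, so leave the
                 * pagecache alone when we're laundering.
                 */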
                if (!op->store.laundering)
                        afs_pages_written_back(vnode, op->store.pos, op->store.size);
                afs_stat_v(vnode, n_stores);
                atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
        }
}

static const struct afs_operation_ops afs_store_data_operation = {
        .issue_afs_rpc  = afs_fs_store_data,
        .issue_yfs_rpc  = yfs_fs_store_data,
        .success        = afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
{
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
        loff_t size = iov_iter_count(iter), i_size;
        int ret = -ENOKEY;

        _enter("%s{%llx:%llu.%u},%llx,%llx",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               size, pos);

        ret = afs_get_writeback_key(vnode, &wbk);
        if (ret) {
                _leave(" = %d [no keys]", ret);
                return ret;
        }

        op = afs_alloc_operation(wbk->key, vnode->volume);
        if (IS_ERR(op)) {
                afs_put_wb_key(wbk);
                return -ENOMEM;
        }

        i_size = i_size_read(&vnode->vfs_inode);

        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.write_iter = iter;
        op->store.pos = pos;
        op->store.size = size;
        op->store.i_size = max(pos + size, i_size);
        op->store.laundering = laundering;
        op->mtime = vnode->vfs_inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;

try_next_key:
        afs_begin_vnode_operation(op);
        afs_wait_for_operation(op);

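        /* A security error may just mean that the key we used has expired or
         * been revoked, so rotate to the next cached writeback key and retry
         * the operation with that.
         */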
        switch (op->error) {
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");

                ret = afs_get_writeback_key(vnode, &wbk);
                if (ret == 0) {
                        key_put(op->key);
                        op->key = key_get(wbk->key);
                        goto try_next_key;
                }
                break;
        }

        afs_put_wb_key(wbk);
        _leave(" = %d", op->error);
        return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
                                 struct afs_vnode *vnode,
                                 long *_count,
                                 loff_t start,
                                 loff_t max_len,
                                 bool new_content,
                                 unsigned int *_len)
{
        struct pagevec pvec;
        struct page *page;
        unsigned long priv;
        unsigned int psize, filler = 0;
        unsigned int f, t;
        loff_t len = *_len;
        pgoff_t index = (start + len) / PAGE_SIZE;
        bool stop = true;
        unsigned int i;

        XA_STATE(xas, &mapping->i_pages, index);
        pagevec_init(&pvec);

        do {
                /* Firstly, we gather up a batch of contiguous dirty pages
                 * under the RCU read lock - but we can't clear the dirty flags
                 * there if any of those pages are mapped.
                 */
                rcu_read_lock();

                xas_for_each(&xas, page, ULONG_MAX) {
                        stop = true;
                        if (xas_retry(&xas, page))
                                continue;
                        if (xa_is_value(page))
                                break;
                        if (page->index != index)
                                break;

                        if (!page_cache_get_speculative(page)) {
                                xas_reset(&xas);
                                continue;
                        }

                        /* Has the page moved or been split? */
                        if (unlikely(page != xas_reload(&xas))) {
                                put_page(page);
                                break;
                        }

                        if (!trylock_page(page)) {
                                put_page(page);
                                break;
                        }
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
                                put_page(page);
                                break;
                        }

                        psize = thp_size(page);
                        priv = page_private(page);
                        f = afs_page_dirty_from(page, priv);
                        t = afs_page_dirty_to(page, priv);
                        if (f != 0 && !new_content) {
                                unlock_page(page);
                                put_page(page);
                                break;
                        }

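                        /* Add on any zero-filled gap left by the previous page
                         * plus the dirty part of this one; the trailing part
                         * of this page only gets included if a further dirty
                         * page follows it.
                         */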
                        len += filler + t;
                        filler = psize - t;
                        if (len >= max_len || *_count <= 0)
                                stop = true;
                        else if (t == psize || new_content)
                                stop = false;

                        index += thp_nr_pages(page);
                        if (!pagevec_add(&pvec, page))
                                break;
                        if (stop)
                                break;
                }

                if (!stop)
                        xas_pause(&xas);
                rcu_read_unlock();

                /* Now, if we obtained any pages, we can shift them to being
                 * writable and mark them for caching.
                 */
                if (!pagevec_count(&pvec))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        page = pvec.pages[i];
                        trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();

                        *_count -= thp_nr_pages(page);
                        unlock_page(page);
                }

                pagevec_release(&pvec);
                cond_resched();
        } while (!stop);

        *_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
                                               struct writeback_control *wbc,
                                               struct page *page,
                                               loff_t start, loff_t end)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct iov_iter iter;
        unsigned long priv;
        unsigned int offset, to, len, max_len;
        loff_t i_size = i_size_read(&vnode->vfs_inode);
        bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        long count = wbc->nr_to_write;
        int ret;

        _enter(",%lx,%llx-%llx", page->index, start, end);

        if (test_set_page_writeback(page))
                BUG();

        count -= thp_nr_pages(page);

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        priv = page_private(page);
        offset = afs_page_dirty_from(page, priv);
        to = afs_page_dirty_to(page, priv);
        trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

        len = to - offset;
        start += offset;
        if (start < i_size) {
                /* Trim the write to the EOF; the extra data is ignored.  Also
                 * put an upper limit on the size of a single storedata op.
                 */
                max_len = 65536 * 4096;
                max_len = min_t(unsigned long long, max_len, end - start + 1);
                max_len = min_t(unsigned long long, max_len, i_size - start);

                if (len < max_len &&
                    (to == thp_size(page) || new_content))
                        afs_extend_writeback(mapping, vnode, &count,
                                             start, max_len, new_content, &len);
                len = min_t(loff_t, len, max_len);
        }

        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        unlock_page(page);

        if (start < i_size) {
                _debug("write back %x @%llx [%llx]", len, start, i_size);

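                /* Point an iterator at the dirty region of the pagecache and
                 * write it to the server.
                 */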
                iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
                ret = afs_store_data(vnode, &iter, start, false);
        } else {
                _debug("write discard %x @%llx [%llx]", len, start, i_size);

                /* The dirty region was entirely beyond the EOF. */
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }

        switch (ret) {
        case 0:
                wbc->nr_to_write = count;
                ret = len;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                fallthrough;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_redirty_pages(wbc, mapping, start, len);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, start, len);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, start, len);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        ssize_t ret;
        loff_t start;

        _enter("{%lx},", page->index);

        start = page->index * PAGE_SIZE;
        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
                                              start, LLONG_MAX - start);
        if (ret < 0) {
                _leave(" = %zd", ret);
                return ret;
        }

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 loff_t start, loff_t end, loff_t *_next)
{
        struct page *page;
        ssize_t ret;
        int n;

        _enter("%llx,%llx,", start, end);

        do {
                pgoff_t index = start / PAGE_SIZE;

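                /* Pull the next dirty page out of the range; the writeback
                 * from it will then be extended over any contiguously dirty
                 * pages that follow it.
                 */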
                n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
                                             PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */

                _debug("wback %lx", page->index);

                /* At this point we hold neither the i_pages lock nor the
                 * page lock: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled
                 * back from swapper_space to tmpfs file mapping
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        ret = lock_page_killable(page);
                        if (ret < 0) {
                                put_page(page);
                                return ret;
                        }
                } else {
                        if (!trylock_page(page)) {
                                put_page(page);
                                return 0;
                        }
                }

                if (page->mapping != mapping || !PageDirty(page)) {
                        start += thp_size(page);
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %zd", ret);
                        return ret;
                }

                start += ret;

                cond_resched();
        } while (wbc->nr_to_write > 0);

        *_next = start;
        _leave(" = 0 [%llx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        loff_t start, next;
        int ret;

        _enter("");

        /* We have to be careful as we can end up racing with setattr()
         * truncating the pagecache since the caller doesn't take a lock here
         * to prevent it.
         */
        if (wbc->sync_mode == WB_SYNC_ALL)
                down_read(&vnode->validate_lock);
        else if (!down_read_trylock(&vnode->validate_lock))
                return 0;

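        /* In the cyclic case, carry on from wherever writeback got to last
         * time and, if there's still quota left, wrap round and do the start
         * of the file too.
         */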
        if (wbc->range_cyclic) {
                start = mapping->writeback_index * PAGE_SIZE;
                ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
                if (ret == 0) {
                        mapping->writeback_index = next / PAGE_SIZE;
                        if (start > 0 && wbc->nr_to_write > 0) {
                                ret = afs_writepages_region(mapping, wbc, 0,
                                                            start, &next);
                                if (ret == 0)
                                        mapping->writeback_index =
                                                next / PAGE_SIZE;
                        }
                }
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
                if (wbc->nr_to_write > 0 && ret == 0)
                        mapping->writeback_index = next / PAGE_SIZE;
        } else {
                ret = afs_writepages_region(mapping, wbc,
                                            wbc->range_start, wbc->range_end, &next);
        }

        up_read(&vnode->validate_lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        struct afs_file *af = iocb->ki_filp->private_data;
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = afs_validate(vnode, af->key);
        if (result < 0)
                return result;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct afs_file *af = file->private_data;
        int ret;

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = afs_validate(vnode, af->key);
        if (ret < 0)
                return ret;

        return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = thp_head(vmf->page);
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = file->private_data;
        unsigned long priv;
        vm_fault_t ret = VM_FAULT_RETRY;

        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);

        afs_validate(vnode, af->key);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(page) < 0)
                goto out;
#endif

        if (wait_on_page_writeback_killable(page))
                goto out;

        if (lock_page_killable(page) < 0)
                goto out;

        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        if (wait_on_page_writeback_killable(page) < 0) {
                unlock_page(page);
                goto out;
        }

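        /* We can't track which parts of the page get written through the
         * mapping, so treat the whole page as dirty and mark it as mmapped.
         */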
        priv = afs_page_dirty(page, 0, thp_size(page));
        priv = afs_page_dirty_mmapped(priv);
        if (PagePrivate(page)) {
                set_page_private(page, priv);
                trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
        } else {
                attach_page_private(page, (void *)priv);
                trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
        }
        file_update_time(file);

        ret = VM_FAULT_LOCKED;
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct iov_iter iter;
        struct bio_vec bv[1];
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", page->index);

        priv = page_private(page);
        if (clear_page_dirty_for_io(page)) {
                f = 0;
                t = thp_size(page);
                if (PagePrivate(page)) {
                        f = afs_page_dirty_from(page, priv);
                        t = afs_page_dirty_to(page, priv);
                }

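                /* Synchronously store just the dirty region of the page, with
                 * the laundering flag set so that the completion handler
                 * leaves the page alone.
                 */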
                bv[0].bv_page = page;
                bv[0].bv_offset = f;
                bv[0].bv_len = t - f;
                iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

                trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
                ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
        }

        trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
        detach_page_private(page);
        wait_on_page_fscache(page);
        return ret;
}