linux/fs/netfs/io.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len   - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_IO: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_read(subreq);
}
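
/*
 * Illustrative sketch (not part of the upstream file): one way a
 * filesystem's ->issue_read() might honour the contract above.
 * myfs_send_read() is a hypothetical transport helper standing in for a
 * real RPC; everything else uses the netfs types from linux/netfs.h.
 */
#if 0
static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	size_t len = subreq->len - subreq->transferred;
	struct iov_iter iter;
	ssize_t ret;

	/* Read only the part not yet transferred; never backtrack. */
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred, len);

	ret = myfs_send_read(rreq->inode, &iter,
			     subreq->start + subreq->transferred);

	/* If the server indicated EOF, ask for the rest of the buffer to
	 * be cleared rather than reissued.
	 */
	if (ret >= 0 && ret < len &&
	    subreq->start + subreq->transferred + ret >= rreq->i_size)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, ret, false);
}
#endif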

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */
			if (have_unlocked && folio_index(folio) <= unlocked)
				continue;
			unlocked = folio_index(folio);
			folio_end_fscache(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_copy_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}
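
/*
 * Illustrative sketch (not part of the upstream file): a minimal
 * ->is_still_valid() implementation.  myfs_i() and myfs_cookie_valid() are
 * hypothetical helpers; a real netfs would compare coherency data (e.g. a
 * change attribute) captured when the request was started.
 */
#if 0
static bool myfs_is_still_valid(struct netfs_io_request *rreq)
{
	struct myfs_inode *mi = myfs_i(rreq->inode);	/* hypothetical */

	/* Returning false makes cache reads fail with -ESTALE and forces a
	 * resubmission to the server.
	 */
	return myfs_cookie_valid(mi);
}
#endif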

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock_folios(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates the outcome of the operation in @transferred_or_error,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried, or a
 * negative error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);
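
/*
 * Illustrative sketch (not part of the upstream file): how a netfs's
 * asynchronous completion path might report into the helper above.  The
 * struct myfs_rpc, its ->result and ->netfs_priv fields, and myfs_read_done()
 * are hypothetical stand-ins for a real transport's completion callback.
 */
#if 0
static void myfs_read_done(struct myfs_rpc *rpc)
{
	struct netfs_io_subrequest *subreq = rpc->netfs_priv;

	/* Report bytes transferred, 0 for a retryable empty transfer, or a
	 * negative errno.  This may run in softirq context, so pass
	 * was_async = true and the helper will defer any sleeping work to a
	 * workqueue.
	 */
	netfs_subreq_terminated(subreq, rpc->result, true);
}
#endif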

static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq)
{
	enum netfs_io_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}
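
/*
 * Illustrative sketch (not part of the upstream file): a ->clamp_length()
 * that caps each server subrequest at a fixed RPC size, per the comment
 * above.  MYFS_MAX_RPC_SIZE is a hypothetical constant; a real netfs would
 * use its negotiated I/O size and any object boundaries instead.
 */
#if 0
#define MYFS_MAX_RPC_SIZE	(256 * 1024)

static bool myfs_clamp_length(struct netfs_io_subrequest *subreq)
{
	/* Shrinking here means netfs_rreq_submit_slice() will loop and issue
	 * another subrequest for the remainder.
	 */
	subreq->len = min_t(size_t, subreq->len, MYFS_MAX_RPC_SIZE);
	return true;	/* Returning false marks the read NETFS_INVALID_READ */
}
#endif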

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index	= (*_debug_index)++;
	subreq->start		= rreq->start + rreq->submitted;
	subreq->len		= rreq->len   - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then, when we hand over the subset, it can choose to take a subset
	 * of that (the starts must coincide), in which case we go around the
	 * loop again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	INIT_WORK(&rreq->work, netfs_rreq_work);

	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us, and the service code isn't punted off to a
		 * random thread pool to process.
		 */
		for (;;) {
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}
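
/*
 * Illustrative sketch (not part of the upstream file): how a buffered-read
 * entry point might drive netfs_begin_read().  This is modelled on the
 * read_folio path elsewhere in this directory; the allocation helper and
 * origin value are assumptions taken from the surrounding netfs code rather
 * than guarantees of this file.
 */
#if 0
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	struct netfs_io_request *rreq;

	/* Build a request covering the whole folio... */
	rreq = netfs_alloc_request(folio_file_mapping(folio), file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		folio_unlock(folio);
		return PTR_ERR(rreq);
	}

	/* ...and run it synchronously; the helper unlocks the folio for us
	 * as the slices complete.
	 */
	return netfs_begin_read(rreq, true);
}
#endif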