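/* Network filesystem high-level read support. */
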
#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

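/*
 * Clear the unread part of an I/O request.
 */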
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

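/*
 * Completion handler for a read from the cache.  It just hands the result on
 * to the common subrequest termination handler.
 */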
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

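/*
 * Issue a read against the cache.  The cache backend calls
 * netfs_cache_read_terminated() when it is done, which consumes the caller's
 * ref on the subrequest.
 */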
static void netfs_read_from_cache(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq,
				  enum netfs_read_from_hole read_hole)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, read_hole,
			netfs_cache_read_terminated, subreq);
}

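/*
 * Fill a subrequest region with zeroes.
 */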
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

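/*
 * Ask the netfs to issue a read request to the server for us.  The netfs
 * indicates the result by calling netfs_subreq_terminated().
 */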
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_read(subreq);
}

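/*
 * Finish off a read request by discarding its subrequests and dropping a ref
 * on it.
 */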
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, was_async);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}

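/*
 * Deal with the completion of writing the data to the cache.  We have to
 * clear the PG_fscache bits on the folios involved and then release the ref
 * we inherit from the caller.
 */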
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			/* Skip retry markers the XArray may hand back under
			 * the RCU read lock.
			 */
			if (xas_retry(&xas, folio))
				continue;

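			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */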
			if (have_unlocked && folio_index(folio) <= unlocked)
				continue;
			unlocked = folio_index(folio);
			folio_end_fscache(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

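/*
 * Handle the completion of a write to the cache made to copy downloaded data
 * into it.
 */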
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

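	/* If we decrement nr_copy_ops to 0, the ref on the request belongs to us. */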
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}

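/*
 * Perform any outstanding writes of downloaded data to the cache.  We inherit
 * a ref on the request from the caller.
 */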
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

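	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */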
	atomic_inc(&rreq->nr_copy_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
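		/* Amalgamate adjacent writes. */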
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

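	/* If we decrement nr_copy_ops to 0, the ref on the request belongs to us. */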
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

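/*
 * Work item to write downloaded data to the cache in process context.
 */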
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

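/*
 * Defer the writing of copied data to the cache to a worker thread.
 */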
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}

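/*
 * Handle a short read.
 */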
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}

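/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */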
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

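	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */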
	atomic_inc(&rreq->nr_outstanding);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			atomic_inc(&rreq->nr_outstanding);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

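	/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */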
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		return true;

	wake_up_var(&rreq->nr_outstanding);
	return false;
}

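/*
 * Check to see if the data read is still valid.
 */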
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

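/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that this may be called from a workqueue or directly from an I/O
 * completion path, so what we can do here is constrained accordingly.
 */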
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock_folios(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}

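/*
 * Work item to assess a read request in process context.
 */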
static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_rreq_assess(rreq, false);
}

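/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */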
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

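/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous.
 *
 * This tells the read helper that a contributory I/O operation has
 * terminated, one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0
 * to indicate a failure to transfer anything that should be retried or a
 * negative error code.  The helper will look after reissuing I/O operations
 * as appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */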
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

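	/* If we decrement nr_outstanding to 0, the ref belongs to us. */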
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

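/*
 * Decide where a read should be sourced from: ask the cache if one is in
 * use; otherwise download from the server or, if the subrequest begins at or
 * beyond the file size, just fill it with zeroes.
 */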
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

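/*
 * Work out what sort of subrequest the next one will be.
 */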
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq)
{
	enum netfs_io_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
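		/* Clamp the length of the download to the size of the file
		 * and give the netfs a chance to apply its own length limits.
		 */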
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

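/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */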
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index = (*_debug_index)++;
	subreq->start = rreq->start + rreq->submitted;
	subreq->len = rreq->len - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

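	/* Call out to the cache and the netfs to work out where the data
	 * should come from and how big the subrequest should be; either may
	 * shorten the slice or declare it invalid.
	 */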
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}

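/*
 * Begin the process of reading in a chunk of data, where that data may be
 * retrieved from multiple sources if necessary.
 */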
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	INIT_WORK(&rreq->work, netfs_rreq_work);

	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

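	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */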
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
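		/* Keep nr_outstanding incremented so that the ref always
		 * belongs to us and the service code isn't punted off to a
		 * random thread pool to process.
		 */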
		for (;;) {
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
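		/* If we decrement nr_outstanding to 0, the ref belongs to us. */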
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}