// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");

static void netfs_rreq_work(struct work_struct *);
static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);

static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
				 bool was_async)
{
	if (refcount_dec_and_test(&subreq->usage))
		__netfs_put_subrequest(subreq, was_async);
}

static struct netfs_read_request *netfs_alloc_read_request(
	const struct netfs_read_request_ops *ops, void *netfs_priv,
	struct file *file)
{
	static atomic_t debug_ids;
	struct netfs_read_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
	if (rreq) {
		rreq->netfs_ops = ops;
		rreq->netfs_priv = netfs_priv;
		rreq->inode = file_inode(file);
		rreq->i_size = i_size_read(rreq->inode);
		rreq->debug_id = atomic_inc_return(&debug_ids);
		INIT_LIST_HEAD(&rreq->subrequests);
		INIT_WORK(&rreq->work, netfs_rreq_work);
		refcount_set(&rreq->usage, 1);
		__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
		ops->init_rreq(rreq, file);
		netfs_stat(&netfs_n_rh_rreq);
	}

	return rreq;
}

static void netfs_get_read_request(struct netfs_read_request *rreq)
{
	refcount_inc(&rreq->usage);
}

static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
				     bool was_async)
{
	struct netfs_read_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_read_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async);
	}
}

static void netfs_free_read_request(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);

	netfs_rreq_clear_subreqs(rreq, false);
	if (rreq->netfs_priv)
		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
	netfs_stat_d(&netfs_n_rh_rreq);
}

static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
{
	if (refcount_dec_and_test(&rreq->usage)) {
		if (was_async) {
			rreq->work.func = netfs_free_read_request;
			if (!queue_work(system_unbound_wq, &rreq->work))
				BUG();
		} else {
			netfs_free_read_request(&rreq->work);
		}
	}
}

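/*
 * Allocate and partially initialise a read subrequest, pinning the read
 * request that owns it.
 */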
static struct netfs_read_subrequest *netfs_alloc_subrequest(
	struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
	if (subreq) {
		INIT_LIST_HEAD(&subreq->rreq_link);
		/* Two refs: one held by the subrequests list, one consumed
		 * when the subrequest terminates.
		 */
		refcount_set(&subreq->usage, 2);
		subreq->rreq = rreq;
		netfs_get_read_request(rreq);
		netfs_stat(&netfs_n_rh_sreq);
	}

	return subreq;
}

static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
{
	refcount_inc(&subreq->usage);
}

static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
				   bool was_async)
{
	struct netfs_read_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	kfree(subreq);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_read_request(rreq, was_async);
}

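/*
 * Clear the unread part of an I/O request.
 */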
static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_read_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

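/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */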
static void netfs_read_from_cache(struct netfs_read_request *rreq,
				  struct netfs_read_subrequest *subreq,
				  bool seek_data)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, seek_data,
			netfs_cache_read_terminated, subreq);
}

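/*
 * Fill a subrequest region with zeroes.
 */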
static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
				   struct netfs_read_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

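/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer cleared by a short read and it must not shorten the request.
 *
 * When the read completes, one way or another, the netfs must hand the
 * result back by calling netfs_subreq_terminated().
 */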
static void netfs_read_from_server(struct netfs_read_request *rreq,
				   struct netfs_read_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_op(subreq);
}

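/*
 * Finalise a completed read request, discarding its subrequests and dropping
 * the caller's ref.
 */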
static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_rreq_clear_subreqs(rreq, was_async);
	netfs_put_read_request(rreq, was_async);
}

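/*
 * Deal with the completion of writing the data to the cache.  We have to
 * clear the PG_fscache bits on the pages involved and release the caller's
 * ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */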
static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
					  bool was_async)
{
	struct netfs_read_subrequest *subreq;
	struct page *page;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			/* We might have multiple writes from the same huge
			 * page, but we mustn't unlock a page more than once,
			 * so skip any page this loop has already dealt with.
			 */
			if (have_unlocked && page->index <= unlocked)
				continue;
			unlocked = page->index;
			end_page_fscache(page);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_read_subrequest *subreq = priv;
	struct netfs_read_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_wr_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_wr_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async);
}

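/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */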
static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_read_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_write);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_wr_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_wr_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_read_subrequest(subreq);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_wr_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
				      bool was_async)
{
	if (was_async) {
		rreq->work.func = netfs_rreq_write_to_cache_work;
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_do_write_to_cache(rreq);
	}
}

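/*
 * Unlock the pages in a read operation.  We need to set PG_fscache on any
 * pages we're going to write back before we unlock them.
 */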
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;
	struct page *page;
	unsigned int iopos, account = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;
	int i;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_read_subrequest, rreq_link);
	iopos = 0;
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, page, last_page) {
		unsigned int pgpos = (page->index - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + thp_size(page);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
				set_page_fscache(page);
			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			account += subreq->transferred;
			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed) {
			for (i = 0; i < thp_nr_pages(page); i++)
				flush_dcache_page(page);
			SetPageUptodate(page);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) {
			if (page->index == rreq->no_unlock_page &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags))
				_debug("no unlock");
			else
				unlock_page(page);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

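/*
 * Handle a short read.
 */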
static void netfs_rreq_short_read(struct netfs_read_request *rreq,
				  struct netfs_read_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_read_subrequest(subreq);
	atomic_inc(&rreq->nr_rd_ops);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, true);
	else
		netfs_read_from_server(rreq, subreq);
}

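/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */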
static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_rd_ops);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_read_subrequest(subreq);
			atomic_inc(&rreq->nr_rd_ops);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_rd_ops))
		return true;

	wake_up_var(&rreq->nr_rd_ops);
	return false;
}

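/*
 * Check to see if the data read is still valid.
 */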
static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

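/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context, so if was_async is set, we mustn't sleep.
 */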
static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq, was_async);

	netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);
	netfs_rreq_assess(rreq, false);
}

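/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */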
static void netfs_rreq_terminated(struct netfs_read_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

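/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has
 * terminated, one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the
 * operation, supplying a positive value to indicate the number of bytes
 * transferred, 0 to indicate a failure to transfer anything that should be
 * retried or a negative error code.  The helper will look after reissuing
 * I/O operations as appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */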
void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_read_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_rd_ops);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_rd_ops);

	netfs_put_subrequest(subreq, was_async);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
						       loff_t i_size)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

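/*
 * Work out what sort of subrequest the next one will be.
 */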
static enum netfs_read_source
netfs_rreq_prepare_read(struct netfs_read_request *rreq,
			struct netfs_read_subrequest *subreq)
{
	enum netfs_read_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here,
		 * it will be called again to make simultaneous calls; if it
		 * shrinks it to 0, the request will be aborted.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

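/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */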
static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_read_subrequest *subreq;
	enum netfs_read_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index = (*_debug_index)++;
	subreq->start = rreq->start + rreq->submitted;
	subreq->len = rreq->len - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the
	 * remaining subset.  It wants to know whether the remaining subset
	 * will be readable and whether it can be expanded.  The result
	 * determines whether this slice is read from the cache, downloaded
	 * from the server or simply cleared.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_rd_ops);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, false);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false);
	return false;
}

static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_read_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the
	 * middle of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}

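/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary,
 * the readahead window can be expanded in either direction to a more
 * convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */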
void netfs_readahead(struct readahead_control *ractl,
		     const struct netfs_read_request_ops *ops,
		     void *netfs_priv)
{
	struct netfs_read_request *rreq;
	struct page *page;
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		goto cleanup;

	rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
	if (!rreq)
		goto cleanup;
	rreq->mapping = ractl->mapping;
	rreq->start = readahead_pos(ractl);
	rreq->len = readahead_length(ractl);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Drop the refs on the pages here rather than in the cache or the
	 * netfs.  The pages stay locked; the I/O completion path unlocks
	 * them as the subrequests finish.
	 */
	while ((page = readahead_page(ractl)))
		put_page(page);

	/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_rd_ops))
		netfs_rreq_assess(rreq, false);
	return;

cleanup_free:
	netfs_put_read_request(rreq, false);
	return;
cleanup:
	if (netfs_priv)
		ops->cleanup(ractl->mapping, netfs_priv);
	return;
}
EXPORT_SYMBOL(netfs_readahead);

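/**
 * netfs_readpage - Helper to manage a readpage request
 * @file: The file being read from
 * @page: The page to be read
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readpage request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */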
int netfs_readpage(struct file *file,
		   struct page *page,
		   const struct netfs_read_request_ops *ops,
		   void *netfs_priv)
{
	struct netfs_read_request *rreq;
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx", page_index(page));

	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
	if (!rreq) {
		if (netfs_priv)
			ops->cleanup(page_file_mapping(page), netfs_priv);
		unlock_page(page);
		return -ENOMEM;
	}
	rreq->mapping = page_file_mapping(page);
	rreq->start = page_file_offset(page);
	rreq->len = thp_size(page);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
			unlock_page(page);
			goto out;
		}
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);

	netfs_get_read_request(rreq);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_rd_ops incremented so that the ref always belongs to us,
	 * and the service code isn't punted off to a random thread pool to
	 * process.
	 */
	do {
		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
		netfs_rreq_assess(rreq, false);
	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
		ret = -EIO;
	}
out:
	netfs_put_read_request(rreq, false);
	return ret;
}
EXPORT_SYMBOL(netfs_readpage);

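/*
 * Prepare a page for writing without reading first
 * @page: The page being prepared
 * @pos: starting position for the write
 * @len: length of write
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full page write
 * - write that lies in a page that is completely beyond EOF
 * - write that covers the page from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the page and return true.  Otherwise, return false.
 */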
static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
{
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_thp(page, pos);

	/* Full page write */
	if (offset == 0 && len >= thp_size(page))
		return true;

	/* pos beyond last page in the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers the page from start to EOF or beyond it */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(page, 0, offset, offset + len, thp_size(page));
	return true;
}

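/**
 * netfs_write_begin - Helper to prepare for writing
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the page chosen)
 * @flags: AOP_* flags
 * @_page: Where to put the resultant page
 * @_fsdata: Place for the netfs to store a cookie
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the
 * cache feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the page is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead; unlock the page and return -EAGAIN to cause the page to
 * be regot; or return an error.
 *
 * This is usable whether or not caching is enabled.
 */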
int netfs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, unsigned int flags,
		      struct page **_page, void **_fsdata,
		      const struct netfs_read_request_ops *ops,
		      void *netfs_priv)
{
	struct netfs_read_request *rreq;
	struct page *page, *xpage;
	struct inode *inode = file_inode(file);
	unsigned int debug_index = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ops->check_write_begin(file, pos, len, page, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			if (ret == -EAGAIN)
				goto retry;
			goto error;
		}
	}

	if (PageUptodate(page))
		goto have_page;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!ops->is_cache_enabled(inode) &&
	    netfs_skip_page_read(page, pos, len)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_page_no_wait;
	}

	ret = -ENOMEM;
	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
	if (!rreq)
		goto error;
	rreq->mapping = page->mapping;
	rreq->start = page_offset(page);
	rreq->len = thp_size(page);
	rreq->no_unlock_page = page->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
	netfs_priv = NULL;

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = thp_nr_pages(page);
	netfs_rreq_expand(rreq, &ractl);
	netfs_get_read_request(rreq);

	/* We hold the page locks, so we can drop the references */
	while ((xpage = readahead_page(&ractl)))
		if (xpage != page)
			put_page(xpage);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_rd_ops incremented so that the ref always belongs to us,
	 * and the service code isn't punted off to a random thread pool to
	 * process.
	 */
	for (;;) {
		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
		netfs_rreq_assess(rreq, false);
		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
			break;
		cond_resched();
	}

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
		ret = -EIO;
	}
	netfs_put_read_request(rreq, false);
	if (ret < 0)
		goto error;

have_page:
	ret = wait_on_page_fscache_killable(page);
	if (ret < 0)
		goto error;
have_page_no_wait:
	if (netfs_priv)
		ops->cleanup(mapping, netfs_priv);
	*_page = page;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_read_request(rreq, false);
error:
	unlock_page(page);
	put_page(page);
	if (netfs_priv)
		ops->cleanup(mapping, netfs_priv);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);