1
2
3
4
5
6
7
8
9
10
11
12#define FSCACHE_DEBUG_LEVEL PAGE
13#include <linux/module.h>
14#include <linux/fscache-cache.h>
15#include <linux/buffer_head.h>
16#include <linux/pagevec.h>
17#include <linux/slab.h>
18#include "internal.h"
19
20
21
22
23bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
24{
25 void *val;
26
27 rcu_read_lock();
28 val = radix_tree_lookup(&cookie->stores, page->index);
29 rcu_read_unlock();
30
31 return val != NULL;
32}
33EXPORT_SYMBOL(__fscache_check_page_write);
34
35
36
37
38void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
39{
40 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
41
42 wait_event(*wq, !__fscache_check_page_write(cookie, page));
43}
44EXPORT_SYMBOL(__fscache_wait_on_page_write);
45
46
47
48
49
50static
51bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
52{
53 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
54
55 return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
56 HZ);
57}
58
59
60
61
62
/*
 * Decide whether a page can be released by the VM.  Returns true (after
 * uncaching the page) if it is safe to release, false if the cache is still
 * busy with it and the caller must keep it for now.
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		/* no store is pending for this page - it can go */
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	/* only wait once per invocation: retry without direct reclaim */
	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
139
140
141
142
/*
 * Note that a page has finished being written to the cache: clear the
 * STORING tag and, unless the page has been re-queued (PENDING tag set),
 * remove it from the store tree and drop the ref the tree held on it.
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		/* wake anyone in __fscache_wait_on_page_write() et al. */
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	/* drop the page ref outside of the locks */
	if (xpage)
		put_page(xpage);
}
169
170
171
172
/*
 * Actually apply an attribute change to a cache object.  Runs as the
 * processor routine of an attribute-changed operation.
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		/* a backend failure here is fatal to the object */
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}
193
194
195
196
/*
 * Notify the cache that an object's attributes (typically its size) have
 * changed.  Queues an exclusive async operation against the backing object.
 * Returns 0 on success, -ENOMEM on allocation failure or -ENOBUFS if there
 * is no usable backing object.
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	/* attribute changes only make sense on data-file cookies */
	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
250
251
252
253
/*
 * Handle cancellation of a pending retrieval op: zero the outstanding page
 * count so that fscache_release_retrieval_op()'s assertion holds.
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}
261
262
263
264
/*
 * Release a retrieval op reference: record the retrieval duration in the
 * histogram and drop the pinned netfs read context, if any.
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	/* an op that got past INITIALISED must have consumed or cancelled
	 * all of its pages by the time it is released */
	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}
281
282
283
284
/*
 * Allocate and initialise a retrieval operation.  Returns NULL on allocation
 * failure (after bumping the nomem statistic).
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie	= cookie;
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}
320
321
322
323
/*
 * Wait for a deferred lookup on a cookie to complete before allowing page
 * I/O to proceed.  Returns 0 once lookup is done, or -ERESTARTSYS if the
 * wait was interrupted by a signal.
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	/* order reads of lookup results after seeing the flag cleared
	 * (NOTE(review): presumably paired with a barrier on the clearing
	 * side - confirm against the cookie state machine) */
	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
352
353
354
355
/*
 * Wait for an operation to become active (or be cancelled).  Returns 0 when
 * the op may proceed, -ERESTARTSYS if interrupted and successfully
 * cancelled, or -ENOBUFS if the op was cancelled or the object/cache died.
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly - wait uninterruptibly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}
400
401
402
403
404
405
406
407
408
409
/*
 * Read a page from the cache, or allocate a block in which to store it if it
 * is not present.  end_io_func() is called on completion of a successfully
 * started read.  Returns:
 *   0	       - the read was dispatched
 *   -ENODATA  - a block was allocated but no data is available
 *   -ENOBUFS  - no backing object / invalidating / backend refused
 *   -ENOMEM / -ERESTARTSYS on the obvious conditions
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	/* recheck under the lock - the object may have gone away meanwhile */
	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		/* no data has ever been stored: allocation is all we can do */
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
518EXPORT_SYMBOL(__fscache_read_or_alloc_page);
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
/*
 * Read a list of pages from the cache, or allocate blocks in which to store
 * them.  Pages that are successfully dispatched are removed from *pages and
 * *nr_pages is decremented by the backend; any remaining pages are the
 * netfs's responsibility.  Return values as for
 * __fscache_read_or_alloc_page().
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	/* recheck under the lock - the object may have gone away meanwhile */
	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
644EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
645
646
647
648
649
650
651
652
653
/*
 * Allocate a block in the cache in which to store a page, without reading
 * any existing data.  Returns 0 on success, -ENOBUFS if no block could be
 * allocated, or -ENOMEM / -ERESTARTSYS on the obvious conditions.
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	/* no end_io_func/context: allocation never reads data back */
	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	/* recheck under the lock - the object may have gone away meanwhile */
	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
737EXPORT_SYMBOL(__fscache_alloc_page);
738
739
740
741
742
743void __fscache_readpages_cancel(struct fscache_cookie *cookie,
744 struct list_head *pages)
745{
746 struct page *page;
747
748 list_for_each_entry(page, pages, lru) {
749 if (PageFsCache(page))
750 __fscache_uncache_page(cookie, page);
751 }
752}
753EXPORT_SYMBOL(__fscache_readpages_cancel);
754
755
756
757
/*
 * Release a storage operation - nothing to free beyond the op itself, so
 * just trace the release.
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
762
763
764
765
/*
 * Write one pending page to the cache.  Picks the first PENDING-tagged page
 * from the cookie's store tree, flips its tag to STORING, writes it via the
 * backend, then re-queues itself until no pending pages remain.
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation. */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache from no longer exist - therefore, we can just
		 * cancel this write operation. */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index >= op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	/* mark the page as being written so that end-of-write processing can
	 * tell whether it was re-queued in the meantime */
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		/* more pages may be pending - go round again */
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}
857
858
859
860
/*
 * Clear the outstanding writes on a cookie: remove every PENDING-tagged page
 * from the store tree in batches and drop the references the tree held.
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		/* put the page refs outside of the lock */
		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	/* wake anyone waiting for writes on this cookie to drain */
	wake_up_bit(&cookie->flags, 0);

	_leave("");
}
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
/*
 * Request a page be stored in the cache.  The page is added to the cookie's
 * store tree with the PENDING tag and a write operation is submitted; the
 * actual write happens asynchronously in fscache_write_op().  Returns 0 if
 * the page was queued (or was already queued), -ENOBUFS if there is no
 * usable backing object, or -ENOMEM on allocation failure.
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	/* preload the radix tree so the insertion below can't fail with
	 * -ENOMEM whilst we hold the spinlocks */
	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	/* the tree holds a ref on the page until the write completes */
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	/* undo the queueing done above: drop the tree entry and its ref */
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
1055EXPORT_SYMBOL(__fscache_write_page);
1056
1057
1058
1059
/*
 * Remove a page from the cache's knowledge: clear PG_fscache and tell the
 * backend (if any) to forget about the page.
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there is data in the cache, so we can no longer claim it's empty */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the backend if the page was still marked - someone else
	 * may have cleared the mark in the meantime */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
1106
1107
1108
1109
1110
1111
1112
1113
1114
/*
 * Mark a netfs page as being cached: set PG_fscache and notify the netfs via
 * its mark_page_cached() hook, if it has one.  Warns (once) if a page is
 * marked twice, which indicates a bookkeeping bug in the cookie's user.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
1137EXPORT_SYMBOL(fscache_mark_page_cached);
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147void fscache_mark_pages_cached(struct fscache_retrieval *op,
1148 struct pagevec *pagevec)
1149{
1150 unsigned long loop;
1151
1152 for (loop = 0; loop < pagevec->nr; loop++)
1153 fscache_mark_page_cached(op, pagevec->pages[loop]);
1154
1155 pagevec_reinit(pagevec);
1156}
1157EXPORT_SYMBOL(fscache_mark_pages_cached);
1158
1159
1160
1161
1162
/*
 * Uncache all of an inode's pages: walk the inode's pagecache in batches,
 * waiting for any in-flight cache write on each PG_fscache page before
 * uncaching it.  Used by netfs code when a cookie is being relinquished.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/* remember the highest index seen so the next lookup
			 * resumes from just past it (see ++next below) */
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
1198