1
2
3
4
5
6
7
8
9
10
11
12#define FSCACHE_DEBUG_LEVEL PAGE
13#include <linux/module.h>
14#include <linux/fscache-cache.h>
15#include <linux/buffer_head.h>
16#include <linux/pagevec.h>
17#include <linux/slab.h>
18#include "internal.h"
19
20
21
22
23bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
24{
25 void *val;
26
27 rcu_read_lock();
28 val = radix_tree_lookup(&cookie->stores, page->index);
29 rcu_read_unlock();
30
31 return val != NULL;
32}
33EXPORT_SYMBOL(__fscache_check_page_write);
34
35
36
37
38void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
39{
40 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
41
42 wait_event(*wq, !__fscache_check_page_write(cookie, page));
43}
44EXPORT_SYMBOL(__fscache_wait_on_page_write);
45
46
47
48
49
50static
51bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
52{
53 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
54
55 return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
56 HZ);
57}
58
59
60
61
62
/*
 * Decide whether a page may be released by the VM, possibly by cancelling a
 * store to it if it's merely pending storage.  Called from the page-reclaim
 * path, so must be careful about sleeping.
 * - Returns true if the page may be released, false if not.
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		/* not in the store tree at all - nothing to cancel */
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping on
	 * memory allocation - so only wait if direct reclaim and FS operations
	 * are permitted by the gfp flags, and then only with a timeout */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	/* strip direct-reclaim so we take the non-waiting branch on retry
	 * rather than blocking again */
	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
139
140
141
142
/*
 * Note that a page has finished being written to the cache: clear its
 * STORING tag and, unless another store has since been queued for it
 * (PENDING tag set again), remove it from the store tree and drop the ref
 * the tree held on it.  Waiters on the cookie's flag-bit-0 queue are woken.
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	/* release the tree's ref outside the locks */
	if (xpage)
		page_cache_release(xpage);
}
169
170
171
172
/*
 * Actually apply a changed-attribute notification to the cache object.
 * Runs as the processor routine of an attribute-change operation.
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		/* the backend reported an error - kill off the object */
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}
193
194
195
196
/*
 * Notify the cache that the attributes on a netfs object have changed: queue
 * an exclusive, asynchronous operation against the backing object to convey
 * the change.
 * - Returns 0 on success, -ENOMEM on allocation failure or -ENOBUFS if
 *   there's no enabled backing object to tell.
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
250
251
252
253
/*
 * Cancellation handler for a retrieval operation: mark all its pages as no
 * longer outstanding (the release routine asserts n_pages == 0).
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}
261
262
263
264
/*
 * Release routine for a retrieval operation: record the retrieval time in
 * the histogram and drop the pinned netfs read context, if any.
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	/* any op that made it past initialisation must have disposed of all
	 * of its pages by the time it is released */
	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}
281
282
283
284
/*
 * Allocate and initialise a retrieval operation, pinning the netfs read
 * context (if given) for the lifetime of the op.
 * - Returns NULL on allocation failure.
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie = cookie;
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* pin the netfs read context so that it's still available should we
	 * need to fall back to reading from the netfs after a cache read
	 * failure; released in fscache_release_retrieval_op() */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}
320
321
322
323
/*
 * Wait for a deferred object lookup to complete before proceeding with an
 * I/O operation.
 * - Returns 0 once the lookup is done, -ERESTARTSYS if interrupted by a
 *   signal.
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	/* read barrier so state set up by the lookup side is seen after the
	 * flag clears (NOTE(review): pairing write barrier presumably on the
	 * lookup-completion path - confirm) */
	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
352
353
354
355
/*
 * Wait for an operation to become active (ie. clear FSCACHE_OP_WAITING),
 * attempting cancellation if interrupted, then check that the op wasn't
 * cancelled and the object isn't dying or broken.
 * - Returns 0 if the op is good to go, -ERESTARTSYS if interrupted and
 *   successfully cancelled, or -ENOBUFS if the op/object is dead.
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* cancellation failed, so the op must already be in progress
		 * or complete - we have to wait for it uninterruptibly now */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}
400
401
402
403
404
405
406
407
408
409
/*
 * Read a page from the cache, or allocate a block in which to store it.
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - end_io_func() will be called on
 *		  completion
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool (the op was
	 * initialised with FSCACHE_OP_MYTHREAD) */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
518EXPORT_SYMBOL(__fscache_read_or_alloc_page);
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
/*
 * Read a list of pages from the cache, or allocate blocks in which to store
 * them.
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the blocks
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched reads - end_io_func() will be called on
 *		  completion
 * The cache backend is handed the pages list and nr_pages to account for the
 * pages it takes on.
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool (the op was
	 * initialised with FSCACHE_OP_MYTHREAD) */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
645
646
647
648
649
650
651
652
653
/*
 * Allocate a block in the cache in which to store a page.
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	/* no end_io_func or context - we're only allocating, not reading */
	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
737EXPORT_SYMBOL(__fscache_alloc_page);
738
739
740
741
742
743void __fscache_readpages_cancel(struct fscache_cookie *cookie,
744 struct list_head *pages)
745{
746 struct page *page;
747
748 list_for_each_entry(page, pages, lru) {
749 if (PageFsCache(page))
750 __fscache_uncache_page(cookie, page);
751 }
752}
753EXPORT_SYMBOL(__fscache_readpages_cancel);
754
755
756
757
/*
 * Release routine for a storage operation - nothing to clean up beyond
 * tracing the call.
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
762
763
764
765
/*
 * Perform the next write to the cache for a storage op: pick the first
 * PENDING-tagged page out of the cookie's store tree, retag it STORING, hand
 * it to the cache backend, then requeue the op so any further pending pages
 * get written too.  When nothing remains pending, the op completes.
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* the object is no longer active - just cancel this write
		 * rather than touching the backing store */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* the cookie has been detached from the object (eg. by
		 * withdrawal), so there is nowhere to look up the pages to
		 * write - cancel */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a pending page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index >= op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	/* retag the page as being stored so that reclaim and end-of-write
	 * handling know its state */
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		/* backend write failure is fatal for the object */
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		/* requeue to write the next pending page, if any */
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}
857
858
859
860
/*
 * Clear the pages pending storage on a cookie for invalidation: repeatedly
 * pull batches of PENDING-tagged pages out of the store tree and drop the
 * ref the tree held on each, until none remain.
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		/* drop the tree's page refs outside the lock */
		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	_leave("");
}
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
/*
 * Request a page be stored in the cache: the page is added to the cookie's
 * store tree with the PENDING tag and a storage op is submitted (unless one
 * is already running, in which case the existing writer will pick the page
 * up).
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- the page is queued for storage
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	/* preload the radix tree so the insert below can't fail for lack of
	 * memory whilst we hold the spinlocks */
	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage tree on the cookie, taking a
	 * ref on the page for the tree */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		/* -EEXIST: the page is already queued for storage */
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer op at a time - if one is already running it
	 * will find this page by its PENDING tag */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the submitted op holds its own ref; drop ours */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	/* back out the tree insertion and the page ref it took */
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
1054
1055
1056
1057
/*
 * Remove a page from the cache's knowledge: clear PG_fscache and let the
 * backing object (if any) know the page is gone.
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may have already cleared the mark for us */
	if (!PageFsCache(page))
		goto done;

	/* get the backing object, if any */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the backend if we managed to clear the mark here; this
	 * serialises against concurrent withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock, hence the jump
		 * to done rather than done_unlock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
1104
1105
1106
1107
1108
1109
1110
1111
1112
/*
 * Record that a page has been placed in the cache by the backend: set
 * PG_fscache on it (warning once, system-wide, if it was already marked) and
 * give the netfs a chance to note the fact via its mark_page_cached hook.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		/* double-marking indicates a netfs/backend accounting bug;
		 * warn only on the first occurrence */
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145void fscache_mark_pages_cached(struct fscache_retrieval *op,
1146 struct pagevec *pagevec)
1147{
1148 unsigned long loop;
1149
1150 for (loop = 0; loop < pagevec->nr; loop++)
1151 fscache_mark_page_cached(op, pagevec->pages[loop]);
1152
1153 pagevec_reinit(pagevec);
1154}
1155EXPORT_SYMBOL(fscache_mark_pages_cached);
1156
1157
1158
1159
1160
/*
 * Uncache all the PG_fscache-marked pages of an inode's mapping, assuming
 * they belong to the given cookie.  Any write in progress on a page is waited
 * for before the page is uncached.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/* track the highest index seen so the next lookup
			 * resumes past this batch */
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next); /* stop if ++next wraps to 0 (last index was ~0UL) */

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
1196