/* Storage object read/write handlers
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake-up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);
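	/* the page waitqueue is hashed and may be shared with other pages, so
	 * only respond to an unlock event for the page being monitored */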

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the workqueue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably-truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is discarded anyway
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we
	 * don't need a second one */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed,
	 * so the monitor may miss the event - so we have to ensure that we do
	 * get one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed,
	 * so the monitor may miss the event - so we have to ensure that we do
	 * get one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date - copy the data across
	 * and let the netfs have it */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

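	/* make the retrieval operation asynchronous so that the data copy is
	 * performed by cachefiles_read_copier() on the FS-Cache thread pool */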
	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

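	/* ask the backing filesystem whether a block is allocated at this
	 * position in the file - bmap() returns 0 if there's no mapping */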
	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have
		 * to ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor
		 * erroneous, so we need to start it reading again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

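	/* make the retrieval operation asynchronous so that the data copies
	 * are performed by cachefiles_read_copier() on the FS-Cache thread
	 * pool */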
	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

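	/* by default, a page is reported as not being in the cache: -ENODATA
	 * if there's space to cache it for next time, -ENOBUFS if not */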
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

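		/* ask the backing filesystem whether a block is allocated at
		 * this position in the file - bmap() returns 0 if there's no
		 * mapping */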
		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

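	/* copy the page data into the backing file at the appropriate offset,
	 * temporarily widening the address limit so that ->write() will
	 * accept a buffer in kernel space */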
	data = kmap(page);
	file_start_write(file);
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = file->f_op->write(file, (const void __user *)data, len, &pos);
	set_fs(old_fs);
	kunmap(page);
	file_end_write(file);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

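	/* __fscache_uncache_page() calls us with the cookie lock held and
	 * expects us to release it */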
	spin_unlock(&object->fscache.cookie->lock);
}