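/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Client Lustre Page.
 */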
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include <linux/list.h>

#include "../include/cl_object.h"
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);

#define PASSERT(env, page, expr) \
	do { \
		if (unlikely(!(expr))) { \
			CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
			LASSERT(0); \
		} \
	} while (0)

#define PINVRNT(env, page, exp) \
	((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
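
/**
 * Internal version of cl_page_get().
 *
 * Takes a new reference on a page whose reference count is already
 * positive, i.e. the caller must already hold a reference or otherwise
 * have the page pinned (e.g. via a locked VM page).
 */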
static void cl_page_get_trust(struct cl_page *page)
{
	LASSERT(atomic_read(&page->cp_ref) > 0);
	atomic_inc(&page->cp_ref);
}
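
/**
 * Returns the slice of @page that corresponds to the given layer in the
 * device stack, or NULL if that layer contributed no slice.
 */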
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
		   const struct lu_device_type *dtype)
{
	const struct cl_page_slice *slice;

	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
		if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
			return slice;
	}
	return NULL;
}

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
	struct cl_object *obj = page->cp_obj;

	PASSERT(env, page, list_empty(&page->cp_batch));
	PASSERT(env, page, !page->cp_owner);
	PASSERT(env, page, !page->cp_req);
	PASSERT(env, page, page->cp_state == CPS_FREEING);

	while (!list_empty(&page->cp_layers)) {
		struct cl_page_slice *slice;

		slice = list_entry(page->cp_layers.next,
				   struct cl_page_slice, cpl_linkage);
		list_del_init(page->cp_layers.next);
		if (unlikely(slice->cpl_ops->cpo_fini))
			slice->cpl_ops->cpo_fini(env, slice);
	}
	lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
	cl_object_put(env, obj);
	lu_ref_fini(&page->cp_reference);
	kfree(page);
}
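
/**
 * Helper to set the page state directly, bypassing both the transition
 * sanity checking and the const qualifier on cp_state.
 */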
static inline void cl_page_state_set_trust(struct cl_page *page,
					   enum cl_page_state state)
{
	/* bypass const. */
	*(enum cl_page_state *)&page->cp_state = state;
}

struct cl_page *cl_page_alloc(const struct lu_env *env,
			      struct cl_object *o, pgoff_t ind,
			      struct page *vmpage,
			      enum cl_page_type type)
{
	struct cl_page *page;
	struct lu_object_header *head;

	page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
	if (page) {
		int result = 0;

		atomic_set(&page->cp_ref, 1);
		page->cp_obj = o;
		cl_object_get(o);
		lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
				     page);
		page->cp_vmpage = vmpage;
		cl_page_state_set_trust(page, CPS_CACHED);
		page->cp_type = type;
		INIT_LIST_HEAD(&page->cp_layers);
		INIT_LIST_HEAD(&page->cp_batch);
		INIT_LIST_HEAD(&page->cp_flight);
		lu_ref_init(&page->cp_reference);
		head = o->co_lu.lo_header;
		/* Give each layer a chance to attach its slice. */
		list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
			if (o->co_ops->coo_page_init) {
				result = o->co_ops->coo_page_init(env, o, page,
								  ind);
				if (result != 0) {
					cl_page_delete0(env, page);
					cl_page_free(env, page);
					page = ERR_PTR(result);
					break;
				}
			}
		}
	} else {
		page = ERR_PTR(-ENOMEM);
	}
	return page;
}
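
/**
 * Returns the cl_page with index @idx at object @o that is backed by the
 * VM page @vmpage.
 *
 * This is the main entry point into the cl_page caching interface: for
 * cacheable pages the cache is consulted first via cl_vmpage_page(), and
 * only on a miss is a new page allocated with cl_page_alloc(). In either
 * case the returned page carries an additional reference.
 */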
struct cl_page *cl_page_find(const struct lu_env *env,
			     struct cl_object *o,
			     pgoff_t idx, struct page *vmpage,
			     enum cl_page_type type)
{
	struct cl_page *page = NULL;
	struct cl_object_header *hdr;

	LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
	might_sleep();

	hdr = cl_object_header(o);

	CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
	       idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);

	if (type == CPT_CACHEABLE) {
		/*
		 * The VM page lock keeps vmpage->private stable while the
		 * cache is consulted.
		 */
		KLASSERT(PageLocked(vmpage));

		page = cl_vmpage_page(vmpage, o);
		if (page)
			return page;
	}

	/* Cache miss (or a transient page): allocate a new page. */
	page = cl_page_alloc(env, o, idx, vmpage, type);
	return page;
}
EXPORT_SYMBOL(cl_page_find);

static inline int cl_page_invariant(const struct cl_page *pg)
{
	return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
			       struct cl_page *page, enum cl_page_state state)
{
	enum cl_page_state old;

	/*
	 * Matrix of allowed state transitions [old][new] for sanity
	 * checking.
	 */
	static const int allowed_transitions[CPS_NR][CPS_NR] = {
		[CPS_CACHED] = {
			[CPS_CACHED]  = 0,
			[CPS_OWNED]   = 1,
			[CPS_PAGEIN]  = 0,
			[CPS_PAGEOUT] = 1,
			[CPS_FREEING] = 1,
		},
		[CPS_OWNED] = {
			[CPS_CACHED]  = 1,
			[CPS_OWNED]   = 0,
			[CPS_PAGEIN]  = 1,
			[CPS_PAGEOUT] = 1,
			[CPS_FREEING] = 1,
		},
		[CPS_PAGEIN] = {
			[CPS_CACHED]  = 1,
			[CPS_OWNED]   = 0,
			[CPS_PAGEIN]  = 0,
			[CPS_PAGEOUT] = 0,
			[CPS_FREEING] = 0,
		},
		[CPS_PAGEOUT] = {
			[CPS_CACHED]  = 1,
			[CPS_OWNED]   = 0,
			[CPS_PAGEIN]  = 0,
			[CPS_PAGEOUT] = 0,
			[CPS_FREEING] = 0,
		},
		[CPS_FREEING] = {
			[CPS_CACHED]  = 0,
			[CPS_OWNED]   = 0,
			[CPS_PAGEIN]  = 0,
			[CPS_PAGEOUT] = 0,
			[CPS_FREEING] = 0,
		}
	};

	old = page->cp_state;
	PASSERT(env, page, allowed_transitions[old][state]);
	CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
	PASSERT(env, page, page->cp_state == old);
	PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
	cl_page_state_set_trust(page, state);
}

static void cl_page_state_set(const struct lu_env *env,
			      struct cl_page *page, enum cl_page_state state)
{
	cl_page_state_set0(env, page, state);
}
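
/**
 * Acquires an additional reference to a page.
 *
 * This can only be called by a caller that already holds a reference to
 * @page.
 */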
void cl_page_get(struct cl_page *page)
{
	cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);
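
/**
 * Releases a reference to a page.
 *
 * When the last reference is released the page must already be in the
 * CPS_FREEING state, and it is freed immediately.
 */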
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
	CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
		       atomic_read(&page->cp_ref));

	if (atomic_dec_and_test(&page->cp_ref)) {
		LASSERT(page->cp_state == CPS_FREEING);

		LASSERT(atomic_read(&page->cp_ref) == 0);
		PASSERT(env, page, !page->cp_owner);
		PASSERT(env, page, list_empty(&page->cp_batch));
		/*
		 * Page is no longer reachable by other threads. Tear
		 * it down.
		 */
		cl_page_free(env, page);
	}
}
EXPORT_SYMBOL(cl_page_put);
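
/**
 * Returns the cl_page associated with the VM page @vmpage and object
 * @obj, taking a reference on it, or NULL if there is none.
 */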
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
	struct cl_page *page;

	KLASSERT(PageLocked(vmpage));

	/*
	 * NOTE: absence of races and liveness of data are guaranteed by the
	 * lock on the VM page, which keeps vmpage->private stable.
	 */
	page = (struct cl_page *)vmpage->private;
	if (page) {
		cl_page_get_trust(page);
		LASSERT(page->cp_type == CPT_CACHEABLE);
	}
	return page;
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
				       const struct lu_device_type *dtype)
{
	return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

/*
 * CL_PAGE_INVOKE() calls the method stored at offset @_op of
 * cl_page_operations on every layer of @_page, top-to-bottom, stopping at
 * the first non-zero return value; a positive result is folded to 0.
 * CL_PAGE_INVOKE_REVERSE() does the same bottom-to-top, and the INVOID
 * variants invoke void methods on every layer unconditionally.
 */
#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
({ \
	const struct lu_env *__env = (_env); \
	struct cl_page *__page = (_page); \
	const struct cl_page_slice *__scan; \
	int __result; \
	ptrdiff_t __op = (_op); \
	int (*__method)_proto; \
 \
	__result = 0; \
	list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
		__method = *(void **)((char *)__scan->cpl_ops + __op); \
		if (__method) { \
			__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
			if (__result != 0) \
				break; \
		} \
	} \
	if (__result > 0) \
		__result = 0; \
	__result; \
})

#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
({ \
	const struct lu_env *__env = (_env); \
	struct cl_page *__page = (_page); \
	const struct cl_page_slice *__scan; \
	int __result; \
	ptrdiff_t __op = (_op); \
	int (*__method)_proto; \
 \
	__result = 0; \
	list_for_each_entry_reverse(__scan, &__page->cp_layers, \
				    cpl_linkage) { \
		__method = *(void **)((char *)__scan->cpl_ops + __op); \
		if (__method) { \
			__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
			if (__result != 0) \
				break; \
		} \
	} \
	if (__result > 0) \
		__result = 0; \
	__result; \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
	const struct lu_env *__env = (_env); \
	struct cl_page *__page = (_page); \
	const struct cl_page_slice *__scan; \
	ptrdiff_t __op = (_op); \
	void (*__method)_proto; \
 \
	list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
		__method = *(void **)((char *)__scan->cpl_ops + __op); \
		if (__method) \
			(*__method)(__env, __scan, ## __VA_ARGS__); \
	} \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
do { \
	const struct lu_env *__env = (_env); \
	struct cl_page *__page = (_page); \
	const struct cl_page_slice *__scan; \
	ptrdiff_t __op = (_op); \
	void (*__method)_proto; \
 \
	list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
		__method = *(void **)((char *)__scan->cpl_ops + __op); \
		if (__method) \
			(*__method)(__env, __scan, ## __VA_ARGS__); \
	} \
} while (0)

static int cl_page_invoke(const struct lu_env *env,
			  struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
	PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
	return CL_PAGE_INVOKE(env, page, op,
			      (const struct lu_env *,
			       const struct cl_page_slice *, struct cl_io *),
			      io);
}

static void cl_page_invoid(const struct lu_env *env,
			   struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
	PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
	CL_PAGE_INVOID(env, page, op,
		       (const struct lu_env *,
			const struct cl_page_slice *, struct cl_io *), io);
}

static void cl_page_owner_clear(struct cl_page *page)
{
	if (page->cp_owner) {
		LASSERT(page->cp_owner->ci_owned_nr > 0);
		page->cp_owner->ci_owned_nr--;
		page->cp_owner = NULL;
	}
}

static void cl_page_owner_set(struct cl_page *page)
{
	page->cp_owner->ci_owned_nr++;
}

void cl_page_disown0(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg)
{
	enum cl_page_state state;

	state = pg->cp_state;
	PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
	PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
	cl_page_owner_clear(pg);

	if (state == CPS_OWNED)
		cl_page_state_set(env, pg, CPS_CACHED);
	/*
	 * Disown call-backs are executed in the bottom-up order, so that
	 * the uppermost layer (llite), responsible for the VFS/VM
	 * interaction, runs last and can release locks safely.
	 */
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
			       (const struct lu_env *,
				const struct cl_page_slice *, struct cl_io *),
			       io);
}
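
/**
 * Returns true iff @pg is owned by the topmost io of @io's stack.
 */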
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
	struct cl_io *top = cl_io_top((struct cl_io *)io);

	LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
	return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);
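
/**
 * Tries to own a page on behalf of an io.
 *
 * Calls cl_page_operations::cpo_own() on all layers top-to-bottom
 * (blocking or not, according to @nonblock) and, on success, moves the
 * page into CPS_OWNED with cp_owner set to the top-level io.
 *
 * \retval 0	   success
 * \retval -ENOENT the page is being freed
 * \retval -ve	   a layer refused ownership
 *
 * \see cl_page_own(), cl_page_own_try()
 */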
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
			struct cl_page *pg, int nonblock)
{
	int result;

	PINVRNT(env, pg, !cl_page_is_owned(pg, io));

	io = cl_io_top(io);

	if (pg->cp_state == CPS_FREEING) {
		result = -ENOENT;
	} else {
		result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
					(const struct lu_env *,
					 const struct cl_page_slice *,
					 struct cl_io *, int),
					io, nonblock);
		if (result == 0) {
			PASSERT(env, pg, !pg->cp_owner);
			PASSERT(env, pg, !pg->cp_req);
			pg->cp_owner = cl_io_top(io);
			cl_page_owner_set(pg);
			if (pg->cp_state != CPS_FREEING) {
				cl_page_state_set(env, pg, CPS_OWNED);
			} else {
				/*
				 * Raced with page deletion: disown and
				 * report the page as gone.
				 */
				cl_page_disown0(env, io, pg);
				result = -ENOENT;
			}
		}
	}
	PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
	return result;
}
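
/**
 * Owns a page, blocking until ownership can be acquired.
 *
 * \see cl_page_own0()
 */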
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
	return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
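
/**
 * Non-blocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */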
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *pg)
{
	return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
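
/**
 * Assumes page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \see cl_page_operations::cpo_assume()
 */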
void cl_page_assume(const struct lu_env *env,
		    struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

	io = cl_io_top(io);

	cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
	PASSERT(env, pg, !pg->cp_owner);
	pg->cp_owner = cl_io_top(io);
	cl_page_owner_set(pg);
	cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);
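
/**
 * Releases page ownership without unlocking the underlying VM page, which
 * is assumed to remain locked by the upper layer.
 *
 * Moves the page back into CPS_CACHED.
 *
 * \see cl_page_assume()
 */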
void cl_page_unassume(const struct lu_env *env,
		      struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	io = cl_io_top(io);
	cl_page_owner_clear(pg);
	cl_page_state_set(env, pg, CPS_CACHED);
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
			       (const struct lu_env *,
				const struct cl_page_slice *, struct cl_io *),
			       io);
}
EXPORT_SYMBOL(cl_page_unassume);
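
/**
 * Releases page ownership and moves the page back into CPS_CACHED.
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */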
void cl_page_disown(const struct lu_env *env,
		    struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
		pg->cp_state == CPS_FREEING);

	io = cl_io_top(io);
	cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);
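
/**
 * Called when the page is to be removed from the object, e.g. as a result
 * of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 */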
void cl_page_discard(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);
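
/**
 * Version of cl_page_delete() that can be called on pages that are not
 * fully constructed, e.g. on the error path of cl_page_alloc(); it does
 * not check the page invariant.
 */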
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
	PASSERT(env, pg, pg->cp_state != CPS_FREEING);

	/*
	 * Sever all ways to obtain new pointers to @pg.
	 */
	cl_page_owner_clear(pg);

	cl_page_state_set0(env, pg, CPS_FREEING);

	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
			       (const struct lu_env *,
				const struct cl_page_slice *));
}
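
/**
 * Called when a decision is made to throw the page out of memory.
 *
 * Notifies all layers by calling cl_page_operations::cpo_delete()
 * bottom-to-top and moves the page into CPS_FREEING; once the remaining
 * references drain, the page is freed.
 *
 * \pre  the VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */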
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_invariant(pg));
	cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
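
/**
 * Marks the page as up-to-date (or not, depending on @uptodate) by
 * calling cl_page_operations::cpo_export() on all layers top-to-bottom;
 * the layer responsible for VM interaction updates the VM page state.
 */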
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
	PINVRNT(env, pg, cl_page_invariant(pg));
	CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
		       (const struct lu_env *,
			const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);
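
/**
 * Returns true iff the VM page backing @pg is locked.
 */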
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
	int result;
	const struct cl_page_slice *slice;

	slice = container_of(pg->cp_layers.next,
			     const struct cl_page_slice, cpl_linkage);
	PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
	/*
	 * Only the topmost slice is queried: ->cpo_is_vmlocked() is called
	 * directly instead of going through CL_PAGE_INVOKE().
	 */
	result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
	PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
	return result == -EBUSY;
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
	return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
}

static void cl_page_io_start(const struct lu_env *env,
			     struct cl_page *pg, enum cl_req_type crt)
{
	/*
	 * Page is queued for IO: change its state accordingly and drop
	 * ownership.
	 */
	cl_page_owner_clear(pg);
	cl_page_state_set(env, pg, cl_req_type_state(crt));
}
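
/**
 * Prepares the page for immediate transfer: cpo_prep() is called on all
 * layers top-to-bottom and, if every layer agrees, the page is moved into
 * CPS_PAGEIN or CPS_PAGEOUT.
 *
 * \pre cl_page_is_owned(pg, io)
 */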
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
		 struct cl_page *pg, enum cl_req_type crt)
{
	int result;

	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));
	PINVRNT(env, pg, crt < CRT_NR);

	/*
	 * PINVRNT() compiles to a no-op, so @crt still has to be checked at
	 * run time.
	 */
	if (crt >= CRT_NR)
		return -EINVAL;
	result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
	if (result == 0)
		cl_page_io_start(env, pg, crt);

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
	return result;
}
EXPORT_SYMBOL(cl_page_prep);
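
/**
 * Notifies layers that the transfer of which this page was a part has
 * completed.
 *
 * Completion callbacks are executed bottom-to-top, so that the uppermost
 * layer (llite), responsible for the VFS/VM interaction, runs last and
 * can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 */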
void cl_page_completion(const struct lu_env *env,
			struct cl_page *pg, enum cl_req_type crt, int ioret)
{
	struct cl_sync_io *anchor = pg->cp_sync_io;

	PASSERT(env, pg, crt < CRT_NR);
	PASSERT(env, pg, !pg->cp_req);
	PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);

	cl_page_state_set(env, pg, CPS_CACHED);
	if (crt >= CRT_NR)
		return;
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
			       (const struct lu_env *,
				const struct cl_page_slice *, int), ioret);
	if (anchor) {
		LASSERT(pg->cp_sync_io == anchor);
		pg->cp_sync_io = NULL;
	}
	/*
	 * Release the page reference held for the duration of the transfer;
	 * this may free the page.
	 */
	cl_page_put(env, pg);

	if (anchor)
		cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
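
/**
 * Notifies layers that the transfer formation engine decided to include
 * this cached page in a transfer: cpo_make_ready() is called top-to-bottom
 * and, on success, the page moves from CPS_CACHED into the transfer state.
 */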
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
		       enum cl_req_type crt)
{
	int result;

	PINVRNT(env, pg, crt < CRT_NR);

	if (crt >= CRT_NR)
		return -EINVAL;
	result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
				(const struct lu_env *,
				 const struct cl_page_slice *));
	if (result == 0) {
		PASSERT(env, pg, pg->cp_state == CPS_CACHED);
		cl_page_io_start(env, pg, crt);
	}
	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
	return result;
}
EXPORT_SYMBOL(cl_page_make_ready);
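
/**
 * Called when the page is being written back: calls
 * cl_page_operations::cpo_flush() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 */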
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
		  struct cl_page *pg)
{
	int result;

	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
	return result;
}
EXPORT_SYMBOL(cl_page_flush);
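
/**
 * Checks whether the page is protected by an extent lock, by querying the
 * layers bottom-to-top via cl_page_operations::cpo_is_under_lock();
 * @max_index receives the last page index covered by the lock.
 */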
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
			  struct cl_page *page, pgoff_t *max_index)
{
	int rc;

	PINVRNT(env, page, cl_page_invariant(page));

	rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
				    (const struct lu_env *,
				     const struct cl_page_slice *,
				     struct cl_io *, pgoff_t *),
				    io, max_index);
	return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
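
/**
 * Tells the transfer engine that only the [from, to) part of the page is
 * to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */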
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
		  int from, int to)
{
	PINVRNT(env, pg, cl_page_invariant(pg));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
	CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
		       (const struct lu_env *,
			const struct cl_page_slice *, int, int),
		       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
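
/**
 * Prints a one-line human readable representation of @pg via @printer.
 */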
void cl_page_header_print(const struct lu_env *env, void *cookie,
			  lu_printer_t printer, const struct cl_page *pg)
{
	(*printer)(env, cookie,
		   "page@%p[%d %p %d %d %p %p]\n",
		   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
		   pg->cp_state, pg->cp_type,
		   pg->cp_owner, pg->cp_req);
}
EXPORT_SYMBOL(cl_page_header_print);
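
/**
 * Prints a human readable representation of @pg, including all of its
 * slices, via @printer.
 */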
void cl_page_print(const struct lu_env *env, void *cookie,
		   lu_printer_t printer, const struct cl_page *pg)
{
	cl_page_header_print(env, cookie, printer, pg);
	CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
		       (const struct lu_env *env,
			const struct cl_page_slice *slice,
			void *cookie, lu_printer_t p), cookie, printer);
	(*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
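
/**
 * Cancels a page that is still part of a transfer.
 */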
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
	return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
			      (const struct lu_env *,
			       const struct cl_page_slice *));
}
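
/**
 * Converts the page index @idx within object @obj into a byte offset.
 */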
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
	/*
	 * For now all objects use the system page size; see cl_page_size().
	 */
	return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
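
/**
 * Converts the byte offset @offset within object @obj into a page index.
 */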
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
	return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

size_t cl_page_size(const struct cl_object *obj)
{
	return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
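
/**
 * Adds a per-layer slice to the page.
 *
 * Called by cl_object_operations::coo_page_init() methods to add layer
 * private state to the page; the new slice is appended to the end of
 * cl_page::cp_layers, i.e. at the bottom of the slice stack.
 */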
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
		       struct cl_object *obj, pgoff_t index,
		       const struct cl_page_operations *ops)
{
	list_add_tail(&slice->cpl_linkage, &page->cp_layers);
	slice->cpl_obj = obj;
	slice->cpl_index = index;
	slice->cpl_ops = ops;
	slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
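
/**
 * Allocates and initializes the client cache, with an initial LRU budget
 * of @lru_page_max pages.
 */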
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
	struct cl_client_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;

	/* Initialize cache data */
	atomic_set(&cache->ccc_users, 1);
	cache->ccc_lru_max = lru_page_max;
	atomic_long_set(&cache->ccc_lru_left, lru_page_max);
	spin_lock_init(&cache->ccc_lru_lock);
	INIT_LIST_HEAD(&cache->ccc_lru);

	atomic_long_set(&cache->ccc_unstable_nr, 0);
	init_waitqueue_head(&cache->ccc_unstable_waitq);

	return cache;
}
EXPORT_SYMBOL(cl_cache_init);
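
/**
 * Takes a reference on the client cache.
 */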
void cl_cache_incref(struct cl_client_cache *cache)
{
	atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);
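
/**
 * Drops a reference on the client cache; the cache is freed when the last
 * user releases its reference.
 */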
void cl_cache_decref(struct cl_client_cache *cache)
{
	if (atomic_dec_and_test(&cache->ccc_users))
		kfree(cache);
}
EXPORT_SYMBOL(cl_cache_decref);