1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#define DEBUG_SUBSYSTEM S_CLASS
53
54#include "../../include/linux/libcfs/libcfs.h"
55
56#include "../include/obd_class.h"
57#include "../include/obd_support.h"
58#include "../include/lustre_fid.h"
59#include <linux/list.h>
60#include "../../include/linux/libcfs/libcfs_hash.h"
61#include "../include/cl_object.h"
62#include "cl_internal.h"
63
64static struct kmem_cache *cl_env_kmem;
65
66
67static struct lock_class_key cl_page_guard_class;
68
69static struct lock_class_key cl_lock_guard_class;
70
71static struct lock_class_key cl_attr_guard_class;
72
73extern __u32 lu_context_tags_default;
74extern __u32 lu_session_tags_default;
75
76
77
78int cl_object_header_init(struct cl_object_header *h)
79{
80 int result;
81
82 result = lu_object_header_init(&h->coh_lu);
83 if (result == 0) {
84 spin_lock_init(&h->coh_page_guard);
85 spin_lock_init(&h->coh_lock_guard);
86 spin_lock_init(&h->coh_attr_guard);
87 lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
88 lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
89 lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
90 h->coh_pages = 0;
91
92 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
93 INIT_LIST_HEAD(&h->coh_locks);
94 h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
95 }
96 return result;
97}
98EXPORT_SYMBOL(cl_object_header_init);
99
100
101
102
103
104
105
106
107
108struct cl_object *cl_object_find(const struct lu_env *env,
109 struct cl_device *cd, const struct lu_fid *fid,
110 const struct cl_object_conf *c)
111{
112 might_sleep();
113 return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
114}
115EXPORT_SYMBOL(cl_object_find);
116
117
118
119
120
121
122
123
124
/**
 * Release a reference on \a o, forwarding to lu_object_put() on the
 * embedded lu_object.
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);
130
131
132
133
134
135
136
137
138
/**
 * Acquire a reference on \a o, forwarding to lu_object_get() on the
 * embedded lu_object.
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);
144
145
146
147
148
149
150struct cl_object *cl_object_top(struct cl_object *o)
151{
152 struct cl_object_header *hdr = cl_object_header(o);
153 struct cl_object *top;
154
155 while (hdr->coh_parent)
156 hdr = hdr->coh_parent;
157
158 top = lu2cl(lu_object_top(&hdr->coh_lu));
159 CDEBUG(D_TRACE, "%p -> %p\n", o, top);
160 return top;
161}
162EXPORT_SYMBOL(cl_object_top);
163
164
165
166
167
168
169
170
171
172
173static spinlock_t *cl_object_attr_guard(struct cl_object *o)
174{
175 return &cl_object_header(cl_object_top(o))->coh_attr_guard;
176}
177
178
179
180
181
182
183
184
/**
 * Take the attribute guard of the object stack containing \a o.
 * Pairs with cl_object_attr_unlock().
 */
void cl_object_attr_lock(struct cl_object *o)
	__acquires(cl_object_attr_guard(o))
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
191
192
193
194
/**
 * Release the attribute guard taken by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
	__releases(cl_object_attr_guard(o))
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
201
202
203
204
205
206
207
208
209int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
210 struct cl_attr *attr)
211{
212 struct lu_object_header *top;
213 int result;
214
215 assert_spin_locked(cl_object_attr_guard(obj));
216
217 top = obj->co_lu.lo_header;
218 result = 0;
219 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
220 if (obj->co_ops->coo_attr_get) {
221 result = obj->co_ops->coo_attr_get(env, obj, attr);
222 if (result != 0) {
223 if (result > 0)
224 result = 0;
225 break;
226 }
227 }
228 }
229 return result;
230}
231EXPORT_SYMBOL(cl_object_attr_get);
232
233
234
235
236
237
238
239
240int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
241 const struct cl_attr *attr, unsigned v)
242{
243 struct lu_object_header *top;
244 int result;
245
246 assert_spin_locked(cl_object_attr_guard(obj));
247
248 top = obj->co_lu.lo_header;
249 result = 0;
250 list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
251 if (obj->co_ops->coo_attr_set) {
252 result = obj->co_ops->coo_attr_set(env, obj, attr, v);
253 if (result != 0) {
254 if (result > 0)
255 result = 0;
256 break;
257 }
258 }
259 }
260 return result;
261}
262EXPORT_SYMBOL(cl_object_attr_set);
263
264
265
266
267
268
269
270
271
272int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
273 struct ost_lvb *lvb)
274{
275 struct lu_object_header *top;
276 int result;
277
278 top = obj->co_lu.lo_header;
279 result = 0;
280 list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
281 if (obj->co_ops->coo_glimpse) {
282 result = obj->co_ops->coo_glimpse(env, obj, lvb);
283 if (result != 0)
284 break;
285 }
286 }
287 LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
288 "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
289 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
290 lvb->lvb_ctime, lvb->lvb_blocks);
291 return result;
292}
293EXPORT_SYMBOL(cl_object_glimpse);
294
295
296
297
298int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
299 const struct cl_object_conf *conf)
300{
301 struct lu_object_header *top;
302 int result;
303
304 top = obj->co_lu.lo_header;
305 result = 0;
306 list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
307 if (obj->co_ops->coo_conf_set) {
308 result = obj->co_ops->coo_conf_set(env, obj, conf);
309 if (result != 0)
310 break;
311 }
312 }
313 return result;
314}
315EXPORT_SYMBOL(cl_conf_set);
316
317
318
319
320
321
322
323
/**
 * Mark \a obj as doomed: assert it has no cached pages, set the
 * LU_OBJECT_HEARD_BANSHEE flag on the lu header, and prune its locks
 * without cancellation (cl_locks_prune(..., 0)).
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr;

	hdr = cl_object_header(obj);
	/* Caller must have emptied the page cache first. */
	LASSERT(!hdr->coh_tree.rnode);
	LASSERT(hdr->coh_pages == 0);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);

	/*
	 * Second argument 0: prune without cancelling; the cancelling
	 * variant is done by cl_object_prune().
	 */
	cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
343
344
345
346
/**
 * Drop all cached pages of \a obj and cancel-prune all of its locks
 * (cl_locks_prune(..., 1)).
 */
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	cl_pages_prune(env, obj);
	cl_locks_prune(env, obj, 1);
}
EXPORT_SYMBOL(cl_object_prune);
353
354void cache_stats_init(struct cache_stats *cs, const char *name)
355{
356 int i;
357
358 cs->cs_name = name;
359 for (i = 0; i < CS_NR; i++)
360 atomic_set(&cs->cs_stats[i], 0);
361}
362
/*
 * Print one cache_stats row to seq_file \a m; when \a h is non-zero a
 * header row with the CS_NAMES column labels is emitted first.
 * Always returns 0.
 */
static int cache_stats_print(const struct cache_stats *cs,
			     struct seq_file *m, int h)
{
	int i;

	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		seq_printf(m, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			seq_printf(m, "%8s", names[i]);
		seq_printf(m, "\n");
	}

	seq_printf(m, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
	return 0;
}
385
386
387
388
389
390
391
392int cl_site_init(struct cl_site *s, struct cl_device *d)
393{
394 int i;
395 int result;
396
397 result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
398 if (result == 0) {
399 cache_stats_init(&s->cs_pages, "pages");
400 cache_stats_init(&s->cs_locks, "locks");
401 for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
402 atomic_set(&s->cs_pages_state[0], 0);
403 for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
404 atomic_set(&s->cs_locks_state[i], 0);
405 }
406 return result;
407}
408EXPORT_SYMBOL(cl_site_init);
409
410
411
412
/**
 * Finalize client site, releasing the embedded lu_site.
 * Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);
418
/* Global statistics for cl_env allocation; printed by cl_site_stats_print(). */
static struct cache_stats cl_env_stats = {
	.cs_name    = "envs",
	.cs_stats = { ATOMIC_INIT(0), }
};
423
424
425
426
427
/**
 * Dump site statistics to seq_file \a m: lu_site stats, page cache
 * stats with per-state counts, lock cache stats with per-state counts,
 * and the global cl_env stats. Always returns 0.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
	int i;
	/* one-letter tags for each cl_page state */
	static const char *pstate[] = {
		[CPS_CACHED]  = "c",
		[CPS_OWNED]   = "o",
		[CPS_PAGEOUT] = "w",
		[CPS_PAGEIN]  = "r",
		[CPS_FREEING] = "f"
	};
	/* one-letter tags for each cl_lock state */
	static const char *lstate[] = {
		[CLS_NEW]       = "n",
		[CLS_QUEUING]   = "q",
		[CLS_ENQUEUED]  = "e",
		[CLS_HELD]      = "h",
		[CLS_INTRANSIT] = "t",
		[CLS_CACHED]    = "c",
		[CLS_FREEING]   = "f"
	};

	lu_site_stats_print(&site->cs_lu, m);
	cache_stats_print(&site->cs_pages, m, 1);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		seq_printf(m, "%s: %u ", pstate[i],
			   atomic_read(&site->cs_pages_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&site->cs_locks, m, 0);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
		seq_printf(m, "%s: %u ", lstate[i],
			   atomic_read(&site->cs_locks_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&cl_env_stats, m, 0);
	seq_printf(m, "\n");
	return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
/*
 * Per-thread client environment: an lu_env plus a session context,
 * optionally registered in cl_env_hash keyed by the owning thread.
 */
struct cl_env {
	/* Sanity cookie; set to &cl_env_init0 for a live env. */
	void *ce_magic;
	/* Embedded lu_env; cl_env_container() maps back from it. */
	struct lu_env ce_lu;
	/* Per-call session context, entered in cl_env_new(). */
	struct lu_context ce_ses;

	/* Linkage into cl_env_hash (cl_env_attach()/cl_env_do_detach()). */
	struct hlist_node ce_node;

	/*
	 * Hash key identifying the owner: current->pid cast to a pointer
	 * while attached, NULL while detached.
	 */
	void *ce_owner;

	/* List linkage; initialized in cl_env_new(). */
	struct list_head ce_linkage;

	/* Reference count; the env is destroyed when it drops to zero. */
	int ce_ref;

	/*
	 * Debug cookie (caller's return address) recorded while the env
	 * is in use; cleared in cl_env_put().
	 */
	void *ce_debug;
};
533
/* Env statistics hooks are compiled out: these expand to nothing. */
#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)

/* First-use initialization of a freshly allocated (zeroed) cl_env. */
static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(!cle->ce_debug && !cle->ce_owner);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	CL_ENV_INC(busy);
}
547
548
549
550
551
/* Hash table mapping owner key (current->pid as a pointer) -> cl_env. */
static struct cfs_hash *cl_env_hash;

/* Hash the owner key into a bucket index, using the native word width. */
static unsigned cl_env_hops_hash(struct cfs_hash *lh,
				 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
	return cfs_hash_u64_hash((__u64)key, mask);
#else
	return cfs_hash_u32_hash((__u32)key, mask);
#endif
}
563
564static void *cl_env_hops_obj(struct hlist_node *hn)
565{
566 struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
567
568 LASSERT(cle->ce_magic == &cl_env_init0);
569 return (void *)cle;
570}
571
572static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
573{
574 struct cl_env *cle = cl_env_hops_obj(hn);
575
576 LASSERT(cle->ce_owner);
577 return (key == cle->ce_owner);
578}
579
/* No-op get/put hook: env lifetime is managed by ce_ref, not the hash. */
static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
{
	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

	LASSERT(cle->ce_magic == &cl_env_init0);
}
586
/*
 * cfs_hash operations for cl_env_hash.
 * NOTE(review): .hs_key returns the object rather than ce_owner; lookup
 * correctness relies on .hs_keycmp comparing against ce_owner — confirm
 * against cfs_hash usage before changing.
 */
static struct cfs_hash_ops cl_env_hops = {
	.hs_hash	= cl_env_hops_hash,
	.hs_key		= cl_env_hops_obj,
	.hs_keycmp      = cl_env_hops_keycmp,
	.hs_object      = cl_env_hops_obj,
	.hs_get		= cl_env_hops_noop,
	.hs_put_locked  = cl_env_hops_noop,
};
595
/* Look up the env attached to the current thread (by pid); NULL if none. */
static inline struct cl_env *cl_env_fetch(void)
{
	struct cl_env *cle;

	cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
	return cle;
}
604
/*
 * Attach \a cle to the current thread: record current->pid as owner and
 * insert into cl_env_hash. \a cle must not already be owned; insertion
 * must succeed (one env per pid).
 */
static inline void cl_env_attach(struct cl_env *cle)
{
	if (cle) {
		int rc;

		LASSERT(!cle->ce_owner);
		cle->ce_owner = (void *) (long) current->pid;
		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
					 &cle->ce_node);
		LASSERT(rc == 0);
	}
}
617
/*
 * Remove \a cle from cl_env_hash and clear its owner. Must be called by
 * the owning thread.
 */
static inline void cl_env_do_detach(struct cl_env *cle)
{
	void *cookie;

	LASSERT(cle->ce_owner == (void *) (long) current->pid);
	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
			      &cle->ce_node);
	LASSERT(cookie == cle);
	cle->ce_owner = NULL;
}
628
/* Create the global pid->env hash. Returns 0 or -ENOMEM. */
static int cl_env_store_init(void)
{
	cl_env_hash = cfs_hash_create("cl_env",
				      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
				      HASH_CL_ENV_BKT_BITS, 0,
				      CFS_HASH_MIN_THETA,
				      CFS_HASH_MAX_THETA,
				      &cl_env_hops,
				      CFS_HASH_RW_BKTLOCK);
	return cl_env_hash ? 0 : -ENOMEM;
}
640
/* Drop the reference on the global pid->env hash; dual to cl_env_store_init(). */
static void cl_env_store_fini(void)
{
	cfs_hash_putref(cl_env_hash);
}
645
/*
 * Detach an env from the current thread. When \a cle is NULL the
 * thread's attached env (if any) is looked up first. Returns the
 * detached env, or NULL when the thread had none.
 */
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
	if (!cle)
		cle = cl_env_fetch();

	if (cle && cle->ce_owner)
		cl_env_do_detach(cle);

	return cle;
}
656
/*
 * Allocate and initialize a fresh cl_env: the embedded lu_env (with
 * LCT_CL_THREAD added to \a ctx_tags) and a session context (with
 * LCT_SESSION added to \a ses_tags). \a debug is recorded as the debug
 * cookie. On failure all partially-initialized state is torn down and
 * an ERR_PTR() is returned.
 */
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS);
	if (cle) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     ses_tags | LCT_SESSION);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else
				/* session failed: undo lu_env_init() */
				lu_env_fini(env);
		}
		if (rc != 0) {
			kmem_cache_free(cl_env_kmem, cle);
			env = ERR_PTR(rc);
		} else {
			CL_ENV_INC(create);
			CL_ENV_INC(total);
		}
	} else
		env = ERR_PTR(-ENOMEM);
	return env;
}
691
/* Destroy an env: finalize both contexts and free the slab object. */
static void cl_env_fini(struct cl_env *cle)
{
	CL_ENV_DEC(total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	kmem_cache_free(cl_env_kmem, cle);
}
699
/* Map an lu_env pointer back to its enclosing cl_env. */
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}
704
/*
 * Return the env attached to the current thread, taking an extra
 * reference (stored into *refcheck), or NULL when the thread has none.
 */
static struct lu_env *cl_env_peek(int *refcheck)
{
	struct lu_env *env;
	struct cl_env *cle;

	CL_ENV_INC(lookup);

	/* ce_magic at offset 0 is relied upon for the sanity cookie. */
	CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

	env = NULL;
	cle = cl_env_fetch();
	if (cle) {
		CL_ENV_INC(hit);
		env = &cle->ce_lu;
		*refcheck = ++cle->ce_ref;
	}
	CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
	return env;
}
725
726
727
728
729
730
731
732
733
734
735
736
/**
 * Return the current thread's env, creating and attaching a new one
 * (with the default context/session tags) when the thread has none.
 * *refcheck records the reference count, checked later by cl_env_put().
 *
 * \retval the env, or an ERR_PTR() on allocation failure.
 */
struct lu_env *cl_env_get(int *refcheck)
{
	struct lu_env *env;

	env = cl_env_peek(refcheck);
	if (!env) {
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default,
				 __builtin_return_address(0));

		if (!IS_ERR(env)) {
			struct cl_env *cle;

			cle = cl_env_container(env);
			cl_env_attach(cle);
			*refcheck = cle->ce_ref;
			CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
		}
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);
758EXPORT_SYMBOL(cl_env_get);
759
760
761
762
763
764
/**
 * Allocate a fresh env with the given \a tags. Unlike cl_env_get() the
 * result is NOT attached to the current thread; the caller must not
 * already have an attached env.
 *
 * \retval the new env, or an ERR_PTR() on failure.
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
	struct lu_env *env;

	LASSERT(!cl_env_peek(refcheck));
	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);
781
/* Exit both contexts of a (detached) env prior to destruction. */
static void cl_env_exit(struct cl_env *cle)
{
	LASSERT(!cle->ce_owner);
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}
788
789
790
791
792
793
794
795
/**
 * Release a reference on \a env. When \a refcheck is non-NULL it must
 * match the current reference count (debugging aid). On the last
 * reference the env is detached, exited, and destroyed.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		CL_ENV_DEC(busy);
		/* order matters: detach, then exit contexts, then free */
		cl_env_detach(cle);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		cl_env_fini(cle);
	}
}
EXPORT_SYMBOL(cl_env_put);
815
816
817
818
819
820
/**
 * Temporarily detach the current thread's env (if any), returning it as
 * an opaque cookie to be restored later with cl_env_reexit().
 */
void *cl_env_reenter(void)
{
	return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);
826
827
828
829
/**
 * Restore the env saved by cl_env_reenter(): detach whatever env the
 * thread acquired in between, then re-attach \a cookie.
 */
void cl_env_reexit(void *cookie)
{
	cl_env_detach(NULL);
	cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);
836
837
838
839
840
841
842
843
/**
 * Attach an existing \a env to the current thread and take an extra
 * reference on it (via cl_env_get(), which will now find it attached).
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);

	cl_env_attach(cle);
	cl_env_get(refcheck);
	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);
855
856
857
858
/**
 * Detach \a env from the current thread and drop one reference.
 * Dual to cl_env_implant(); the env must still have another holder
 * (ce_ref > 1) so it survives the put.
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
	struct cl_env *cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 1);

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

	cl_env_detach(cle);
	cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);
871
/*
 * Obtain an env for nested use. Reuse the thread's env when it is not
 * in the middle of an IO; otherwise stash it via cl_env_reenter() and
 * get a fresh one. State for the matching cl_env_nested_put() is kept
 * in \a nest.
 */
struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
	struct lu_env *env;

	nest->cen_cookie = NULL;
	env = cl_env_peek(&nest->cen_refcheck);
	if (env) {
		if (!cl_io_is_going(env))
			return env;
		/* env busy with an IO: park it and allocate another */
		cl_env_put(env, &nest->cen_refcheck);
		nest->cen_cookie = cl_env_reenter();
	}
	env = cl_env_get(&nest->cen_refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(nest->cen_cookie);
		return env;
	}

	LASSERT(!cl_io_is_going(env));
	return env;
}
EXPORT_SYMBOL(cl_env_nested_get);
894
/* Release an env obtained by cl_env_nested_get() and restore the parked one. */
void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
	cl_env_put(env, &nest->cen_refcheck);
	cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
901
902
903
904
905
906
/**
 * Convert an ost_lvb (size/mtime/atime/ctime/blocks) into a cl_attr,
 * field by field.
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size = lvb->lvb_size;
	attr->cat_mtime = lvb->lvb_mtime;
	attr->cat_atime = lvb->lvb_atime;
	attr->cat_ctime = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);
916
917
918
919
920
921
922
/**
 * Allocate and initialize a device of type \a ldt, stacking it on top
 * of \a next; \a site, when non-NULL, is installed as the device's
 * lu_site. On success a reference and an lu_ref are taken on the
 * device; on init failure the device is freed and an ERR_PTR() is
 * returned (wrapped by lu2cl_dev()).
 */
struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char *typename;
	struct lu_device *d;

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else
		CERROR("Cannot allocate device: '%s'\n", typename);
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
952
953
954
955
/**
 * Finalize the device stack rooted at \a cl; dual to cl_type_setup().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);
961
962int cl_lock_init(void);
963void cl_lock_fini(void);
964
965int cl_page_init(void);
966void cl_page_fini(void);
967
968static struct lu_context_key cl_key;
969
/* Fetch the per-thread cl_thread_info stored under cl_key in \a env. */
struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}
974
975
/* Generates cl0_key_init()/cl0_key_fini() allocating a cl_thread_info. */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

/*
 * Context-key init: allocate the cl_thread_info and initialize the
 * lu_ref tracker in each per-nesting counter slot.
 */
static void *cl_key_init(const struct lu_context *ctx,
			 struct lu_context_key *key)
{
	struct cl_thread_info *info;

	info = cl0_key_init(ctx, key);
	if (!IS_ERR(info)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
			lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
	return info;
}
992
/*
 * Context-key fini: tear down each counter slot's lu_ref tracker, then
 * free the cl_thread_info. Dual to cl_key_init().
 */
static void cl_key_fini(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info;
	int i;

	info = data;
	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
	cl0_key_fini(ctx, key, data);
}
1004
/*
 * Context-key exit hook: assert that every counter slot is balanced
 * (nothing held, used, or locked), and reset each lu_ref tracker for
 * the next entry into the context.
 */
static void cl_key_exit(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	struct cl_thread_info *info = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
		LASSERT(info->clt_counters[i].ctc_nr_held == 0);
		LASSERT(info->clt_counters[i].ctc_nr_used == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
		LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		/* fini+init pairs reset the tracker to a clean state */
		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
		lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
	}
}
1020
/* Context key providing per-thread cl_thread_info for LCT_CL_THREAD. */
static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
	.lct_exit = cl_key_exit
};

/* Slab caches owned by this module; NULL-terminated for lu_kmem_init(). */
static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof(struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};
1038
1039
1040
1041
1042
1043
1044
/**
 * Global module initialization: env store, slab caches, context key,
 * lock and page subsystems — unwound in reverse order via the goto
 * chain on failure.
 *
 * \retval 0 on success, negative errno otherwise.
 */
int cl_global_init(void)
{
	int result;

	result = cl_env_store_init();
	if (result)
		return result;

	result = lu_kmem_init(cl_object_caches);
	if (result)
		goto out_store;

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		goto out_kmem;

	result = cl_lock_init();
	if (result)
		goto out_context;

	result = cl_page_init();
	if (result)
		goto out_lock;

	return 0;
out_lock:
	cl_lock_fini();
out_context:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_store:
	cl_env_store_fini();
	return result;
}
1081
1082
1083
1084
/**
 * Global module teardown; reverse order of cl_global_init().
 */
void cl_global_fini(void)
{
	cl_lock_fini();
	cl_page_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	cl_env_store_fini();
}
1093