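/*
 * Client Extent Lock (cl_lock) implementation for the Lustre client object
 * layer. A cl_lock is a compound object built of per-layer slices; the
 * functions below manage lock life-cycle, state transitions, caching and
 * lookup.
 */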

#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include "../include/cl_object.h"
#include "cl_internal.h"

static struct lock_class_key cl_lock_guard_class;
static struct kmem_cache *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
        {
                .ckd_cache = &cl_lock_kmem,
                .ckd_name  = "cl_lock_kmem",
                .ckd_size  = sizeof(struct cl_lock)
        },
        {
                .ckd_cache = NULL
        }
};

/* Lock statistics are compiled out; these macros are deliberate no-ops. */
#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
#define CS_LOCKSTATE_INC(o, state)
#define CS_LOCKSTATE_DEC(o, state)
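
/*
 * Basic lock invariant that must hold at all times. The caller either owns
 * a reference on @lock, or otherwise guarantees that the lock cannot be
 * freed while the check runs.
 */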
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
        return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
               atomic_read(&lock->cll_ref) >= lock->cll_holds &&
               lock->cll_holds >= lock->cll_users &&
               lock->cll_holds >= 0 &&
               lock->cll_users >= 0 &&
               lock->cll_depth >= 0;
}
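
/*
 * Stronger form of cl_lock_invariant_trusted() that additionally checks
 * that the caller holds a reference on @lock, and logs the lock when the
 * invariant is broken.
 */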
static int cl_lock_invariant(const struct lu_env *env,
                             const struct cl_lock *lock)
{
        int result;

        result = atomic_read(&lock->cll_ref) > 0 &&
                 cl_lock_invariant_trusted(env, lock);
        if (!result && env)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
        return result;
}

/* Returns the lock's nesting level, taken from the header of its object. */
static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
{
        return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
}

/* Returns the per-thread counters for the nesting level of @lock. */
static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
                                                   const struct cl_lock *lock)
{
        struct cl_thread_info *info;
        enum clt_nesting_level nesting;

        info = cl_env_info(env);
        nesting = cl_lock_nesting(lock);
        LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
        return &info->clt_counters[nesting];
}

static void cl_lock_trace0(int level, const struct lu_env *env,
                           const char *prefix, const struct cl_lock *lock,
                           const char *func, const int line)
{
        struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);

        CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
               prefix, lock, atomic_read(&lock->cll_ref),
               lock->cll_guarder, lock->cll_depth,
               lock->cll_state, lock->cll_error, lock->cll_holds,
               lock->cll_users, lock->cll_flags,
               env, h->coh_nesting, cl_lock_nr_mutexed(env),
               func, line);
}

#define cl_lock_trace(level, env, prefix, lock)                         \
        cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)

#define RETIP ((unsigned long)__builtin_return_address(0))

#ifdef CONFIG_LOCKDEP
static struct lock_class_key cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
        lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
        lock_map_acquire(&lock->dep_map);
}

static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
        lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */
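
/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of the
 * cl_lock::cll_layers list, that is, at the bottom of the stack.
 */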
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                       struct cl_object *obj,
                       const struct cl_lock_operations *ops)
{
        slice->cls_lock = lock;
        list_add_tail(&slice->cls_linkage, &lock->cll_layers);
        slice->cls_obj = obj;
        slice->cls_ops = ops;
}
EXPORT_SYMBOL(cl_lock_slice_add);
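
/**
 * Returns true iff a lock with the mode @has provides at least the same
 * guarantees as a lock with the mode @need. A group lock is only compatible
 * with itself.
 */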
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
        LINVRNT(need == CLM_READ || need == CLM_WRITE ||
                need == CLM_PHANTOM || need == CLM_GROUP);
        LINVRNT(has == CLM_READ || has == CLM_WRITE ||
                has == CLM_PHANTOM || has == CLM_GROUP);
        CLASSERT(CLM_PHANTOM < CLM_READ);
        CLASSERT(CLM_READ < CLM_WRITE);
        CLASSERT(CLM_WRITE < CLM_GROUP);

        if (has != CLM_GROUP)
                return need <= has;
        else
                return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
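
/** Returns true iff extent portions of lock descriptions match. */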
int cl_lock_ext_match(const struct cl_lock_descr *has,
                      const struct cl_lock_descr *need)
{
        return
                has->cld_start <= need->cld_start &&
                has->cld_end >= need->cld_end &&
                cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
                (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);
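
/**
 * Returns true iff a lock with the description @has provides at least the
 * same guarantees as a lock with the description @need.
 */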
int cl_lock_descr_match(const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need)
{
        return
                cl_object_same(has->cld_obj, need->cld_obj) &&
                cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);

static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj = lock->cll_descr.cld_obj;

        LINVRNT(!cl_lock_is_mutexed(lock));

        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
        might_sleep();
        while (!list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;

                slice = list_entry(lock->cll_layers.next,
                                   struct cl_lock_slice, cls_linkage);
                list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        CS_LOCK_DEC(obj, total);
        CS_LOCKSTATE_DEC(obj, lock->cll_state);
        lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
        cl_object_put(env, obj);
        lu_ref_fini(&lock->cll_reference);
        lu_ref_fini(&lock->cll_holders);
        mutex_destroy(&lock->cll_guard);
        kmem_cache_free(cl_lock_kmem, lock);
}
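
/**
 * Releases a reference on a lock.
 *
 * When the last reference is released and the lock is in CLS_FREEING state,
 * it is destroyed.
 */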
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj;

        LINVRNT(cl_lock_invariant(env, lock));
        obj = lock->cll_descr.cld_obj;
        LINVRNT(obj);

        CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);

        if (atomic_dec_and_test(&lock->cll_ref)) {
                if (lock->cll_state == CLS_FREEING) {
                        LASSERT(list_empty(&lock->cll_linkage));
                        cl_lock_free(env, lock);
                }
                CS_LOCK_DEC(obj, busy);
        }
}
EXPORT_SYMBOL(cl_lock_put);
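
/**
 * Acquires an additional reference on a lock.
 *
 * The caller must already hold a reference, as checked by the invariant;
 * use cl_lock_get_trust() otherwise.
 */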
void cl_lock_get(struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(NULL, lock));
        CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
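
/**
 * Acquires a reference on a lock without a pre-existing reference.
 *
 * Unlike cl_lock_get(), this may be called when the caller holds no
 * reference of its own, provided the lock cannot disappear from under it,
 * e.g. because the object's coh_lock_guard spin-lock is held while the lock
 * sits on the object's list.
 */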
void cl_lock_get_trust(struct cl_lock *lock)
{
        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        if (atomic_inc_return(&lock->cll_ref) == 1)
                CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);

/*
 * Helper that cancels and destroys a lock and releases the caller's
 * reference; used to dispose of a lock that failed initialization or lost
 * a creation race.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_put(env, lock);
}

static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                                     struct cl_object *obj,
                                     const struct cl_io *io,
                                     const struct cl_lock_descr *descr)
{
        struct cl_lock *lock;
        struct lu_object_header *head;

        lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
        if (lock) {
                atomic_set(&lock->cll_ref, 1);
                lock->cll_descr = *descr;
                lock->cll_state = CLS_NEW;
                cl_object_get(obj);
                lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref,
                                     "cl_lock", lock);
                INIT_LIST_HEAD(&lock->cll_layers);
                INIT_LIST_HEAD(&lock->cll_linkage);
                INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                mutex_init(&lock->cll_guard);
                lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                init_waitqueue_head(&lock->cll_wq);
                head = obj->co_lu.lo_header;
                CS_LOCKSTATE_INC(obj, CLS_NEW);
                CS_LOCK_INC(obj, total);
                CS_LOCK_INC(obj, create);
                cl_lock_lockdep_init(lock);
                list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
                        int err;

                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
                        if (err != 0) {
                                cl_lock_finish(env, lock);
                                lock = ERR_PTR(err);
                                break;
                        }
                }
        } else {
                lock = ERR_PTR(-ENOMEM);
        }
        return lock;
}
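
/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_ENQUEUED, CLS_HELD or CLS_CACHED
 * \post state: CLS_INTRANSIT
 *
 * \see cl_lock_extransit()
 */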
static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                            struct cl_lock *lock)
{
        enum cl_lock_state state = lock->cll_state;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(state != CLS_INTRANSIT);
        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
                 "Malformed lock state %d.\n", state);

        cl_lock_state_set(env, lock, CLS_INTRANSIT);
        lock->cll_intransit_owner = current;
        cl_lock_hold_add(env, lock, "intransit", current);
        return state;
}
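
/**
 * Exit the intransit state and restore the lock state to the original one.
 */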
static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
                              enum cl_lock_state state)
{
        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(state != CLS_INTRANSIT);
        LASSERT(lock->cll_intransit_owner == current);

        lock->cll_intransit_owner = NULL;
        cl_lock_state_set(env, lock, state);
        cl_lock_unhold(env, lock, "intransit", current);
}

/* Checks whether the lock is in INTRANSIT state, owned by another thread. */
int cl_lock_is_intransit(struct cl_lock *lock)
{
        LASSERT(cl_lock_is_mutexed(lock));
        return lock->cll_state == CLS_INTRANSIT &&
               lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);

/*
 * Returns true iff @lock is "suitable" for @io, i.e., every layer's
 * ->clo_fits_into() method (if defined) accepts it.
 */
static int cl_lock_fits_into(const struct lu_env *env,
                             const struct cl_lock *lock,
                             const struct cl_lock_descr *need,
                             const struct cl_io *io)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_fits_into &&
                    !slice->cls_ops->clo_fits_into(env, slice, need, io))
                        return 0;
        }
        return 1;
}

static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                                      struct cl_object *obj,
                                      const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        struct cl_lock *lock;
        struct cl_object_header *head;

        head = cl_object_header(obj);
        assert_spin_locked(&head->coh_lock_guard);
        CS_LOCK_INC(obj, lookup);
        list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;

                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
                          !(lock->cll_flags & CLF_CANCELLED) &&
                          cl_lock_fits_into(env, lock, need, io);
                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
                       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
                       matched);
                if (matched) {
                        cl_lock_get_trust(lock);
                        CS_LOCK_INC(obj, hit);
                        return lock;
                }
        }
        return NULL;
}
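
/**
 * Returns a lock matching description @need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If a lock is
 * found there, it is returned immediately. Otherwise a new lock is allocated
 * and returned. In any case, an additional reference is acquired.
 */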
static struct cl_lock *cl_lock_find(const struct lu_env *env,
                                    const struct cl_io *io,
                                    const struct cl_lock_descr *need)
{
        struct cl_object_header *head;
        struct cl_object *obj;
        struct cl_lock *lock;

        obj = need->cld_obj;
        head = cl_object_header(obj);

        spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        spin_unlock(&head->coh_lock_guard);

        if (!lock) {
                lock = cl_lock_alloc(env, obj, io, need);
                if (!IS_ERR(lock)) {
                        struct cl_lock *ghost;

                        spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (!ghost) {
                                cl_lock_get_trust(lock);
                                list_add_tail(&lock->cll_linkage,
                                              &head->coh_locks);
                                spin_unlock(&head->coh_lock_guard);
                                CS_LOCK_INC(obj, busy);
                        } else {
                                spin_unlock(&head->coh_lock_guard);
                                /*
                                 * Lost the race with another thread that
                                 * created the same lock: dispose of ours
                                 * and use the one already in the cache.
                                 */
                                cl_lock_finish(env, lock);
                                lock = ghost;
                        }
                }
        }
        return lock;
}
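
/**
 * Returns an existing lock matching the given description. On success the
 * lock gains a hold and a user, and is returned in CLS_HELD state; NULL is
 * returned when no usable matching lock exists.
 */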
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_object_header *head;
        struct cl_object *obj;
        struct cl_lock *lock;

        obj = need->cld_obj;
        head = cl_object_header(obj);

        do {
                spin_lock(&head->coh_lock_guard);
                lock = cl_lock_lookup(env, obj, io, need);
                spin_unlock(&head->coh_lock_guard);
                if (!lock)
                        return NULL;

                cl_lock_mutex_get(env, lock);
                if (lock->cll_state == CLS_INTRANSIT)
                        /* ignore the return value; recheck the state below */
                        cl_lock_state_wait(env, lock);
                if (lock->cll_state == CLS_FREEING) {
                        cl_lock_mutex_put(env, lock);
                        cl_lock_put(env, lock);
                        lock = NULL;
                }
        } while (!lock);

        cl_lock_hold_add(env, lock, scope, source);
        cl_lock_user_add(env, lock);
        if (lock->cll_state == CLS_CACHED)
                cl_use_try(env, lock, 1);
        if (lock->cll_state == CLS_HELD) {
                cl_lock_mutex_put(env, lock);
                cl_lock_lockdep_acquire(env, lock, 0);
                cl_lock_put(env, lock);
        } else {
                cl_unuse_try(env, lock);
                cl_lock_unhold(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
                lock = NULL;
        }

        return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
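
/**
 * Returns the slice of @lock that corresponds to the device type @dtype, or
 * NULL when this layer is not present in the lock's layer stack.
 */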
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                       const struct lu_device_type *dtype)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(NULL, lock));

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}
EXPORT_SYMBOL(cl_lock_at);

/* Common tail of mutex acquisition: bump depth and per-thread counters. */
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        counters = cl_lock_counters(env, lock);
        lock->cll_depth++;
        counters->ctc_nr_locks_locked++;
        lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
        cl_lock_trace(D_TRACE, env, "got mutex", lock);
}
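
/**
 * Locks cl_lock object.
 *
 * This is used to serialize manipulations of cl_lock fields and state
 * transitions. The mutex is recursive: the thread that already owns it may
 * take it again, which only increments cll_depth.
 */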
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_guarder == current) {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(lock->cll_depth > 0);
        } else {
                struct cl_object_header *hdr;
                struct cl_thread_info *info;
                int i;

                LINVRNT(lock->cll_guarder != current);
                hdr = cl_object_header(lock->cll_descr.cld_obj);
                /*
                 * Check that mutexes are taken in the bottom-to-top order:
                 * no mutex of a lower nesting level may be held here.
                 */
                info = cl_env_info(env);
                for (i = 0; i < hdr->coh_nesting; ++i)
                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                lock->cll_guarder = current;
                LINVRNT(lock->cll_depth == 0);
        }
        cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);

/*
 * Try-locks cl_lock object; returns 0 on success and -EBUSY when the mutex
 * is held by another thread.
 */
static int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        LINVRNT(cl_lock_invariant_trusted(env, lock));

        result = 0;
        if (lock->cll_guarder == current) {
                LINVRNT(lock->cll_depth > 0);
                cl_lock_mutex_tail(env, lock);
        } else if (mutex_trylock(&lock->cll_guard)) {
                LINVRNT(lock->cll_depth == 0);
                lock->cll_guarder = current;
                cl_lock_mutex_tail(env, lock);
        } else {
                result = -EBUSY;
        }
        return result;
}

/*
 * Unlocks cl_lock object. The current thread must own the lock mutex.
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        LINVRNT(cl_lock_invariant(env, lock));
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(lock->cll_guarder == current);
        LINVRNT(lock->cll_depth > 0);

        counters = cl_lock_counters(env, lock);
        LINVRNT(counters->ctc_nr_locks_locked > 0);

        cl_lock_trace(D_TRACE, env, "put mutex", lock);
        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
        counters->ctc_nr_locks_locked--;
        if (--lock->cll_depth == 0) {
                lock->cll_guarder = NULL;
                mutex_unlock(&lock->cll_guard);
        }
}
EXPORT_SYMBOL(cl_lock_mutex_put);

/** Returns true iff lock's mutex is owned by the current thread. */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
        return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);

/** Returns the number of cl_lock mutexes held by the current thread. */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
        struct cl_thread_info *info;
        int i;
        int locked;

        /* sum the per-thread counters over all nesting levels */
        info = cl_env_info(env);
        for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                locked += info->clt_counters[i].ctc_nr_locks_locked;
        return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);

static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        if (!(lock->cll_flags & CLF_CANCELLED)) {
                const struct cl_lock_slice *slice;

                lock->cll_flags |= CLF_CANCELLED;
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_cancel)
                                slice->cls_ops->clo_cancel(env, slice);
                }
        }
}

static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object_header *head;
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_state < CLS_FREEING) {
                bool in_cache;

                LASSERT(lock->cll_state != CLS_INTRANSIT);
                cl_lock_state_set(env, lock, CLS_FREEING);

                head = cl_object_header(lock->cll_descr.cld_obj);

                spin_lock(&head->coh_lock_guard);
                in_cache = !list_empty(&lock->cll_linkage);
                if (in_cache)
                        list_del_init(&lock->cll_linkage);
                spin_unlock(&head->coh_lock_guard);

                /* drop the cache's reference, if the lock was cached */
                if (in_cache)
                        cl_lock_put(env, lock);

                /*
                 * From now on no new references to this lock can be acquired
                 * through lookup; notify the layers bottom-to-top.
                 */
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_delete)
                                slice->cls_ops->clo_delete(env, slice);
                }
                /*
                 * The remaining state is destroyed in cl_lock_free() once
                 * the last reference is released in cl_lock_put().
                 */
        }
}

/*
 * Adjusts the lock's hold count and, for top-locks, the per-thread
 * held-lock counter used by debugging checks.
 */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level nesting;

        lock->cll_holds += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_held += delta;
                LASSERT(counters->ctc_nr_held >= 0);
        }
}

/*
 * Adjusts the lock's user count and, for top-locks, the per-thread
 * used-lock counter.
 */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level nesting;

        lock->cll_users += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_used += delta;
                LASSERT(counters->ctc_nr_used >= 0);
        }
}
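
/**
 * Releases a hold on a lock. When the hold count drops to zero, pending
 * cancellation (CLF_CANCELPEND) and destruction (CLF_DOOMED) are carried
 * out.
 */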
void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
                          const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
        lu_ref_del(&lock->cll_holders, scope, source);
        cl_lock_hold_mod(env, lock, -1);
        if (lock->cll_holds == 0) {
                CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
                if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
                    lock->cll_descr.cld_mode == CLM_GROUP ||
                    lock->cll_state != CLS_CACHED)
                        /*
                         * If the lock is still phantom or a group lock when
                         * the user is done with it, destroy the lock.
                         */
                        lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
                if (lock->cll_flags & CLF_CANCELPEND) {
                        lock->cll_flags &= ~CLF_CANCELPEND;
                        cl_lock_cancel0(env, lock);
                }
                if (lock->cll_flags & CLF_DOOMED) {
                        /* no longer doomed: it's dead... Jim. */
                        lock->cll_flags &= ~CLF_DOOMED;
                        cl_lock_delete0(env, lock);
                }
        }
}
EXPORT_SYMBOL(cl_lock_hold_release);
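
/**
 * Waits until lock state is changed.
 *
 * This function is called with the cl_lock mutex locked, atomically
 * releases the mutex and goes to sleep, waiting for a lock state change
 * (signalled through cl_lock_signal()), and re-acquires the mutex before
 * returning.
 *
 * \retval 0            the wait completed and the state may have changed
 * \retval -ERESTARTSYS the wait was interrupted by a fatal signal
 */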
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
        wait_queue_t waiter;
        sigset_t blocked;
        int result;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_depth == 1);
        LASSERT(lock->cll_state != CLS_FREEING);

        cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
        result = lock->cll_error;
        if (result == 0) {
                /*
                 * To avoid being interrupted by "non-fatal" signals
                 * (SIGCHLD, for instance), block them temporarily and let
                 * only LUSTRE_FATAL_SIGS through.
                 */
                blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

                init_waitqueue_entry(&waiter, current);
                add_wait_queue(&lock->cll_wq, &waiter);
                set_current_state(TASK_INTERRUPTIBLE);
                cl_lock_mutex_put(env, lock);

                LASSERT(cl_lock_nr_mutexed(env) == 0);

                /*
                 * Return -ERESTARTSYS rather than -EINTR so that an
                 * interrupted system call can be restarted if a signal is
                 * pending.
                 */
                result = -ERESTARTSYS;
                if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
                        schedule();
                        if (!cfs_signal_pending())
                                result = 0;
                }

                cl_lock_mutex_get(env, lock);
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&lock->cll_wq, &waiter);

                /* restore the old signal mask */
                cfs_restore_sigs(blocked);
        }
        return result;
}
EXPORT_SYMBOL(cl_lock_state_wait);

static void cl_lock_state_signal(const struct lu_env *env,
                                 struct cl_lock *lock,
                                 enum cl_lock_state state)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                if (slice->cls_ops->clo_state)
                        slice->cls_ops->clo_state(env, slice, state);
        wake_up_all(&lock->cll_wq);
}
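
/**
 * Notifies waiters that lock state changed.
 *
 * Wakes up all waiters sleeping in cl_lock_state_wait(), posting them the
 * current lock state via the layers' ->clo_state() methods.
 */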
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
        cl_lock_state_signal(env, lock, lock->cll_state);
}
EXPORT_SYMBOL(cl_lock_signal);

/*
 * Changes lock state: notifies layers, wakes up waiters, and updates the
 * per-state counters. The state may only move "forward", except for a
 * CLS_CACHED lock being re-used or a lock leaving CLS_INTRANSIT.
 */
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(lock->cll_state <= state ||
                (lock->cll_state == CLS_CACHED &&
                 (state == CLS_HELD ||      /* lock found in cache */
                  state == CLS_NEW ||       /* sub-lock cancelled */
                  state == CLS_INTRANSIT)) ||
                /* lock is in transit state */
                lock->cll_state == CLS_INTRANSIT);

        if (lock->cll_state != state) {
                CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
                CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);

                cl_lock_state_signal(env, lock, state);
                lock->cll_state = state;
        }
}
EXPORT_SYMBOL(cl_lock_state_set);

/* Invokes ->clo_unuse() bottom-to-top, repeating while a layer asks CLO_REPEAT. */
static int cl_unuse_try_internal(const struct lu_env *env,
                                 struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int result;

        do {
                result = 0;

                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERT(lock->cll_state == CLS_INTRANSIT);

                result = -ENOSYS;
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_unuse) {
                                result = slice->cls_ops->clo_unuse(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
        } while (result == CLO_REPEAT);

        return result;
}

/**
 * Yanks a lock from the cache (CLS_CACHED state) by calling the layers'
 * cl_lock_operations::clo_use() methods, moving it to CLS_HELD on success.
 * When @atomic is set, a failed use is immediately rolled back with
 * clo_unuse().
 */
int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
{
        const struct cl_lock_slice *slice;
        int result;
        enum cl_lock_state state;

        cl_lock_trace(D_DLMTRACE, env, "use lock", lock);

        LASSERT(lock->cll_state == CLS_CACHED);
        if (lock->cll_error)
                return lock->cll_error;

        result = -ENOSYS;
        state = cl_lock_intransit(env, lock);
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_use) {
                        result = slice->cls_ops->clo_use(env, slice);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);

        LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
                 lock->cll_state);

        if (result == 0) {
                state = CLS_HELD;
        } else {
                if (result == -ESTALE) {
                        /*
                         * -ESTALE means a sub-lock is being cancelled at
                         * this moment: reset the state to CLS_NEW and ask
                         * the caller to repeat.
                         */
                        state = CLS_NEW;
                        result = CLO_REPEAT;
                }

                /* @atomic means back-off-on-failure. */
                if (atomic) {
                        int rc;

                        rc = cl_unuse_try_internal(env, lock);
                        if (rc < 0 && result > 0)
                                result = rc;
                }

        }
        cl_lock_extransit(env, lock, state);
        return result;
}
EXPORT_SYMBOL(cl_use_try);

/* Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers. */
static int cl_enqueue_kick(const struct lu_env *env,
                           struct cl_lock *lock,
                           struct cl_io *io, __u32 flags)
{
        int result;
        const struct cl_lock_slice *slice;

        result = -ENOSYS;
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_enqueue) {
                        result = slice->cls_ops->clo_enqueue(env,
                                                             slice, io, flags);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);
        return result;
}
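
/**
 * Tries to enqueue a lock.
 *
 * This function is called repeatedly by cl_enqueue() until either the lock
 * is enqueued or an error occurs. It does not block waiting for networking
 * communication to complete.
 *
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 */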
int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                   struct cl_io *io, __u32 flags)
{
        int result;

        cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
        do {
                LINVRNT(cl_lock_is_mutexed(lock));

                result = lock->cll_error;
                if (result != 0)
                        break;

                switch (lock->cll_state) {
                case CLS_NEW:
                        cl_lock_state_set(env, lock, CLS_QUEUING);
                        /* fall-through */
                case CLS_QUEUING:
                        /* kick layers */
                        result = cl_enqueue_kick(env, lock, io, flags);
                        /*
                         * The lock may already have been granted by now;
                         * for the AGL case cll_state can be CLS_HELD here.
                         */
                        if (result == 0 && lock->cll_state == CLS_QUEUING)
                                cl_lock_state_set(env, lock, CLS_ENQUEUED);
                        break;
                case CLS_INTRANSIT:
                        LASSERT(cl_lock_is_intransit(lock));
                        result = CLO_WAIT;
                        break;
                case CLS_CACHED:
                        /* yank lock from the cache */
                        result = cl_use_try(env, lock, 0);
                        break;
                case CLS_ENQUEUED:
                case CLS_HELD:
                        result = 0;
                        break;
                default:
                case CLS_FREEING:
                        /*
                         * Impossible: only held locks with increased
                         * cll_holds can be enqueued, and they cannot be
                         * freed.
                         */
                        LBUG();
                }
        } while (result == CLO_REPEAT);
        return result;
}
EXPORT_SYMBOL(cl_enqueue_try);
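
/**
 * Cancels the conflicting lock found during enqueue, and waits for it to be
 * destroyed.
 *
 * \retval 0   the conflicting lock has been cancelled
 * \retval -ve error code, e.g. when the wait was interrupted
 */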
int cl_lock_enqueue_wait(const struct lu_env *env,
                         struct cl_lock *lock,
                         int keep_mutex)
{
        struct cl_lock *conflict;
        int rc = 0;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(lock->cll_conflict);

        conflict = lock->cll_conflict;
        lock->cll_conflict = NULL;

        cl_lock_mutex_put(env, lock);
        LASSERT(cl_lock_nr_mutexed(env) == 0);

        cl_lock_mutex_get(env, conflict);
        cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
        cl_lock_cancel(env, conflict);
        cl_lock_delete(env, conflict);

        while (conflict->cll_state != CLS_FREEING) {
                rc = cl_lock_state_wait(env, conflict);
                if (rc != 0)
                        break;
        }
        cl_lock_mutex_put(env, conflict);
        lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
        cl_lock_put(env, conflict);

        if (keep_mutex)
                cl_lock_mutex_get(env, lock);

        LASSERT(rc <= 0);
        return rc;
}
EXPORT_SYMBOL(cl_lock_enqueue_wait);

static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                             struct cl_io *io, __u32 enqflags)
{
        int result;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_user_add(env, lock);
        do {
                result = cl_enqueue_try(env, lock, io, enqflags);
                if (result == CLO_WAIT) {
                        if (lock->cll_conflict)
                                result = cl_lock_enqueue_wait(env, lock, 1);
                        else
                                result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result != 0)
                cl_unuse_try(env, lock);
        LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
                     lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        return result;
}
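
/**
 * Tries to unlock a lock.
 *
 * The last user of the lock is dropped; if this was indeed the last user,
 * the lock is moved back into the cache (CLS_CACHED), or re-initialized to
 * CLS_NEW when caching is not possible.
 *
 * \see cl_unuse() cl_lock_operations::clo_unuse()
 */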
int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;
        enum cl_lock_state state = CLS_NEW;

        cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);

        if (lock->cll_users > 1) {
                cl_lock_user_del(env, lock);
                return 0;
        }

        /*
         * Only a lock in CLS_HELD or CLS_ENQUEUED state can hold underlying
         * resources; anything else merely drops the user.
         */
        if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
                cl_lock_user_del(env, lock);
                return 0;
        }

        /*
         * New lock users (cll_users) are not protecting unlocking from
         * proceeding. From this point, the lock eventually reaches
         * CLS_CACHED, is reinitialized to CLS_NEW, or fails into CLS_FREEING
         * state, unless some other thread manages to acquire a user on it.
         */
        state = cl_lock_intransit(env, lock);

        result = cl_unuse_try_internal(env, lock);
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(result != CLO_WAIT);
        cl_lock_user_del(env, lock);
        if (result == 0 || result == -ESTALE) {
                /*
                 * Return lock back to the cache. This is the only place
                 * where a lock is moved into CLS_CACHED state.
                 *
                 * If one of the ->clo_unuse() methods returned -ESTALE, the
                 * lock cannot be placed into the cache and has to be
                 * re-initialized. This happens e.g. when a sub-lock was
                 * cancelled while unlocking was in progress.
                 */
                if (state == CLS_HELD && result == 0)
                        state = CLS_CACHED;
                else
                        state = CLS_NEW;
                cl_lock_extransit(env, lock, state);

                /*
                 * Hide the -ESTALE error: the lock was re-initialized
                 * instead of being cached, which is not an error for the
                 * caller.
                 */
                result = 0;
        } else {
                CERROR("result = %d, this is unlikely!\n", result);
                state = CLS_NEW;
                cl_lock_extransit(env, lock, state);
        }
        return result ?: lock->cll_error;
}
EXPORT_SYMBOL(cl_unuse_try);

static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        result = cl_unuse_try(env, lock);
        if (result)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
}

/**
 * Unlocks a lock: drops a user, taking and releasing the lock mutex around
 * cl_unuse_try(), and releases the lockdep annotation.
 */
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_unuse_locked(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_lockdep_release(env, lock);
}
EXPORT_SYMBOL(cl_unuse);
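
/**
 * Tries to wait for a lock to be granted.
 *
 * This function is called repeatedly by cl_wait() until either the lock is
 * granted or an error occurs. It does not block waiting for network
 * communication to complete.
 *
 * \see cl_wait() cl_lock_operations::clo_wait()
 */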
int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int result;

        cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
        do {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERTF(lock->cll_state == CLS_QUEUING ||
                         lock->cll_state == CLS_ENQUEUED ||
                         lock->cll_state == CLS_HELD ||
                         lock->cll_state == CLS_INTRANSIT,
                         "lock state: %d\n", lock->cll_state);
                LASSERT(lock->cll_users > 0);
                LASSERT(lock->cll_holds > 0);

                result = lock->cll_error;
                if (result != 0)
                        break;

                if (cl_lock_is_intransit(lock)) {
                        result = CLO_WAIT;
                        break;
                }

                if (lock->cll_state == CLS_HELD)
                        /* nothing to do */
                        break;

                result = -ENOSYS;
                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_wait) {
                                result = slice->cls_ops->clo_wait(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
                if (result == 0) {
                        LASSERT(lock->cll_state != CLS_INTRANSIT);
                        cl_lock_state_set(env, lock, CLS_HELD);
                }
        } while (result == CLO_REPEAT);
        return result;
}
EXPORT_SYMBOL(cl_wait_try);
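
/**
 * Waits until an enqueued lock is granted.
 *
 * \pre  lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD
 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
 */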
int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        cl_lock_mutex_get(env, lock);

        LINVRNT(cl_lock_invariant(env, lock));
        LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
                 "Wrong state %d\n", lock->cll_state);
        LASSERT(lock->cll_holds > 0);

        do {
                result = cl_wait_try(env, lock);
                if (result == CLO_WAIT) {
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result < 0) {
                cl_unuse_try(env, lock);
                cl_lock_lockdep_release(env, lock);
        }
        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
        cl_lock_mutex_put(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
        return result;
}
EXPORT_SYMBOL(cl_wait);

/**
 * Executes cl_lock_operations::clo_weigh(), summing the results across all
 * layers to estimate the lock's "weight"; the sum saturates at ~0UL on
 * overflow.
 */
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        unsigned long pound;
        unsigned long ounce;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        pound = 0;
        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_weigh) {
                        ounce = slice->cls_ops->clo_weigh(env, slice);
                        pound += ounce;
                        if (pound < ounce) /* overflow */
                                pound = ~0UL;
                }
        }
        return pound;
}
EXPORT_SYMBOL(cl_lock_weigh);
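
/**
 * Notifies layers that the lock description changed, and changes it.
 *
 * The server can grant a lock different from the one that was requested
 * (e.g., larger in extent); this is called when the actually granted
 * description becomes known, so that layers can update their state.
 */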
int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
                   const struct cl_lock_descr *desc)
{
        const struct cl_lock_slice *slice;
        struct cl_object *obj = lock->cll_descr.cld_obj;
        struct cl_object_header *hdr = cl_object_header(obj);
        int result;

        cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);

        LASSERT(obj == desc->cld_obj);
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_modify) {
                        result = slice->cls_ops->clo_modify(env, slice, desc);
                        if (result != 0)
                                return result;
                }
        }
        CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
                      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
        /*
         * Just replace the description in place. Nothing more is needed for
         * now; if locks were indexed according to their extent and/or mode,
         * that index would have to be updated here.
         */
        spin_lock(&hdr->coh_lock_guard);
        lock->cll_descr = *desc;
        spin_unlock(&hdr->coh_lock_guard);
        return 0;
}
EXPORT_SYMBOL(cl_lock_modify);

/**
 * Initializes lock closure with a given origin lock and wait flag.
 */
void cl_lock_closure_init(const struct lu_env *env,
                          struct cl_lock_closure *closure,
                          struct cl_lock *origin, int wait)
{
        LINVRNT(cl_lock_is_mutexed(origin));
        LINVRNT(cl_lock_invariant(env, origin));

        INIT_LIST_HEAD(&closure->clc_list);
        closure->clc_origin = origin;
        closure->clc_wait = wait;
        closure->clc_nr = 0;
}
EXPORT_SYMBOL(cl_lock_closure_init);

/**
 * Builds a closure of @lock: @lock is added to the closure, and the layers'
 * ->clo_closure() methods may add further locks the operation depends on.
 * On failure the closure is dismantled with cl_lock_disclosure().
 */
int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
                          struct cl_lock_closure *closure)
{
        const struct cl_lock_slice *slice;
        int result;

        LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
        LINVRNT(cl_lock_invariant(env, closure->clc_origin));

        result = cl_lock_enclosure(env, lock, closure);
        if (result == 0) {
                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_closure) {
                                result = slice->cls_ops->clo_closure(env, slice,
                                                                     closure);
                                if (result != 0)
                                        break;
                        }
                }
        }
        if (result != 0)
                cl_lock_disclosure(env, closure);
        return result;
}
EXPORT_SYMBOL(cl_lock_closure_build);
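
/**
 * Adds new lock to a closure.
 *
 * Tries to mutex the lock without blocking the whole closure. If the mutex
 * cannot be acquired immediately, all mutexes in the closure are released;
 * then, if clc_wait is set, the thread waits on the contended mutex before
 * asking the caller to repeat with CLO_REPEAT.
 */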
int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                      struct cl_lock_closure *closure)
{
        int result = 0;

        cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
        if (!cl_lock_mutex_try(env, lock)) {
                /*
                 * If lock->cll_inclosure is not empty, the lock is already
                 * in this closure.
                 */
                if (list_empty(&lock->cll_inclosure)) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure", closure);
                        list_add(&lock->cll_inclosure, &closure->clc_list);
                        closure->clc_nr++;
                } else {
                        cl_lock_mutex_put(env, lock);
                }
                result = 0;
        } else {
                cl_lock_disclosure(env, closure);
                if (closure->clc_wait) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure-w", closure);
                        cl_lock_mutex_put(env, closure->clc_origin);

                        LASSERT(cl_lock_nr_mutexed(env) == 0);
                        cl_lock_mutex_get(env, lock);
                        cl_lock_mutex_put(env, lock);

                        cl_lock_mutex_get(env, closure->clc_origin);
                        lu_ref_del(&lock->cll_reference, "closure-w", closure);
                        cl_lock_put(env, lock);
                }
                result = CLO_REPEAT;
        }
        return result;
}
EXPORT_SYMBOL(cl_lock_enclosure);

/** Releases ("disclosures") all locks added to the closure. */
void cl_lock_disclosure(const struct lu_env *env,
                        struct cl_lock_closure *closure)
{
        struct cl_lock *scan;
        struct cl_lock *temp;

        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
        list_for_each_entry_safe(scan, temp, &closure->clc_list,
                                 cll_inclosure) {
                list_del_init(&scan->cll_inclosure);
                cl_lock_mutex_put(env, scan);
                lu_ref_del(&scan->cll_reference, "closure", closure);
                cl_lock_put(env, scan);
                closure->clc_nr--;
        }
        LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);

/** Finalizes a closure, checking that it is empty. */
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
        LASSERT(closure->clc_nr == 0);
        LASSERT(list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
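
/**
 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is
 * being destroyed. If there are holds on the lock, destruction is postponed
 * (the lock is marked CLF_DOOMED) until the last hold is released. Lock
 * destruction happens only once; a lock whose destruction started cannot be
 * "revived".
 *
 * Does not block.
 */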
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
                     cl_lock_nr_mutexed(env) == 1));

        cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_delete0(env, lock);
        else
                lock->cll_flags |= CLF_DOOMED;
}
EXPORT_SYMBOL(cl_lock_delete);
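
/**
 * Notifies the lock that an error happened during its processing. The first
 * error is recorded in cll_error, waiters are signalled, and the lock is
 * cancelled and deleted.
 */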
void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_error == 0 && error != 0) {
                cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
                lock->cll_error = error;
                cl_lock_signal(env, lock);
                cl_lock_cancel(env, lock);
                cl_lock_delete(env, lock);
        }
}
EXPORT_SYMBOL(cl_lock_error);
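
/**
 * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
 * cancelled. If there are holds on the lock, cancellation is postponed
 * (CLF_CANCELPEND) until the last hold is released.
 *
 * Cancellation notifications are delivered to layers at most once.
 */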
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_cancel0(env, lock);
        else
                lock->cll_flags |= CLF_CANCELPEND;
}
EXPORT_SYMBOL(cl_lock_cancel);
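
/**
 * Finds an existing lock covering the given page index and optionally
 * different from a given one. Returns it with an acquired (trusted)
 * reference, or NULL when nothing matches.
 */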
struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                                 struct cl_object *obj, pgoff_t index,
                                 struct cl_lock *except,
                                 int pending, int canceld)
{
        struct cl_object_header *head;
        struct cl_lock *scan;
        struct cl_lock *lock;
        struct cl_lock_descr *need;

        head = cl_object_header(obj);
        need = &cl_env_info(env)->clt_descr;
        lock = NULL;

        need->cld_mode = CLM_READ; /* CLM_READ matches both READ and WRITE,
                                    * but not CLM_PHANTOM */
        need->cld_start = need->cld_end = index;
        need->cld_enq_flags = 0;

        spin_lock(&head->coh_lock_guard);
        /*
         * It is fine to match any group lock since there can be only one
         * with a unique gid, and it conflicts with all other lock modes too.
         */
        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                if (scan != except &&
                    (scan->cll_descr.cld_mode == CLM_GROUP ||
                     cl_lock_ext_match(&scan->cll_descr, need)) &&
                    scan->cll_state >= CLS_HELD &&
                    scan->cll_state < CLS_FREEING &&
                    /*
                     * This check is racy as the lock can be cancelled right
                     * after it is done, but this is fine, because the page
                     * already exists.
                     */
                    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
                    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
                        /* Don't increase cs_hit here since this
                         * is just a helper function. */
                        cl_lock_get_trust(scan);
                        lock = scan;
                        break;
                }
        }
        spin_unlock(&head->coh_lock_guard);
        return lock;
}
EXPORT_SYMBOL(cl_lock_at_pgoff);

/* Calculate the page offset of @page at the layer of @lock. */
static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
        struct lu_device_type *dtype;
        const struct cl_page_slice *slice;

        dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
        slice = cl_page_at(page, dtype);
        return slice->cpl_page->cp_index;
}
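
/**
 * Check if page @page is covered by an extra lock or discard it.
 */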
static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock = cbdata;
        pgoff_t index = pgoff_at_lock(page, lock);

        if (index >= info->clt_fn_index) {
                struct cl_lock *tmp;

                /* refresh the non-overlapped index */
                tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
                                       lock, 1, 0);
                if (tmp) {
                        /*
                         * Cache the first-non-overlapped index so as to skip
                         * all pages within [index, clt_fn_index). This is
                         * safe because if the tmp lock is cancelled, it will
                         * discard these pages.
                         */
                        info->clt_fn_index = tmp->cll_descr.cld_end + 1;
                        if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
                                info->clt_fn_index = CL_PAGE_EOF;
                        cl_lock_put(env, tmp);
                } else if (cl_page_own(env, io, page) == 0) {
                        /* discard the page */
                        cl_page_unmap(env, io, page);
                        cl_page_discard(env, io, page);
                        cl_page_disown(env, io, page);
                } else {
                        LASSERT(page->cp_state == CPS_FREEING);
                }
        }

        info->clt_next_index = index + 1;
        return CLP_GANG_OKAY;
}

static int discard_cb(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock = cbdata;

        LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageWriteback(cl_page_vmpage(env, page))));
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageDirty(cl_page_vmpage(env, page))));

        info->clt_next_index = pgoff_at_lock(page, lock) + 1;
        if (cl_page_own(env, io, page) == 0) {
                /* discard the page */
                cl_page_unmap(env, io, page);
                cl_page_discard(env, io, page);
                cl_page_disown(env, io, page);
        } else {
                LASSERT(page->cp_state == CPS_FREEING);
        }

        return CLP_GANG_OKAY;
}
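
/**
 * Discard pages protected by the given lock. This function traverses the
 * radix tree to find all covering pages and discards them. If a page is
 * being covered by other locks, it should remain in cache.
 *
 * If an error happens on any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */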
int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_io *io = &info->clt_io;
        struct cl_lock_descr *descr = &lock->cll_descr;
        cl_page_gang_cb_t cb;
        int res;
        int result;

        LINVRNT(cl_lock_invariant(env, lock));

        io->ci_obj = cl_object_top(descr->cld_obj);
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                goto out;

        cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
        info->clt_fn_index = info->clt_next_index = descr->cld_start;
        do {
                res = cl_page_gang_lookup(env, descr->cld_obj, io,
                                          info->clt_next_index, descr->cld_end,
                                          cb, (void *)lock);
                if (info->clt_next_index > descr->cld_end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        return result;
}
EXPORT_SYMBOL(cl_lock_discard_pages);
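
/**
 * Eliminate all locks for a given object.
 *
 * Caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *               destroying them.
 */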
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
        struct cl_object_header *head;
        struct cl_lock *lock;

        head = cl_object_header(obj);
        /*
         * If locks are destroyed without cancellation, all pages must be
         * already destroyed (as otherwise they will be left unprotected).
         */
        LASSERT(ergo(!cancel,
                     !head->coh_tree.rnode && head->coh_pages == 0));

        spin_lock(&head->coh_lock_guard);
        while (!list_empty(&head->coh_locks)) {
                lock = container_of(head->coh_locks.next,
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
                spin_unlock(&head->coh_lock_guard);
                lu_ref_add(&lock->cll_reference, "prune", current);

again:
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING) {
                        LASSERT(lock->cll_users <= 1);
                        if (unlikely(lock->cll_users == 1)) {
                                struct l_wait_info lwi = { 0 };

                                cl_lock_mutex_put(env, lock);
                                l_wait_event(lock->cll_wq,
                                             lock->cll_users == 0,
                                             &lwi);
                                goto again;
                        }

                        if (cancel)
                                cl_lock_cancel(env, lock);
                        cl_lock_delete(env, lock);
                }
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, "prune", current);
                cl_lock_put(env, lock);
                spin_lock(&head->coh_lock_guard);
        }
        spin_unlock(&head->coh_lock_guard);
}
EXPORT_SYMBOL(cl_locks_prune);

/*
 * Finds or creates a lock matching @need and acquires a hold on it, with
 * the lock mutex held on return; retries when the found lock is dying or
 * cancelled.
 */
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_lock_descr *need,
                                          const char *scope, const void *source)
{
        struct cl_lock *lock;

        while (1) {
                lock = cl_lock_find(env, io, need);
                if (IS_ERR(lock))
                        break;
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING &&
                    !(lock->cll_flags & CLF_CANCELLED)) {
                        cl_lock_hold_mod(env, lock, 1);
                        lu_ref_add(&lock->cll_holders, scope, source);
                        lu_ref_add(&lock->cll_reference, scope, source);
                        break;
                }
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
        }
        return lock;
}
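
/**
 * Returns a lock matching @need description with a hold on it.
 *
 * In addition to a reference, a hold is acquired on the returned lock,
 * guaranteeing that the lock is not destroyed until the hold is released.
 */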
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_lock *lock;

        lock = cl_lock_hold_mutex(env, io, need, scope, source);
        if (!IS_ERR(lock))
                cl_lock_mutex_put(env, lock);
        return lock;
}
EXPORT_SYMBOL(cl_lock_hold);
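
/**
 * Main high-level entry point of the cl_lock interface: finds an existing
 * lock or constructs a new one matching the given description, and enqueues
 * it. Returns the lock held and lockdep-annotated, NULL for a successful
 * AGL (CEF_AGL) request, or an ERR_PTR on failure.
 */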
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                const struct cl_lock_descr *need,
                                const char *scope, const void *source)
{
        struct cl_lock *lock;
        int rc;
        __u32 enqflags = need->cld_enq_flags;

        do {
                lock = cl_lock_hold_mutex(env, io, need, scope, source);
                if (IS_ERR(lock))
                        break;

                rc = cl_enqueue_locked(env, lock, io, enqflags);
                if (rc == 0) {
                        if (cl_lock_fits_into(env, lock, need, io)) {
                                if (!(enqflags & CEF_AGL)) {
                                        cl_lock_mutex_put(env, lock);
                                        cl_lock_lockdep_acquire(env, lock,
                                                                enqflags);
                                        break;
                                }
                                rc = 1;
                        }
                        cl_unuse_locked(env, lock);
                }
                cl_lock_trace(D_DLMTRACE, env,
                              rc <= 0 ? "enqueue failed" : "agl succeed", lock);
                cl_lock_hold_release(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, scope, source);
                cl_lock_put(env, lock);
                if (rc > 0) {
                        LASSERT(enqflags & CEF_AGL);
                        lock = NULL;
                } else if (rc != 0) {
                        lock = ERR_PTR(rc);
                }
        } while (rc == 0);
        return lock;
}
EXPORT_SYMBOL(cl_lock_request);

/**
 * Adds a hold to a lock, together with a reference. The lock must not be in
 * CLS_FREEING state.
 */
void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
                      const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_state != CLS_FREEING);

        cl_lock_hold_mod(env, lock, 1);
        cl_lock_get(lock);
        lu_ref_add(&lock->cll_holders, scope, source);
        lu_ref_add(&lock->cll_reference, scope, source);
}
EXPORT_SYMBOL(cl_lock_hold_add);

/**
 * Releases a hold and a reference on a lock; the caller must already own
 * the lock mutex (compare cl_lock_release(), which takes it internally).
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
                    const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        cl_lock_hold_release(env, lock, scope, source);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_unhold);

/**
 * Releases a hold and a reference on a lock, taking the lock mutex
 * internally.
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
                     const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
        cl_lock_mutex_get(env, lock);
        cl_lock_hold_release(env, lock, scope, source);
        cl_lock_mutex_put(env, lock);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);

/* Increments the lock's user count; the lock mutex must be held. */
void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cl_lock_used_mod(env, lock, 1);
}
EXPORT_SYMBOL(cl_lock_user_add);

/* Decrements the user count, waking up waiters when it drops to zero. */
void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_users > 0);

        cl_lock_used_mod(env, lock, -1);
        if (lock->cll_users == 0)
                wake_up_all(&lock->cll_wq);
}
EXPORT_SYMBOL(cl_lock_user_del);

/* Returns a one-letter human-readable name for a lock mode. */
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
        static const char *names[] = {
                [CLM_PHANTOM] = "P",
                [CLM_READ]    = "R",
                [CLM_WRITE]   = "W",
                [CLM_GROUP]   = "G"
        };

        if (0 <= mode && mode < ARRAY_SIZE(names))
                return names[mode];
        else
                return "U";
}
EXPORT_SYMBOL(cl_lock_mode_name);

/**
 * Prints a human readable representation of a lock description.
 */
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
                         lu_printer_t printer,
                         const struct cl_lock_descr *descr)
{
        const struct lu_fid *fid;

        fid = lu_object_fid(&descr->cld_obj->co_lu);
        (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
}
EXPORT_SYMBOL(cl_lock_descr_print);

/**
 * Prints a human readable representation of @lock, including all its slices.
 */
void cl_lock_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;

        (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
                   lock, atomic_read(&lock->cll_ref),
                   lock->cll_state, lock->cll_error, lock->cll_holds,
                   lock->cll_users, lock->cll_flags);
        cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
        (*printer)(env, cookie, " {\n");

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                (*printer)(env, cookie, "    %s@%p: ",
                           slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                           slice);
                if (slice->cls_ops->clo_print)
                        slice->cls_ops->clo_print(env, cookie, printer, slice);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);

int cl_lock_init(void)
{
        return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
        lu_kmem_fini(cl_lock_caches);
}