1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#define DEBUG_SUBSYSTEM S_LDLM
43
44#include "../../include/linux/libcfs/libcfs.h"
45#include "../include/lustre_intent.h"
46#include "../include/obd_class.h"
47#include "ldlm_internal.h"
48
49
/*
 * Human-readable lock mode names, indexed by LCK_* mode bit value;
 * used by debug output such as ldlm_lockname[mode] in LDLM_DEBUG().
 */
char *ldlm_lockname[] = {
	[0] = "--",
	[LCK_EX] = "EX",
	[LCK_PW] = "PW",
	[LCK_PR] = "PR",
	[LCK_CW] = "CW",
	[LCK_CR] = "CR",
	[LCK_NL] = "NL",
	[LCK_GROUP] = "GROUP",
	[LCK_COS] = "COS",
};
EXPORT_SYMBOL(ldlm_lockname);
62
/* Human-readable lock type names, indexed by LDLM_* type value. */
char *ldlm_typename[] = {
	[LDLM_PLAIN] = "PLN",
	[LDLM_EXTENT] = "EXT",
	[LDLM_FLOCK] = "FLK",
	[LDLM_IBITS] = "IBT",
};
EXPORT_SYMBOL(ldlm_typename);
70
/*
 * Wire->local policy converters, indexed by (type - LDLM_MIN_TYPE), for
 * clients without OBD_CONNECT_FULL20 (see ldlm_convert_policy_to_local()).
 * Only the flock entry differs from the wire21 table.
 */
static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
	[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
	[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
	[LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
	[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
77
/*
 * Wire->local policy converters, indexed by (type - LDLM_MIN_TYPE), for
 * clients with OBD_CONNECT_FULL20 set (see ldlm_convert_policy_to_local()).
 */
static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
	[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
	[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
	[LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
	[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
84
/* Local->wire policy converters, indexed by (type - LDLM_MIN_TYPE). */
static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
	[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
	[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
	[LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
	[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
};
91
92
93
94
95void ldlm_convert_policy_to_wire(ldlm_type_t type,
96 const ldlm_policy_data_t *lpolicy,
97 ldlm_wire_policy_data_t *wpolicy)
98{
99 ldlm_policy_local_to_wire_t convert;
100
101 convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
102
103 convert(lpolicy, wpolicy);
104}
105
106
107
108
109void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
110 const ldlm_wire_policy_data_t *wpolicy,
111 ldlm_policy_data_t *lpolicy)
112{
113 ldlm_policy_wire_to_local_t convert;
114 int new_client;
115
116
117 new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
118 if (new_client)
119 convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
120 else
121 convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
122
123 convert(wpolicy, lpolicy);
124}
125
/**
 * Return a printable name for intent \a it; "UNKNOWN" (with a CERROR)
 * for unrecognized values.
 */
char *ldlm_it2str(int it)
{
	switch (it) {
	case IT_OPEN:
		return "open";
	case IT_CREAT:
		return "creat";
	case (IT_OPEN | IT_CREAT):
		return "open|creat";
	case IT_READDIR:
		return "readdir";
	case IT_GETATTR:
		return "getattr";
	case IT_LOOKUP:
		return "lookup";
	case IT_UNLINK:
		return "unlink";
	case IT_GETXATTR:
		return "getxattr";
	case IT_LAYOUT:
		return "layout";
	default:
		CERROR("Unknown intent %d\n", it);
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(ldlm_it2str);
153
154
/* Set the namespace's intent policy callback. */
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
	ns->ns_policy = arg;
}
EXPORT_SYMBOL(ldlm_register_intent);
160
161
162
163
164
165
166
167
168
169
170
171
172
173
/* Take an additional reference on \a lock and return it (for chaining). */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
	atomic_inc(&lock->l_refc);
	return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);
180
181
182
183
184
185
/**
 * Release a reference on \a lock; free it when the last reference drops.
 *
 * The final put must happen only after the lock has been destroyed
 * (LDLM_FL_DESTROYED set) and unlinked from all lists, which the
 * LASSERTs below verify.
 */
void ldlm_lock_put(struct ldlm_lock *lock)
{
	LASSERT(lock->l_resource != LP_POISON);
	LASSERT(atomic_read(&lock->l_refc) > 0);
	if (atomic_dec_and_test(&lock->l_refc)) {
		struct ldlm_resource *res;

		LDLM_DEBUG(lock,
			   "final lock_put on destroyed lock, freeing it.");

		res = lock->l_resource;
		LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
		LASSERT(list_empty(&lock->l_res_link));
		LASSERT(list_empty(&lock->l_pending_chain));

		lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
				     LDLM_NSS_LOCKS);
		/* drop the lock's reference on its resource */
		lu_ref_del(&res->lr_reference, "lock", lock);
		ldlm_resource_putref(res);
		lock->l_resource = NULL;
		if (lock->l_export) {
			class_export_lock_put(lock->l_export, lock);
			lock->l_export = NULL;
		}

		kfree(lock->l_lvb_data);

		ldlm_interval_free(ldlm_interval_detach(lock));
		lu_ref_fini(&lock->l_reference);
		OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
	}
}
EXPORT_SYMBOL(ldlm_lock_put);
219
220
221
222
223int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
224{
225 int rc = 0;
226
227 if (!list_empty(&lock->l_lru)) {
228 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
229
230 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
231 list_del_init(&lock->l_lru);
232 LASSERT(ns->ns_nr_unused > 0);
233 ns->ns_nr_unused--;
234 rc = 1;
235 }
236 return rc;
237}
238
239
240
241
/**
 * Remove \a lock from the namespace LRU, taking ns_lock.
 *
 * Server-side locks (LDLM_FL_NS_SRV) are never on the LRU.
 *
 * \retval 1 if the lock was removed, 0 otherwise
 */
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
	int rc;

	if (lock->l_flags & LDLM_FL_NS_SRV) {
		LASSERT(list_empty(&lock->l_lru));
		return 0;
	}

	spin_lock(&ns->ns_lock);
	rc = ldlm_lock_remove_from_lru_nolock(lock);
	spin_unlock(&ns->ns_lock);
	return rc;
}
257
258
259
260
261void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
262{
263 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
264
265 lock->l_last_used = cfs_time_current();
266 LASSERT(list_empty(&lock->l_lru));
267 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
268 list_add_tail(&lock->l_lru, &ns->ns_unused_list);
269 if (lock->l_flags & LDLM_FL_SKIPPED)
270 lock->l_flags &= ~LDLM_FL_SKIPPED;
271 LASSERT(ns->ns_nr_unused >= 0);
272 ns->ns_nr_unused++;
273}
274
275
276
277
278
/* Add \a lock to the namespace LRU, taking ns_lock. */
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

	spin_lock(&ns->ns_lock);
	ldlm_lock_add_to_lru_nolock(lock);
	spin_unlock(&ns->ns_lock);
}
287
288
289
290
291
/**
 * Move \a lock to the tail of the namespace LRU (most recently used)
 * if it is currently on the LRU; no-op for server-side locks.
 */
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

	if (lock->l_flags & LDLM_FL_NS_SRV) {
		LASSERT(list_empty(&lock->l_lru));
		return;
	}

	spin_lock(&ns->ns_lock);
	if (!list_empty(&lock->l_lru)) {
		/* remove+re-add moves the lock to the LRU tail */
		ldlm_lock_remove_from_lru_nolock(lock);
		ldlm_lock_add_to_lru_nolock(lock);
	}
	spin_unlock(&ns->ns_lock);
}
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 * Mark \a lock destroyed and detach it from the export hash, the LRU
 * and the handle hash; caller holds the resource lock.
 *
 * \retval 1 if this call performed the destroy (caller must drop the
 *	     "hash" reference), 0 if the lock was already destroyed
 */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
	if (lock->l_readers || lock->l_writers) {
		LDLM_ERROR(lock, "lock still has references");
		LBUG();
	}

	if (!list_empty(&lock->l_res_link)) {
		LDLM_ERROR(lock, "lock still on resource");
		LBUG();
	}

	/* already destroyed by another caller — nothing to do */
	if (lock->l_flags & LDLM_FL_DESTROYED) {
		LASSERT(list_empty(&lock->l_lru));
		return 0;
	}
	lock->l_flags |= LDLM_FL_DESTROYED;

	if (lock->l_export && lock->l_export->exp_lock_hash) {
		/* remove from the per-export hash of granted locks,
		 * keyed by the remote handle */
		cfs_hash_del(lock->l_export->exp_lock_hash,
			     &lock->l_remote_handle, &lock->l_exp_hash);
	}

	ldlm_lock_remove_from_lru(lock);
	class_handle_unhash(&lock->l_handle);

#if 0
	/* dead code kept for reference; never compiled */
	if (lock->l_export)
		class_export_put(lock->l_export);
	lock->l_export = NULL;
	if (lock->l_export && lock->l_completion_ast)
		lock->l_completion_ast(lock, 0);
#endif
	return 1;
}
371
372
373
374
375void ldlm_lock_destroy(struct ldlm_lock *lock)
376{
377 int first;
378
379 lock_res_and_lock(lock);
380 first = ldlm_lock_destroy_internal(lock);
381 unlock_res_and_lock(lock);
382
383
384 if (first) {
385 lu_ref_del(&lock->l_reference, "hash", lock);
386 LDLM_LOCK_RELEASE(lock);
387 }
388}
389
390
391
392
393void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
394{
395 int first;
396
397 first = ldlm_lock_destroy_internal(lock);
398
399 if (first) {
400 lu_ref_del(&lock->l_reference, "hash", lock);
401 LDLM_LOCK_RELEASE(lock);
402 }
403}
404
405
/* portals_handle_ops callback: take a reference on handle lookup. */
static void lock_handle_addref(void *lock)
{
	LDLM_LOCK_GET((struct ldlm_lock *)lock);
}
410
/* portals_handle_ops callback: return the lock to its slab cache. */
static void lock_handle_free(void *lock, int size)
{
	LASSERT(size == sizeof(struct ldlm_lock));
	OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
}
416
/* Handle operations for ldlm locks (see class_handle_hash() callers). */
struct portals_handle_ops lock_handle_ops = {
	.hop_addref = lock_handle_addref,
	.hop_free = lock_handle_free,
};
421
422
423
424
425
426
427
428
429
/**
 * Allocate and initialize a new lock on \a resource.
 *
 * On success the caller's reference on \a resource is consumed by the
 * new lock (stored in l_resource).  The lock starts with refcount 2:
 * one for the caller and one for the handle hash.
 *
 * \retval pointer to the new lock, or NULL on allocation failure
 *	   (in which case the caller's resource reference is NOT consumed)
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
	struct ldlm_lock *lock;

	if (resource == NULL)
		LBUG();

	OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
	if (lock == NULL)
		return NULL;

	spin_lock_init(&lock->l_lock);
	lock->l_resource = resource;
	lu_ref_add(&resource->lr_reference, "lock", lock);

	/* one reference for the caller, one for the handle hash */
	atomic_set(&lock->l_refc, 2);
	INIT_LIST_HEAD(&lock->l_res_link);
	INIT_LIST_HEAD(&lock->l_lru);
	INIT_LIST_HEAD(&lock->l_pending_chain);
	INIT_LIST_HEAD(&lock->l_bl_ast);
	INIT_LIST_HEAD(&lock->l_cp_ast);
	INIT_LIST_HEAD(&lock->l_rk_ast);
	init_waitqueue_head(&lock->l_waitq);
	lock->l_blocking_lock = NULL;
	INIT_LIST_HEAD(&lock->l_sl_mode);
	INIT_LIST_HEAD(&lock->l_sl_policy);
	INIT_HLIST_NODE(&lock->l_exp_hash);
	INIT_HLIST_NODE(&lock->l_exp_flock_hash);

	lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
			     LDLM_NSS_LOCKS);
	INIT_LIST_HEAD(&lock->l_handle.h_link);
	class_handle_hash(&lock->l_handle, &lock_handle_ops);

	lu_ref_init(&lock->l_reference);
	lu_ref_add(&lock->l_reference, "hash", lock);
	lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
	INIT_LIST_HEAD(&lock->l_exp_refs_link);
	lock->l_exp_refs_nr = 0;
	lock->l_exp_refs_target = NULL;
#endif
	INIT_LIST_HEAD(&lock->l_exp_list);

	return lock;
}
477
478
479
480
481
482
/**
 * Move \a lock from its current resource to the resource named by
 * \a new_resid (client-side only; used e.g. when a resource is renamed).
 *
 * \retval 0 on success (or if the lock is already on that resource)
 * \retval -ENOMEM if the new resource cannot be obtained
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
			      const struct ldlm_res_id *new_resid)
{
	struct ldlm_resource *oldres = lock->l_resource;
	struct ldlm_resource *newres;
	int type;

	LASSERT(ns_is_client(ns));

	lock_res_and_lock(lock);
	if (memcmp(new_resid, &lock->l_resource->lr_name,
		   sizeof(lock->l_resource->lr_name)) == 0) {
		/* nothing to do: already on the requested resource */
		unlock_res_and_lock(lock);
		return 0;
	}

	LASSERT(new_resid->name[0] != 0);

	/* the lock must not be queued on the old resource */
	LASSERT(list_empty(&lock->l_res_link));

	type = oldres->lr_type;
	unlock_res_and_lock(lock);

	newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
	if (newres == NULL)
		return -ENOMEM;

	lu_ref_add(&newres->lr_reference, "lock", lock);

	/* To flip l_resource safely we must hold both resource locks.
	 * Resources are always locked in address order to avoid deadlock,
	 * and l_lock pins l_resource while we re-read it. */
	spin_lock(&lock->l_lock);
	oldres = lock->l_resource;
	if (oldres < newres) {
		lock_res(oldres);
		lock_res_nested(newres, LRT_NEW);
	} else {
		lock_res(newres);
		lock_res_nested(oldres, LRT_NEW);
	}
	LASSERT(memcmp(new_resid, &oldres->lr_name,
		       sizeof(oldres->lr_name)) != 0);
	lock->l_resource = newres;
	unlock_res(oldres);
	unlock_res_and_lock(lock);

	/* drop the lock's reference on the old resource */
	lu_ref_del(&oldres->lr_reference, "lock", lock);
	ldlm_resource_putref(oldres);

	return 0;
}
EXPORT_SYMBOL(ldlm_lock_change_resource);
540EXPORT_SYMBOL(ldlm_lock_change_resource);
541
542
543
544
545
546
547
548
549
550
/* Fill \a lockh with the opaque handle cookie for \a lock. */
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
	lockh->cookie = lock->l_handle.h_cookie;
}
EXPORT_SYMBOL(ldlm_lock2handle);
556
557
558
559
560
561
562
/**
 * Look up the lock for \a handle and take a reference on it.
 *
 * \param handle	handle to resolve
 * \param flags		if non-zero: fail the lookup when any of these
 *			flags are already set on the lock, otherwise set
 *			them before returning
 *
 * \retval referenced lock, or NULL if the handle is stale, the lock is
 *	   destroyed, or a requested flag was already set
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
				     __u64 flags)
{
	struct ldlm_lock *lock;

	LASSERT(handle);

	lock = class_handle2object(handle->cookie);
	if (lock == NULL)
		return NULL;

	/* Fast path: no flags requested and lock not destroyed —
	 * avoid taking the resource lock. */
	if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
		lu_ref_add(&lock->l_reference, "handle", current);
		return lock;
	}

	lock_res_and_lock(lock);

	LASSERT(lock->l_resource != NULL);

	lu_ref_add_atomic(&lock->l_reference, "handle", current);
	if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
		unlock_res_and_lock(lock);
		CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
		LDLM_LOCK_PUT(lock);
		return NULL;
	}

	/* fail if any of the requested flags is already set */
	if (flags && (lock->l_flags & flags)) {
		unlock_res_and_lock(lock);
		LDLM_LOCK_PUT(lock);
		return NULL;
	}

	if (flags)
		lock->l_flags |= flags;

	unlock_res_and_lock(lock);
	return lock;
}
EXPORT_SYMBOL(__ldlm_handle2lock);
606
607
608
609
610
611
612void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
613{
614 ldlm_res2desc(lock->l_resource, &desc->l_resource);
615 desc->l_req_mode = lock->l_req_mode;
616 desc->l_granted_mode = lock->l_granted_mode;
617 ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
618 &lock->l_policy_data,
619 &desc->l_policy_data);
620}
621EXPORT_SYMBOL(ldlm_lock2desc);
622
623
624
625
626
627
/**
 * Queue \a lock on \a work_list for a blocking AST, because the new
 * lock \a new conflicts with it.  No-op if a blocking AST was already
 * queued (LDLM_FL_AST_SENT).  Takes references on both locks; \a new
 * becomes l_blocking_lock.
 */
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
			   struct list_head *work_list)
{
	if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
		LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
		lock->l_flags |= LDLM_FL_AST_SENT;
		/* propagate discard-data from the conflicting lock so the
		 * client drops cached data instead of flushing it */
		if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
			lock->l_flags |= LDLM_FL_DISCARD_DATA;
		LASSERT(list_empty(&lock->l_bl_ast));
		list_add(&lock->l_bl_ast, work_list);
		LDLM_LOCK_GET(lock);
		LASSERT(lock->l_blocking_lock == NULL);
		lock->l_blocking_lock = LDLM_LOCK_GET(new);
	}
}
645
646
647
648
649void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
650{
651 if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
652 lock->l_flags |= LDLM_FL_CP_REQD;
653 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
654 LASSERT(list_empty(&lock->l_cp_ast));
655 list_add(&lock->l_cp_ast, work_list);
656 LDLM_LOCK_GET(lock);
657 }
658}
659
660
661
662
663
664
665
666void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
667 struct list_head *work_list)
668{
669 check_res_locked(lock->l_resource);
670 if (new)
671 ldlm_add_bl_work_item(lock, new, work_list);
672 else
673 ldlm_add_cp_work_item(lock, work_list);
674}
675
676
677
678
679
680
/**
 * Add a reader/writer reference (per \a mode) to the lock named by
 * \a lockh; the handle must be valid (LASSERTs otherwise).
 */
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
	struct ldlm_lock *lock;

	lock = ldlm_handle2lock(lockh);
	LASSERT(lock != NULL);
	ldlm_lock_addref_internal(lock, mode);
	LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);
691
692
693
694
695
696
697
698
/**
 * Add a reader (NL/CR/PR) or writer (EX/CW/PW/GROUP/COS) reference to
 * \a lock according to \a mode, and remove it from the LRU.  Caller
 * holds the resource lock.  Also takes a lock reference ("user").
 */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
	ldlm_lock_remove_from_lru(lock);
	if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
		lock->l_readers++;
		lu_ref_add_atomic(&lock->l_reference, "reader", lock);
	}
	if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
		lock->l_writers++;
		lu_ref_add_atomic(&lock->l_reference, "writer", lock);
	}
	LDLM_LOCK_GET(lock);
	lu_ref_add_atomic(&lock->l_reference, "user", lock);
	LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
714
715
716
717
718
719
720
721
722
723int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
724{
725 struct ldlm_lock *lock;
726 int result;
727
728 result = -EAGAIN;
729 lock = ldlm_handle2lock(lockh);
730 if (lock != NULL) {
731 lock_res_and_lock(lock);
732 if (lock->l_readers != 0 || lock->l_writers != 0 ||
733 !(lock->l_flags & LDLM_FL_CBPENDING)) {
734 ldlm_lock_addref_internal_nolock(lock, mode);
735 result = 0;
736 }
737 unlock_res_and_lock(lock);
738 LDLM_LOCK_PUT(lock);
739 }
740 return result;
741}
742EXPORT_SYMBOL(ldlm_lock_addref_try);
743
744
745
746
747
748
/* Add a reader/writer reference to \a lock, taking the resource lock. */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
	lock_res_and_lock(lock);
	ldlm_lock_addref_internal_nolock(lock, mode);
	unlock_res_and_lock(lock);
}
755
756
757
758
759
760
761
762
/**
 * Drop a reader (NL/CR/PR) or writer (EX/CW/PW/GROUP/COS) reference on
 * \a lock according to \a mode.  Caller holds the resource lock.  Also
 * releases the matching "user" lock reference.
 */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
	if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
		LASSERT(lock->l_readers > 0);
		lu_ref_del(&lock->l_reference, "reader", lock);
		lock->l_readers--;
	}
	if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
		LASSERT(lock->l_writers > 0);
		lu_ref_del(&lock->l_reference, "writer", lock);
		lock->l_writers--;
	}

	lu_ref_del(&lock->l_reference, "user", lock);
	LDLM_LOCK_RELEASE(lock);
}
780
781
782
783
784
785
786
787
788
/**
 * Drop a reader/writer reference on \a lock and, when the last one is
 * gone, either start cancellation (if LDLM_FL_CBPENDING is set) or put
 * the lock on the client LRU for lazy cancellation.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
	struct ldlm_namespace *ns;

	lock_res_and_lock(lock);

	ns = ldlm_lock_to_ns(lock);

	ldlm_lock_decref_internal_nolock(lock, mode);

	if (lock->l_flags & LDLM_FL_LOCAL &&
	    !lock->l_readers && !lock->l_writers) {
		/* local locks are cancelled as soon as they become
		 * unused, so force CBPENDING */
		CDEBUG(D_INFO, "forcing cancel of local lock\n");
		lock->l_flags |= LDLM_FL_CBPENDING;
	}

	if (!lock->l_readers && !lock->l_writers &&
	    (lock->l_flags & LDLM_FL_CBPENDING)) {
		/* last reference gone and a cancel is pending:
		 * run the blocking callback now */
		if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
			CERROR("FL_CBPENDING set on non-local lock--just a warning\n");

		LDLM_DEBUG(lock, "final decref done on cbpending lock");

		/* keep the lock alive across the callback below */
		LDLM_LOCK_GET(lock);
		ldlm_lock_remove_from_lru(lock);
		unlock_res_and_lock(lock);

		if (lock->l_flags & LDLM_FL_FAIL_LOC)
			OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

		/* hand off to the blocking thread unless the callback
		 * must run atomically or the handoff fails */
		if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
		    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
			ldlm_handle_bl_callback(ns, NULL, lock);
	} else if (ns_is_client(ns) &&
		   !lock->l_readers && !lock->l_writers &&
		   !(lock->l_flags & LDLM_FL_NO_LRU) &&
		   !(lock->l_flags & LDLM_FL_BL_AST)) {

		LDLM_DEBUG(lock, "add lock into lru list");

		/* unused client lock: keep it cached on the LRU */
		ldlm_lock_add_to_lru(lock);
		unlock_res_and_lock(lock);

		if (lock->l_flags & LDLM_FL_FAIL_LOC)
			OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

		/* without early-cancel or lru-resize support, trim the
		 * LRU here to bound cached-lock growth */
		if (!exp_connect_cancelset(lock->l_conn_export) &&
		    !ns_connect_lru_resize(ns))
			ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
	} else {
		LDLM_DEBUG(lock, "do not add lock into lru list");
		unlock_res_and_lock(lock);
	}
}
852
853
854
855
/**
 * Drop a reader/writer reference on the lock named by \a lockh;
 * the handle must be valid (LASSERTF otherwise).
 */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

	LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
	ldlm_lock_decref_internal(lock, mode);
	LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref);
865
866
867
868
869
870
871
872
/**
 * Drop a reader/writer reference on the lock named by \a lockh and
 * force cancellation once it becomes unused, by setting
 * LDLM_FL_CBPENDING before the decref.
 */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

	LASSERT(lock != NULL);

	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
	lock_res_and_lock(lock);
	lock->l_flags |= LDLM_FL_CBPENDING;
	unlock_res_and_lock(lock);
	ldlm_lock_decref_internal(lock, mode);
	LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
887
/*
 * Insertion point into the granted-lock skip lists: the list nodes
 * after which a new lock should be linked on the resource list, the
 * same-mode list and the same-policy list respectively.
 */
struct sl_insert_point {
	struct list_head *res_link;
	struct list_head *mode_link;
	struct list_head *policy_link;
};
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
/**
 * Find the insertion point for \a req in the granted \a queue so that
 * the per-mode and per-policy skip lists stay grouped: locks of the same
 * mode are contiguous, and within a mode group IBITS locks with the same
 * inodebits are contiguous.  The result is returned in \a prev.
 *
 * Only LDLM_PLAIN and LDLM_IBITS locks are supported (LBUG otherwise).
 */
static void search_granted_lock(struct list_head *queue,
				struct ldlm_lock *req,
				struct sl_insert_point *prev)
{
	struct list_head *tmp;
	struct ldlm_lock *lock, *mode_end, *policy_end;

	list_for_each(tmp, queue) {
		lock = list_entry(tmp, struct ldlm_lock, l_res_link);

		/* last lock of this mode group (l_sl_mode is circular
		 * within the group, so .prev is the group tail) */
		mode_end = list_entry(lock->l_sl_mode.prev,
				      struct ldlm_lock, l_sl_mode);

		if (lock->l_req_mode != req->l_req_mode) {
			/* jump over the whole mode group */
			tmp = &mode_end->l_res_link;
			continue;
		}

		/* same mode group found */
		if (lock->l_resource->lr_type == LDLM_PLAIN) {
			/* insert at the end of the mode group */
			prev->res_link = &mode_end->l_res_link;
			prev->mode_link = &mode_end->l_sl_mode;
			prev->policy_link = &req->l_sl_policy;
			return;
		}

		if (lock->l_resource->lr_type == LDLM_IBITS) {
			/* walk the policy groups inside this mode group */
			for (;;) {
				policy_end =
					list_entry(lock->l_sl_policy.prev,
						   struct ldlm_lock,
						   l_sl_policy);

				if (lock->l_policy_data.l_inodebits.bits ==
				    req->l_policy_data.l_inodebits.bits) {
					/* matching policy group: insert at
					 * its end */
					prev->res_link =
						&policy_end->l_res_link;
					prev->mode_link =
						&policy_end->l_sl_mode;
					prev->policy_link =
						&policy_end->l_sl_policy;
					return;
				}

				if (policy_end == mode_end)
					/* done with this mode group */
					break;

				/* go to the next policy group */
				tmp = policy_end->l_res_link.next;
				lock = list_entry(tmp, struct ldlm_lock,
						  l_res_link);
			}

			/* no matching policy group: insert at the end of
			 * the mode group, starting a new policy group */
			prev->res_link = &mode_end->l_res_link;
			prev->mode_link = &mode_end->l_sl_mode;
			prev->policy_link = &req->l_sl_policy;
			return;
		}

		LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
		LBUG();
	}

	/* no same-mode group found: insert at the end of the queue,
	 * starting new mode and policy groups */
	prev->res_link = queue->prev;
	prev->mode_link = &req->l_sl_mode;
	prev->policy_link = &req->l_sl_policy;
}
984
985
986
987
988
/**
 * Link \a lock into the granted list and the mode/policy skip lists at
 * the insertion point \a prev (from search_granted_lock()).  Caller
 * holds the resource lock.  No-op if the lock was destroyed meanwhile.
 */
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
				       struct sl_insert_point *prev)
{
	struct ldlm_resource *res = lock->l_resource;

	check_res_locked(res);

	ldlm_resource_dump(D_INFO, res);
	LDLM_DEBUG(lock, "About to add lock:");

	if (lock->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		return;
	}

	LASSERT(list_empty(&lock->l_res_link));
	LASSERT(list_empty(&lock->l_sl_mode));
	LASSERT(list_empty(&lock->l_sl_policy));

	/* When prev points at the lock's own list head (a new mode or
	 * policy group is being started) there is nothing to link. */
	if (&lock->l_res_link != prev->res_link)
		list_add(&lock->l_res_link, prev->res_link);
	if (&lock->l_sl_mode != prev->mode_link)
		list_add(&lock->l_sl_mode, prev->mode_link);
	if (&lock->l_sl_policy != prev->policy_link)
		list_add(&lock->l_sl_policy, prev->policy_link);
}
1019
1020
1021
1022
1023
/**
 * Add an already-granted \a lock to its resource's granted list,
 * keeping the mode/policy skip lists grouped.
 */
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
	struct sl_insert_point prev;

	LASSERT(lock->l_req_mode == lock->l_granted_mode);

	search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
	ldlm_granted_list_add_lock(lock, &prev);
}
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 * Grant \a lock: set granted mode to the requested mode and add the
 * lock to the resource's granted queue (type-specific placement).
 * Caller holds the resource lock.
 *
 * If \a work_list is given and the lock has a completion AST, queue a
 * completion work item on it.
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;

	check_res_locked(res);

	lock->l_granted_mode = lock->l_req_mode;
	if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
		ldlm_grant_lock_with_skiplist(lock);
	else if (res->lr_type == LDLM_EXTENT)
		ldlm_extent_add_lock(res, lock);
	else
		ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (lock->l_granted_mode < res->lr_most_restr)
		res->lr_most_restr = lock->l_granted_mode;

	if (work_list && lock->l_completion_ast != NULL)
		ldlm_add_ast_work_item(lock, NULL, work_list);

	/* account the granted lock in the namespace pool */
	ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
}
1067
1068
1069
1070
1071
1072
1073
/**
 * Search \a queue for a lock compatible with the requested \a mode and
 * \a policy, honoring the LDLM_FL_* matching \a flags.
 *
 * On a match: *mode is updated to the matched mode; a reference is
 * taken (plus an LRU touch for LDLM_FL_TEST_LOCK, a reader/writer
 * reference otherwise).  The scan stops at \a old_lock if given.
 *
 * \param unref		if non-zero, also match locks that are being
 *			cancelled / have gone
 *
 * \retval matched lock, or NULL
 */
static struct ldlm_lock *search_queue(struct list_head *queue,
				      ldlm_mode_t *mode,
				      ldlm_policy_data_t *policy,
				      struct ldlm_lock *old_lock,
				      __u64 flags, int unref)
{
	struct ldlm_lock *lock;
	struct list_head *tmp;

	list_for_each(tmp, queue) {
		ldlm_mode_t match;

		lock = list_entry(tmp, struct ldlm_lock, l_res_link);

		/* never scan past the caller's own lock */
		if (lock == old_lock)
			break;

		/* exclusively-marked locks never match */
		if (ldlm_is_excl(lock))
			continue;

		/* llite sometimes matches CBPENDING locks on purpose
		 * (flag passed in); otherwise skip locks that are
		 * about to be cancelled */
		if (lock->l_flags & LDLM_FL_CBPENDING &&
		    !(flags & LDLM_FL_CBPENDING))
			continue;
		if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
		    lock->l_readers == 0 && lock->l_writers == 0)
			continue;

		if (!(lock->l_req_mode & *mode))
			continue;
		match = lock->l_req_mode;

		/* extent lock must cover the whole requested range */
		if (lock->l_resource->lr_type == LDLM_EXTENT &&
		    (lock->l_policy_data.l_extent.start >
		     policy->l_extent.start ||
		     lock->l_policy_data.l_extent.end < policy->l_extent.end))
			continue;

		if (unlikely(match == LCK_GROUP) &&
		    lock->l_resource->lr_type == LDLM_EXTENT &&
		    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
			continue;

		/* ibits lock must hold all requested bits */
		if (lock->l_resource->lr_type == LDLM_IBITS &&
		    ((lock->l_policy_data.l_inodebits.bits &
		      policy->l_inodebits.bits) !=
		      policy->l_inodebits.bits))
			continue;

		if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
			continue;

		if ((flags & LDLM_FL_LOCAL_ONLY) &&
		    !(lock->l_flags & LDLM_FL_LOCAL))
			continue;

		if (flags & LDLM_FL_TEST_LOCK) {
			LDLM_LOCK_GET(lock);
			ldlm_lock_touch_in_lru(lock);
		} else {
			ldlm_lock_addref_internal_nolock(lock, match);
		}
		*mode = match;
		return lock;
	}

	return NULL;
}
1151
1152void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
1153{
1154 if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
1155 lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
1156 wake_up_all(&lock->l_waitq);
1157 }
1158}
1159EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
1160
/* Locked wrapper around ldlm_lock_fail_match_locked(). */
void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
	lock_res_and_lock(lock);
	ldlm_lock_fail_match_locked(lock);
	unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_fail_match);
1168
1169
1170
1171
1172
1173
1174
1175
/**
 * Mark the lock's LVB as valid (LDLM_FL_LVB_READY) so matching may use
 * the lock, and wake up waiters.  Caller holds the resource lock.
 */
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
	lock->l_flags |= LDLM_FL_LVB_READY;
	wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
1182
1183
1184
1185
1186
/* Locked wrapper around ldlm_lock_allow_match_locked(). */
void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
	lock_res_and_lock(lock);
	ldlm_lock_allow_match_locked(lock);
	unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_allow_match);
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * Look for a lock on \a res_id compatible with \a type/\a mode/\a policy,
 * scanning the granted, converting and waiting queues in that order
 * (the latter two skipped with LDLM_FL_BLOCK_GRANTED).
 *
 * If \a ns is NULL, the resource/type/mode are taken from the lock in
 * \a lockh and the scan stops at that lock.  With LDLM_FL_LVB_READY the
 * call may block waiting for the matched lock's LVB to become valid.
 *
 * On success \a lockh names the matched lock, which holds either a
 * reader/writer reference or (with LDLM_FL_TEST_LOCK) no reference.
 *
 * \retval matched mode, or 0 if no match was found
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
			    const struct ldlm_res_id *res_id, ldlm_type_t type,
			    ldlm_policy_data_t *policy, ldlm_mode_t mode,
			    struct lustre_handle *lockh, int unref)
{
	struct ldlm_resource *res;
	struct ldlm_lock *lock, *old_lock = NULL;
	int rc = 0;

	if (ns == NULL) {
		/* derive everything from the lock passed in lockh */
		old_lock = ldlm_handle2lock(lockh);
		LASSERT(old_lock);

		ns = ldlm_lock_to_ns(old_lock);
		res_id = &old_lock->l_resource->lr_name;
		type = old_lock->l_resource->lr_type;
		mode = old_lock->l_req_mode;
	}

	res = ldlm_resource_get(ns, NULL, res_id, type, 0);
	if (res == NULL) {
		LASSERT(old_lock == NULL);
		return 0;
	}

	LDLM_RESOURCE_ADDREF(res);
	lock_res(res);

	lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
			    flags, unref);
	if (lock != NULL) {
		rc = 1;
		goto out;
	}
	if (flags & LDLM_FL_BLOCK_GRANTED) {
		rc = 0;
		goto out;
	}
	lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
			    flags, unref);
	if (lock != NULL) {
		rc = 1;
		goto out;
	}
	lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
			    flags, unref);
	if (lock != NULL) {
		rc = 1;
		goto out;
	}

 out:
	unlock_res(res);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);

	if (lock) {
		ldlm_lock2handle(lock, lockh);
		if ((flags & LDLM_FL_LVB_READY) &&
		    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
			__u64 wait_flags = LDLM_FL_LVB_READY |
				LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
			struct l_wait_info lwi;

			if (lock->l_completion_ast) {
				int err = lock->l_completion_ast(lock,
							  LDLM_FL_WAIT_NOREPROC,
								 NULL);
				if (err) {
					/* undo the reference taken by
					 * search_queue() */
					if (flags & LDLM_FL_TEST_LOCK)
						LDLM_LOCK_RELEASE(lock);
					else
						ldlm_lock_decref_internal(lock,
									  mode);
					rc = 0;
					goto out2;
				}
			}

			lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
					       NULL, LWI_ON_SIGNAL_NOOP, NULL);

			/* wait for the LVB to become valid (or the lock
			 * to be destroyed/failed) */
			l_wait_event(lock->l_waitq,
				     lock->l_flags & wait_flags,
				     &lwi);
			if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
				if (flags & LDLM_FL_TEST_LOCK)
					LDLM_LOCK_RELEASE(lock);
				else
					ldlm_lock_decref_internal(lock, mode);
				rc = 0;
			}
		}
	}
 out2:
	if (rc) {
		LDLM_DEBUG(lock, "matched (%llu %llu)",
			   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
				res_id->name[2] : policy->l_extent.start,
			   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
				res_id->name[3] : policy->l_extent.end);

		/* check the security context; on failure pretend no match */
		if (lock->l_conn_export &&
		    sptlrpc_import_check_ctx(
				class_exp2cliimp(lock->l_conn_export))) {
			if (!(flags & LDLM_FL_TEST_LOCK))
				ldlm_lock_decref_internal(lock, mode);
			rc = 0;
		}

		if (flags & LDLM_FL_TEST_LOCK)
			LDLM_LOCK_RELEASE(lock);

	} else if (!(flags & LDLM_FL_TEST_LOCK)) {
		LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)",
				  ns, type, mode, res_id->name[0],
				  res_id->name[1],
				  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
					res_id->name[2] : policy->l_extent.start,
				  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
					res_id->name[3] : policy->l_extent.end);
	}
	if (old_lock)
		LDLM_LOCK_PUT(old_lock);

	return rc ? mode : 0;
}
EXPORT_SYMBOL(ldlm_lock_match);
1355
/**
 * Re-check that the lock named by \a lockh is still usable and, if so,
 * take a reader/writer reference on it in its granted mode.
 *
 * \param bits	[out, optional] the lock's inodebits policy bits
 *
 * \retval granted mode on success, 0 if the lock is gone or cancelling
 */
ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
					__u64 *bits)
{
	struct ldlm_lock *lock;
	ldlm_mode_t mode = 0;

	lock = ldlm_handle2lock(lockh);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		if (lock->l_flags & LDLM_FL_GONE_MASK)
			goto out;

		/* unused lock with a pending cancel is not usable */
		if (lock->l_flags & LDLM_FL_CBPENDING &&
		    lock->l_readers == 0 && lock->l_writers == 0)
			goto out;

		if (bits)
			*bits = lock->l_policy_data.l_inodebits.bits;
		mode = lock->l_granted_mode;
		ldlm_lock_addref_internal_nolock(lock, mode);
	}

out:
	if (lock != NULL) {
		unlock_res_and_lock(lock);
		LDLM_LOCK_PUT(lock);
	}
	return mode;
}
EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
1386
1387
/**
 * Copy the lock value block (LVB) of the lock's type out of the request
 * capsule \a pill into \a data, byte-swapping as needed.
 *
 * \param loc	RCL_CLIENT or RCL_SERVER: which side of the capsule to read
 * \param size	expected LVB size; selects v1 vs. full ost_lvb for LVB_T_OST
 *
 * \retval 0 on success, -EPROTO if the capsule has no LVB,
 *	   -EINVAL on unexpected size or unknown LVB type
 */
int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
		  enum req_location loc, void *data, int size)
{
	void *lvb;

	LASSERT(data != NULL);
	LASSERT(size >= 0);

	switch (lock->l_lvb_type) {
	case LVB_T_OST:
		if (size == sizeof(struct ost_lvb)) {
			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
							lustre_swab_ost_lvb);
			else
				lvb = req_capsule_server_swab_get(pill,
								  &RMF_DLM_LVB,
							lustre_swab_ost_lvb);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				return -EPROTO;
			}

			memcpy(data, lvb, size);
		} else if (size == sizeof(struct ost_lvb_v1)) {
			/* old peer sent a v1 LVB: the nanosecond fields
			 * are absent on the wire, zero them locally */
			struct ost_lvb *olvb = data;

			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
							lustre_swab_ost_lvb_v1);
			else
				lvb = req_capsule_server_sized_swab_get(pill,
						&RMF_DLM_LVB, size,
						lustre_swab_ost_lvb_v1);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				return -EPROTO;
			}

			memcpy(data, lvb, size);
			olvb->lvb_mtime_ns = 0;
			olvb->lvb_atime_ns = 0;
			olvb->lvb_ctime_ns = 0;
		} else {
			LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
				   size);
			return -EINVAL;
		}
		break;
	case LVB_T_LQUOTA:
		if (size == sizeof(struct lquota_lvb)) {
			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
							lustre_swab_lquota_lvb);
			else
				lvb = req_capsule_server_swab_get(pill,
								  &RMF_DLM_LVB,
							lustre_swab_lquota_lvb);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				return -EPROTO;
			}

			memcpy(data, lvb, size);
		} else {
			LDLM_ERROR(lock,
				   "Replied unexpected lquota LVB size %d",
				   size);
			return -EINVAL;
		}
		break;
	case LVB_T_LAYOUT:
		if (size == 0)
			break;

		/* layout LVBs are opaque: no swabbing */
		if (loc == RCL_CLIENT)
			lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
		else
			lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
		if (unlikely(lvb == NULL)) {
			LDLM_ERROR(lock, "no LVB");
			return -EPROTO;
		}

		memcpy(data, lvb, size);
		break;
	default:
		LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
		dump_stack();
		return -EINVAL;
	}

	return 0;
}
1485
1486
1487
1488
1489
/*
 * Create and fill in a new LDLM lock with the specified properties.
 *
 * Looks up (or creates) the resource named by @res_id in namespace @ns,
 * allocates a lock on it, and installs the requested mode, callbacks,
 * AST data and (optionally) an LVB buffer of @lvb_len bytes.
 *
 * Returns a referenced lock on success, NULL on any failure.
 */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
				   const struct ldlm_res_id *res_id,
				   ldlm_type_t type,
				   ldlm_mode_t mode,
				   const struct ldlm_callback_suite *cbs,
				   void *data, __u32 lvb_len,
				   enum lvb_type lvb_type)
{
	struct ldlm_lock *lock;
	struct ldlm_resource *res;

	res = ldlm_resource_get(ns, NULL, res_id, type, 1);
	if (res == NULL)
		return NULL;

	lock = ldlm_lock_new(res);

	/* NOTE(review): if ldlm_lock_new() fails, the reference obtained on
	 * @res above is not dropped here -- verify that ldlm_lock_new()
	 * releases it on failure, otherwise this leaks a resource ref. */
	if (lock == NULL)
		return NULL;

	lock->l_req_mode = mode;
	lock->l_ast_data = data;
	lock->l_pid = current_pid();
	if (ns_is_server(ns))
		lock->l_flags |= LDLM_FL_NS_SRV;
	if (cbs) {
		lock->l_blocking_ast = cbs->lcs_blocking;
		lock->l_completion_ast = cbs->lcs_completion;
		lock->l_glimpse_ast = cbs->lcs_glimpse;
	}

	lock->l_tree_node = NULL;

	/* Extent locks carry an interval-tree node; allocate it up front. */
	if (type == LDLM_EXTENT) {
		if (ldlm_interval_alloc(lock) == NULL)
			goto out;
	}

	if (lvb_len) {
		lock->l_lvb_len = lvb_len;
		lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
		if (!lock->l_lvb_data)
			goto out;
	}

	lock->l_lvb_type = lvb_type;
	/* Fault-injection point for testing enqueue failure paths. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
		goto out;

	return lock;

out:
	ldlm_lock_destroy(lock);
	LDLM_LOCK_RELEASE(lock);
	return NULL;
}
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
/*
 * Enqueue (request) a lock.  On this client-side-only module a
 * server-namespace lock is a fatal error (LBUG).
 *
 * Does not block; as a result of enqueue the lock is placed on the
 * granted, converting or waiting list of its resource as dictated by
 * @flags.
 *
 * If the namespace has an intent policy and the request carries
 * LDLM_FL_HAS_INTENT (and is not a replay), the policy may substitute a
 * different, already-granted lock through @lockp, in which case
 * LDLM_FL_LOCK_CHANGED is set in @flags and 0 is returned.
 */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
			       struct ldlm_lock **lockp,
			       void *cookie, __u64 *flags)
{
	struct ldlm_lock *lock = *lockp;
	struct ldlm_resource *res = lock->l_resource;
	int local = ns_is_client(ldlm_res_to_ns(res));
	ldlm_error_t rc = ELDLM_OK;
	struct ldlm_interval *node = NULL;

	lock->l_last_activity = get_seconds();

	/* Intent policies run only for non-local, non-replay requests. */
	if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
	    && !local && ns->ns_policy) {
		rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
				   NULL);
		if (rc == ELDLM_LOCK_REPLACED) {
			/* The policy returned a (different) granted lock in
			 * *lockp; destroy the one we were passed and tell
			 * the caller the lock changed. */
			if (lock != *lockp) {
				ldlm_lock_destroy(lock);
				LDLM_LOCK_RELEASE(lock);
			}
			*flags |= LDLM_FL_LOCK_CHANGED;
			return 0;
		} else if (rc != ELDLM_OK ||
			   (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
			/* Policy failed, or the caller only wanted the intent
			 * executed: the lock itself is not kept. */
			ldlm_lock_destroy(lock);
			return rc;
		}
	}

	/* For a replayed extent lock, preallocate the interval node here,
	 * outside the resource lock, since allocation may sleep. */
	if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
		OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);

	lock_res_and_lock(lock);
	if (local && lock->l_req_mode == lock->l_granted_mode) {
		/* The lock was already granted (e.g. before we got a chance
		 * to enqueue it locally); clear any blocking flags and
		 * return success. */
		*flags &= ~(LDLM_FL_BLOCK_GRANTED |
			    LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
		goto out;
	}

	ldlm_resource_unlink_lock(lock);
	if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
		if (node == NULL) {
			/* No preallocated interval node available. */
			ldlm_lock_destroy_nolock(lock);
			rc = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&node->li_group);
		ldlm_interval_attach(node, lock);
		node = NULL;
	}

	/* Some flags from the enqueue want to make it into the AST, via the
	 * lock's l_flags. */
	lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;

	/* A client namespace only knows about locks taken by this client,
	 * so it cannot decide grantability itself: it places the lock on
	 * whichever queue the server-provided flags dictate.  The server
	 * (else) branch cannot occur in this client-only module. */
	if (local) {
		if (*flags & LDLM_FL_BLOCK_CONV)
			ldlm_resource_add_lock(res, &res->lr_converting, lock);
		else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
		else
			ldlm_grant_lock(lock, NULL);
		goto out;
	} else {
		CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}

out:
	unlock_res_and_lock(lock);
	if (node)
		/* Preallocated but unused interval node. */
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	return rc;
}
1655
1656
1657
1658
1659
/*
 * Producer callback for ldlm_run_ast_work(): pop one lock from the
 * blocking-AST list in @opaq (a struct ldlm_cb_set_arg) and invoke its
 * blocking callback.
 *
 * Returns -ENOENT when the list is exhausted, otherwise the callback's
 * return value.
 */
static int
ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_lock_desc d;
	int rc;
	struct ldlm_lock *lock;

	if (list_empty(arg->list))
		return -ENOENT;

	lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);

	/* Unlink under the resource lock and mark the AST as run exactly
	 * once (asserted below). */
	lock_res_and_lock(lock);
	list_del_init(&lock->l_bl_ast);

	LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
	LASSERT(lock->l_bl_ast_run == 0);
	LASSERT(lock->l_blocking_lock);
	lock->l_bl_ast_run++;
	unlock_res_and_lock(lock);

	/* Describe the conflicting lock for the callback. */
	ldlm_lock2desc(lock->l_blocking_lock, &d);

	rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
	/* Drop our references on both the blocking lock and this lock. */
	LDLM_LOCK_RELEASE(lock->l_blocking_lock);
	lock->l_blocking_lock = NULL;
	LDLM_LOCK_RELEASE(lock);

	return rc;
}
1692
1693
1694
1695
/*
 * Producer callback for ldlm_run_ast_work(): pop one lock from the
 * completion-AST list in @opaq (a struct ldlm_cb_set_arg) and invoke
 * its completion callback.
 *
 * Returns -ENOENT when the list is exhausted, otherwise the callback's
 * return value (0 if the lock has no completion callback).
 */
static int
ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	int rc = 0;
	struct ldlm_lock *lock;
	ldlm_completion_callback completion_callback;

	if (list_empty(arg->list))
		return -ENOENT;

	lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);

	/* Under the resource lock: unlink the lock, snapshot the callback
	 * pointer, and clear the "completion required" flag so the AST is
	 * issued exactly once.  The callback itself runs unlocked. */
	lock_res_and_lock(lock);
	list_del_init(&lock->l_cp_ast);
	LASSERT(lock->l_flags & LDLM_FL_CP_REQD);

	/* Snapshot under the lock; the field could change once we drop it. */
	completion_callback = lock->l_completion_ast;
	lock->l_flags &= ~LDLM_FL_CP_REQD;
	unlock_res_and_lock(lock);

	if (completion_callback != NULL)
		rc = completion_callback(lock, 0, (void *)arg);
	/* Drop the reference taken when the lock was queued on the list. */
	LDLM_LOCK_RELEASE(lock);

	return rc;
}
1736
1737
1738
1739
/*
 * Producer callback for ldlm_run_ast_work(): pop one lock from the
 * revoke list in @opaq (a struct ldlm_cb_set_arg) and invoke its
 * blocking callback with a synthesized EX-conflict descriptor, forcing
 * the holder to give the lock up.
 *
 * Returns -ENOENT when the list is exhausted, otherwise the callback's
 * return value.
 */
static int
ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_lock_desc desc;
	int rc;
	struct ldlm_lock *lock;

	if (list_empty(arg->list))
		return -ENOENT;

	lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
	list_del_init(&lock->l_rk_ast);

	/* The lock describes itself, but pretend an EX lock conflicts with
	 * it so the blocking AST cancels it regardless of its real mode. */
	ldlm_lock2desc(lock, &desc);
	desc.l_req_mode = LCK_EX;
	desc.l_granted_mode = 0;

	rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING);
	LDLM_LOCK_RELEASE(lock);

	return rc;
}
1764
1765
1766
1767
/*
 * Producer callback for ldlm_run_ast_work(): pop one glimpse work item
 * from the list in @opaq (a struct ldlm_cb_set_arg) and invoke the
 * lock's glimpse callback.
 *
 * Returns -ENOENT when the list is exhausted, 1 if the glimpse callback
 * returned 0, otherwise 0.
 */
int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_glimpse_work *gl_work;
	struct ldlm_lock *lock;
	int rc = 0;

	if (list_empty(arg->list))
		return -ENOENT;

	gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
			     gl_list);
	list_del_init(&gl_work->gl_list);

	lock = gl_work->gl_lock;

	/* Hand the per-item glimpse descriptor to the callback via arg. */
	arg->gl_desc = gl_work->gl_desc;

	/* Invoke the glimpse callback; a 0 return is reported as rc = 1. */
	if (lock->l_glimpse_ast(lock, (void *)arg) == 0)
		rc = 1;

	LDLM_LOCK_RELEASE(lock);

	/* Free the work item unless the submitter asked to keep it. */
	if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
		kfree(gl_work);

	return rc;
}
1798
1799
1800
1801
1802
1803
1804
1805int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
1806 enum ldlm_desc_ast_t ast_type)
1807{
1808 struct ldlm_cb_set_arg *arg;
1809 set_producer_func work_ast_lock;
1810 int rc;
1811
1812 if (list_empty(rpc_list))
1813 return 0;
1814
1815 arg = kzalloc(sizeof(*arg), GFP_NOFS);
1816 if (!arg)
1817 return -ENOMEM;
1818
1819 atomic_set(&arg->restart, 0);
1820 arg->list = rpc_list;
1821
1822 switch (ast_type) {
1823 case LDLM_WORK_BL_AST:
1824 arg->type = LDLM_BL_CALLBACK;
1825 work_ast_lock = ldlm_work_bl_ast_lock;
1826 break;
1827 case LDLM_WORK_CP_AST:
1828 arg->type = LDLM_CP_CALLBACK;
1829 work_ast_lock = ldlm_work_cp_ast_lock;
1830 break;
1831 case LDLM_WORK_REVOKE_AST:
1832 arg->type = LDLM_BL_CALLBACK;
1833 work_ast_lock = ldlm_work_revoke_ast_lock;
1834 break;
1835 case LDLM_WORK_GL_AST:
1836 arg->type = LDLM_GL_CALLBACK;
1837 work_ast_lock = ldlm_work_gl_ast_lock;
1838 break;
1839 default:
1840 LBUG();
1841 }
1842
1843
1844
1845
1846
1847 arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
1848 work_ast_lock, arg);
1849 if (arg->set == NULL) {
1850 rc = -ENOMEM;
1851 goto out;
1852 }
1853
1854 ptlrpc_set_wait(arg->set);
1855 ptlrpc_set_destroy(arg->set);
1856
1857 rc = atomic_read(&arg->restart) ? -ERESTART : 0;
1858 goto out;
1859out:
1860 kfree(arg);
1861 return rc;
1862}
1863
/* Reprocess a single resource's queues; always continue iteration. */
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
	ldlm_reprocess_all(res);
	return LDLM_ITER_CONTINUE;
}
1869
1870static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1871 struct hlist_node *hnode, void *arg)
1872{
1873 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1874 int rc;
1875
1876 rc = reprocess_one_queue(res, arg);
1877
1878 return rc == LDLM_ITER_STOP;
1879}
1880
1881
1882
1883
1884
1885void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1886{
1887 if (ns != NULL) {
1888 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1889 ldlm_reprocess_res, NULL);
1890 }
1891}
1892EXPORT_SYMBOL(ldlm_reprocess_all_ns);
1893
1894
1895
1896
1897
1898
1899
1900
1901
/*
 * Attempt to reprocess the waiting/converting queues of @res.
 *
 * On this client-side-only module queue reprocessing is a server task,
 * so the only valid call is on a client namespace, where it is a no-op;
 * a server namespace is a fatal error (LBUG).
 */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
	/* The unused rpc_list that used to be declared here has been
	 * removed: the client-only path never queues any ASTs. */
	if (!ns_is_client(ldlm_res_to_ns(res))) {
		CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}
}
1911
1912
1913
1914
1915
/*
 * Run the lock's blocking AST with LDLM_CB_CANCELING, at most once per
 * lock (guarded by LDLM_FL_CANCEL), then mark the lock LDLM_FL_BL_DONE.
 *
 * Must be called with the resource lock held; note that the resource
 * lock is DROPPED around the callback invocation and reacquired
 * afterwards, so callers must not assume state is unchanged across this
 * call.
 */
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
	check_res_locked(lock->l_resource);
	if (!(lock->l_flags & LDLM_FL_CANCEL)) {
		lock->l_flags |= LDLM_FL_CANCEL;
		if (lock->l_blocking_ast) {
			/* Run the AST without the resource lock held;
			 * it is reacquired below. */
			unlock_res_and_lock(lock);
			lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
					     LDLM_CB_CANCELING);
			lock_res_and_lock(lock);
		} else {
			LDLM_DEBUG(lock, "no blocking ast");
		}
	}
	lock->l_flags |= LDLM_FL_BL_DONE;
}
1932
1933
1934
1935
1936void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
1937{
1938 if (req->l_resource->lr_type != LDLM_PLAIN &&
1939 req->l_resource->lr_type != LDLM_IBITS)
1940 return;
1941
1942 list_del_init(&req->l_sl_policy);
1943 list_del_init(&req->l_sl_mode);
1944}
1945
1946
1947
1948
/*
 * Cancel a lock: verify it has no active users, run the cancellation
 * (blocking) callback, unlink the lock from its resource and destroy
 * it, updating pool accounting for granted locks.
 */
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
	struct ldlm_resource *res;
	struct ldlm_namespace *ns;

	lock_res_and_lock(lock);

	res = lock->l_resource;
	ns = ldlm_res_to_ns(res);

	/* A lock being cancelled must have no readers or writers left. */
	if (lock->l_readers || lock->l_writers) {
		LDLM_ERROR(lock, "lock still has references");
		LBUG();
	}

	if (lock->l_flags & LDLM_FL_WAITED)
		ldlm_del_waiting_lock(lock);

	/* Runs the blocking AST; drops and reacquires the resource lock. */
	ldlm_cancel_callback(lock);

	/* Second FL_WAITED check -- presumably because the resource lock was
	 * dropped inside ldlm_cancel_callback() and the lock could have been
	 * put back on the waiting list meanwhile; confirm before changing. */
	if (lock->l_flags & LDLM_FL_WAITED)
		ldlm_del_waiting_lock(lock);

	ldlm_resource_unlink_lock(lock);
	ldlm_lock_destroy_nolock(lock);

	/* Only granted locks are accounted in the pool. */
	if (lock->l_granted_mode == lock->l_req_mode)
		ldlm_pool_del(&ns->ns_pool, lock);

	/* Zero out the granted mode so a repeated cancel of the same lock
	 * cannot double-delete it from the pool above. */
	lock->l_granted_mode = LCK_MINMODE;
	unlock_res_and_lock(lock);
}
1988EXPORT_SYMBOL(ldlm_lock_cancel);
1989
1990
1991
1992
1993int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1994{
1995 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1996 int rc = -EINVAL;
1997
1998 if (lock) {
1999 if (lock->l_ast_data == NULL)
2000 lock->l_ast_data = data;
2001 if (lock->l_ast_data == data)
2002 rc = 0;
2003 LDLM_LOCK_PUT(lock);
2004 }
2005 return rc;
2006}
2007EXPORT_SYMBOL(ldlm_lock_set_data);
2008
/* Iterator state shared by ldlm_cancel_locks_for_export() and its
 * per-lock callback. */
struct export_cl_data {
	struct obd_export *ecl_exp;	/* export whose locks are cancelled */
	int ecl_loop;			/* number of locks processed so far */
};
2013
2014
2015
2016
2017
/*
 * Hash-iteration callback: cancel one lock belonging to the export in
 * @data (a struct export_cl_data), updating the resource LVB and
 * reprocessing the resource afterwards.  Always returns 0 so iteration
 * continues.
 */
int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				    struct hlist_node *hnode, void *data)

{
	struct export_cl_data *ecl = (struct export_cl_data *)data;
	struct obd_export *exp = ecl->ecl_exp;
	struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
	struct ldlm_resource *res;

	/* Hold the resource and the lock across the cancel/reprocess. */
	res = ldlm_resource_getref(lock->l_resource);
	LDLM_LOCK_GET(lock);

	LDLM_DEBUG(lock, "export %p", exp);
	ldlm_res_lvbo_update(res, NULL, 1);
	ldlm_lock_cancel(lock);
	ldlm_reprocess_all(res);
	ldlm_resource_putref(res);
	LDLM_LOCK_RELEASE(lock);

	ecl->ecl_loop++;
	/* (x & -x) == x is true exactly for powers of two: log progress at
	 * loop counts 1, 2, 4, 8, ... to throttle the debug output. */
	if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
		CDEBUG(D_INFO,
		       "Cancel lock %p for export %p (loop %d), still have %d locks left on hash table.\n",
		       lock, exp, ecl->ecl_loop,
		       atomic_read(&hs->hs_count));
	}

	return 0;
}
2047
2048
2049
2050
2051
2052
/*
 * Cancel every lock in @exp's lock hash, draining the table via
 * cfs_hash_for_each_empty() with ldlm_cancel_locks_for_export_cb()
 * doing the per-lock work.
 */
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
	struct export_cl_data ecl = {
		.ecl_exp = exp,
		.ecl_loop = 0,
	};

	cfs_hash_for_each_empty(exp->exp_lock_hash,
				ldlm_cancel_locks_for_export_cb, &ecl);
}
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
/*
 * Downgrade an exclusive (PW or EX) granted lock to COS mode: unlink
 * it, set the new request mode, and re-grant it immediately, then
 * reprocess the resource.
 *
 * Only new_mode == LCK_COS is supported (asserted).
 */
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
	LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
	LASSERT(new_mode == LCK_COS);

	lock_res_and_lock(lock);
	ldlm_resource_unlink_lock(lock);

	/* Remove the lock from pool accounting before re-granting it in
	 * the new mode below. */
	ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);

	lock->l_req_mode = new_mode;
	ldlm_grant_lock(lock, NULL);
	unlock_res_and_lock(lock);
	ldlm_reprocess_all(lock->l_resource);
}
2092EXPORT_SYMBOL(ldlm_lock_downgrade);
2093
2094
2095
2096
2097
2098
2099
2100
/*
 * Convert a granted lock to @new_mode.  Only the PR -> PW conversion is
 * supported (asserted below); on this client-side-only module the lock
 * can only end up blocked on the converting list -- an immediately
 * granted conversion, or a server namespace, is a fatal error (LBUG).
 *
 * Returns the lock's resource, or NULL if the interval-node allocation
 * fails.  When the mode is unchanged, LDLM_FL_BLOCK_GRANTED is set in
 * @flags and the resource is returned immediately.
 */
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
					__u32 *flags)
{
	LIST_HEAD(rpc_list);
	struct ldlm_resource *res;
	struct ldlm_namespace *ns;
	int granted = 0;
	struct ldlm_interval *node;

	/* Nothing to do if the mode is unchanged. */
	if (new_mode == lock->l_granted_mode) {
		*flags |= LDLM_FL_BLOCK_GRANTED;
		return lock->l_resource;
	}

	/* The resource type is not known until the resource lock is taken
	 * below, so allocate the extent interval node unconditionally here
	 * (allocation may sleep; it is freed at the end if unused). */
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		/* NOTE(review): callers presumably map this NULL to an
		 * EDEADLOCK-style failure -- confirm at the call sites. */
		return NULL;

	LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
		 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

	lock_res_and_lock(lock);

	res = lock->l_resource;
	ns = ldlm_res_to_ns(res);

	lock->l_req_mode = new_mode;
	if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
		ldlm_resource_unlink_lock(lock);
	} else {
		ldlm_resource_unlink_lock(lock);
		if (res->lr_type == LDLM_EXTENT) {
			/* Extent locks must carry an interval node; attach
			 * the one preallocated above. */
			INIT_LIST_HEAD(&node->li_group);
			ldlm_interval_attach(node, lock);
			node = NULL;
		}
	}

	/* Remove the old-mode lock from pool accounting before it is
	 * requeued/regranted in the new mode. */
	ldlm_pool_del(&ns->ns_pool, lock);

	/* On a client namespace the server's flags dictate placement. */
	if (ns_is_client(ldlm_res_to_ns(res))) {
		if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
			ldlm_resource_add_lock(res, &res->lr_converting, lock);
		} else {
			/* An ungated conversion is unexpected on the client;
			 * treat it as fatal. */
			LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
				   *flags);
			LBUG();

			/* NOTE(review): everything below is unreachable after
			 * the LBUG() above -- dead code kept as-is. */
			ldlm_grant_lock(lock, &rpc_list);
			granted = 1;
			/* completion AST invoked with the resource lock held */
			if (lock->l_completion_ast)
				lock->l_completion_ast(lock, 0, NULL);
		}
	} else {
		CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}
	unlock_res_and_lock(lock);

	if (granted)
		ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
	if (node)
		/* Preallocated but unused interval node. */
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	return res;
}
2181EXPORT_SYMBOL(ldlm_lock_convert);
2182
2183
2184
2185
2186
2187
2188void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
2189{
2190 struct ldlm_lock *lock;
2191
2192 if (!((libcfs_debug | D_ERROR) & level))
2193 return;
2194
2195 lock = ldlm_handle2lock(lockh);
2196 if (lock == NULL)
2197 return;
2198
2199 LDLM_DEBUG_LIMIT(level, lock, "###");
2200
2201 LDLM_LOCK_PUT(lock);
2202}
2203EXPORT_SYMBOL(ldlm_lock_dump_handle);
2204
2205
2206
2207
2208
/*
 * Core of the LDLM_DEBUG machinery: format a full description of @lock
 * (namespace, handle, refcounts, modes, resource, type-specific policy
 * data, flags, peer NID, remote handle, export refs, pid, timeout, LVB
 * type) prefixed by the caller's printf-style message.
 *
 * The peer NID is taken from the export's connection when available,
 * from the export's obd import otherwise, and reads "local" when there
 * is no export.  A lock with no resource gets a reduced "??" line.
 */
void _ldlm_lock_debug(struct ldlm_lock *lock,
		      struct libcfs_debug_msg_data *msgdata,
		      const char *fmt, ...)
{
	va_list args;
	struct obd_export *exp = lock->l_export;
	struct ldlm_resource *resource = lock->l_resource;
	char *nid = "local";

	va_start(args, fmt);

	if (exp && exp->exp_connection) {
		nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
	} else if (exp && exp->exp_obd != NULL) {
		struct obd_import *imp = exp->exp_obd->u.cli.cl_import;

		nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
	}

	/* No resource: print what we can and return. */
	if (resource == NULL) {
		libcfs_debug_vmsg2(msgdata, fmt, args,
				   " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
				   lock,
				   lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
				   lock->l_readers, lock->l_writers,
				   ldlm_lockname[lock->l_granted_mode],
				   ldlm_lockname[lock->l_req_mode],
				   lock->l_flags, nid, lock->l_remote_handle.cookie,
				   exp ? atomic_read(&exp->exp_refcount) : -99,
				   lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
		va_end(args);
		return;
	}

	/* The resource type selects which policy data fields to print. */
	switch (resource->lr_type) {
	case LDLM_EXTENT:
		/* Includes granted and requested extent ranges. */
		libcfs_debug_vmsg2(msgdata, fmt, args,
				   " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
				   ldlm_lock_to_ns_name(lock), lock,
				   lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
				   lock->l_readers, lock->l_writers,
				   ldlm_lockname[lock->l_granted_mode],
				   ldlm_lockname[lock->l_req_mode],
				   PLDLMRES(resource),
				   atomic_read(&resource->lr_refcount),
				   ldlm_typename[resource->lr_type],
				   lock->l_policy_data.l_extent.start,
				   lock->l_policy_data.l_extent.end,
				   lock->l_req_extent.start, lock->l_req_extent.end,
				   lock->l_flags, nid, lock->l_remote_handle.cookie,
				   exp ? atomic_read(&exp->exp_refcount) : -99,
				   lock->l_pid, lock->l_callback_timeout,
				   lock->l_lvb_type);
		break;

	case LDLM_FLOCK:
		/* Includes owner pid and byte range; no lvb_type field. */
		libcfs_debug_vmsg2(msgdata, fmt, args,
				   " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
				   ldlm_lock_to_ns_name(lock), lock,
				   lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
				   lock->l_readers, lock->l_writers,
				   ldlm_lockname[lock->l_granted_mode],
				   ldlm_lockname[lock->l_req_mode],
				   PLDLMRES(resource),
				   atomic_read(&resource->lr_refcount),
				   ldlm_typename[resource->lr_type],
				   lock->l_policy_data.l_flock.pid,
				   lock->l_policy_data.l_flock.start,
				   lock->l_policy_data.l_flock.end,
				   lock->l_flags, nid, lock->l_remote_handle.cookie,
				   exp ? atomic_read(&exp->exp_refcount) : -99,
				   lock->l_pid, lock->l_callback_timeout);
		break;

	case LDLM_IBITS:
		/* Includes the inodebits mask. */
		libcfs_debug_vmsg2(msgdata, fmt, args,
				   " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
				   ldlm_lock_to_ns_name(lock),
				   lock, lock->l_handle.h_cookie,
				   atomic_read(&lock->l_refc),
				   lock->l_readers, lock->l_writers,
				   ldlm_lockname[lock->l_granted_mode],
				   ldlm_lockname[lock->l_req_mode],
				   PLDLMRES(resource),
				   lock->l_policy_data.l_inodebits.bits,
				   atomic_read(&resource->lr_refcount),
				   ldlm_typename[resource->lr_type],
				   lock->l_flags, nid, lock->l_remote_handle.cookie,
				   exp ? atomic_read(&exp->exp_refcount) : -99,
				   lock->l_pid, lock->l_callback_timeout,
				   lock->l_lvb_type);
		break;

	default:
		/* PLAIN and anything else: no type-specific policy data. */
		libcfs_debug_vmsg2(msgdata, fmt, args,
				   " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
				   ldlm_lock_to_ns_name(lock),
				   lock, lock->l_handle.h_cookie,
				   atomic_read(&lock->l_refc),
				   lock->l_readers, lock->l_writers,
				   ldlm_lockname[lock->l_granted_mode],
				   ldlm_lockname[lock->l_req_mode],
				   PLDLMRES(resource),
				   atomic_read(&resource->lr_refcount),
				   ldlm_typename[resource->lr_type],
				   lock->l_flags, nid, lock->l_remote_handle.cookie,
				   exp ? atomic_read(&exp->exp_refcount) : -99,
				   lock->l_pid, lock->l_callback_timeout,
				   lock->l_lvb_type);
		break;
	}
	va_end(args);
}
2322EXPORT_SYMBOL(_ldlm_lock_debug);
2323