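/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
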
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	char string[512];		/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
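
/**
 * gl_hash() - Turn glock name into hash bucket number
 * @sdp: The superblock
 * @name: The glock name
 *
 * Returns: The number of the corresponding hash bucket
 */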
static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
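
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */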
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}
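
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */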
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
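
/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, move it to the tail of the lru list.
 * The caller must hold gl->gl_spin.
 */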
static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl)) {
		spin_lock(&lru_lock);

		if (!list_empty(&gl->gl_lru))
			list_del_init(&gl->gl_lru);
		else
			atomic_inc(&lru_count);

		list_add_tail(&gl->gl_lru, &lru_list);
		spin_unlock(&lru_lock);
	}
}

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	__gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
}
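
/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */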
void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}
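
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */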
void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}
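
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: the lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */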
static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}
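
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */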
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
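
/**
 * do_error - Fail queued holders which cannot be granted
 * @gl: The glock
 * @ret: The status from the DLM (0 for a failed "try" lock)
 *
 * Wakes each waiting (non-holder) request with -EIO on a lock error,
 * or with GLR_TRYFAILED if it was a LM_FLAG_TRY/LM_FLAG_TRY_1CB request.
 */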
static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
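
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */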
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}
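
/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */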
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
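
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */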
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
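
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 */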
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
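
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 */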
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
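
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */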
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
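
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 */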
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}
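
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */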
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
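
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */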
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
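
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */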
void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}
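
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */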
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
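
/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. They are kept as two otherwise identical functions in order
 * to be more informative to the user.
 */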
static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
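
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately, otherwise the demote is deferred
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */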
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
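
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */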
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}
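
/**
 * gfs2_print_dbg - print to a seq_file or, failing that, the kernel log
 * @seq: the seq_file, or NULL to print via printk
 * @fmt: printf-style format string
 */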
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the expansion and emit it verbatim; feeding the
		   expanded string back through a printf-style call would
		   re-interpret any '%' it contains. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
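
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 */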
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}
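
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 *
 * Typical call pattern (illustrative sketch only):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the object protected by the glock ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */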
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
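
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */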
int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
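
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */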
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
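
/**
 * gfs2_glock_dq_wait - dequeue a holder and wait for any demote to complete
 * @gh: the glock holder
 */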
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}
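
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */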
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
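
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */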
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
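
/**
 * glock_compare - Compare two struct gfs2_holder structures
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A == B
 */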
static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
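
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */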
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
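
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */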
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
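
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */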
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
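
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize the holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */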
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}
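
/**
 * gfs2_glock_cb - Callback used by the locking module (demote request)
 * @gl: Pointer to the glock
 * @state: the state the caller wants us to change to
 */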
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
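
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */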
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}
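
/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 */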
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};
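
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket number
 */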
static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}
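
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */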
static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
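
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */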
static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
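
/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 */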
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}
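
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */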
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}
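
/**
 * gfs2_glock_finish_truncate - resume an interrupted truncate, then re-run
 * the glock queue
 * @ip: the inode whose truncate is being resumed
 */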
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
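
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */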
static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}
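
/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that its possible to see if they are composed of spaces or
 * control characters.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */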
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, &gl->gl_flags),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
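
/**
 * gfs2_glock_init - initialize the glock module (hash table, workqueues
 * and shrinker)
 */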
int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}