#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
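/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */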
static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
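/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */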
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}
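/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */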
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
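/**
 * gfs2_glock_add_to_lru - Add a glock to the LRU list
 * @gl: the glock
 */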
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}
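/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */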
void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}
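/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */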
void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		__gfs2_glock_remove_from_lru(gl);
		spin_unlock(&lru_lock);
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}
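/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: the lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */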
static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}
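/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */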
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
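/**
 * do_error - Fail waiting holders after an error or a failed "try" lock
 * @gl: The glock
 * @ret: The status returned by the DLM, or zero for a failed "try" lock
 */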
static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
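/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */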
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);

				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}
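/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */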
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
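/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */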
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
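/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 */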
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock the original request */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
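/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 */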
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
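/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */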
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
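/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 */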
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}
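/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */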
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
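/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */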
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
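/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */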
void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}
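/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop a glock reference)
 * @gh: the holder structure
 */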
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
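/**
 * gfs2_glock_holder_wait - wait-bit action for holder waits
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait below are identical; they are
 * kept separate so that the WCHAN field is more informative about which
 * kind of glock wait a task is sleeping in.
 */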
static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}
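/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */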
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}
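/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if non-zero, mark the demote as pending rather than immediate
 * @remote: true if this came from a different cluster node
 *
 * There are only two demote requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */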
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
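/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */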
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	printk(KERN_ERR "original: %pSR\n", (void *)gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	printk(KERN_ERR "new: %pSR\n", (void *)gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}
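/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */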
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
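/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */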
int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
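/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */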
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
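/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */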
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
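/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */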
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
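/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 */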
static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
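/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: an array of pointers into @ghs, used for the sorted ordering
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */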
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
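/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */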
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
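/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */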
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
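/**
 * gfs2_glock_cb - Callback used by locking module
 * @gl: Pointer to the glock
 * @state: the state the caller wants us to change to
 */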
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay, true);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
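/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */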
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}
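/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */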
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}
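/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */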
static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		clear_bit(GLF_LRU, &gl->gl_flags);
		gfs2_glock_hold(gl);
		spin_unlock(&lru_lock);
		spin_lock(&gl->gl_spin);
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put_nolock(gl);
		spin_unlock(&gl->gl_spin);
		spin_lock(&lru_lock);
	}
}
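/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */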
static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};
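/**
 * examine_bucket - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 */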
static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}
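/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 */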
static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		goto out;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
		gfs2_glock_put(gl);
	}
}
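/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */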
static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
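/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 */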
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}
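/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */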
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
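/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */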
static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}
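/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that they can be directly copied into test programs as required.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */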
int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       atomic_read(&gl->gl_ref), gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;

	gi->nhash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
		return NULL;
	}
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}