/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

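/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */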
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

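/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */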
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

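/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */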
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

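/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */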
void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

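/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */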
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Fail waiting holders with an error
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail queued try locks only
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

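/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */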
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

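/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 */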
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

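/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 */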
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		}
		else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
						   &sdp->sd_flags));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

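/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 */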
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

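/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */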
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}

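/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */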
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

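/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */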
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

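/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately, otherwise mark a pending demote
 * @remote: true if this request came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */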
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

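/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or not at all.
 */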
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

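/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */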
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

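/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */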
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glock acquired of failure >= 1st attempted)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glock acquired of failure >= 1st attempted)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

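/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */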
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a spinlock around the update.
 */
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

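/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */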
static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */
static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

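/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the walk may be restarted (-EAGAIN from the rhashtable
 * iterator), so the examiner can be called more than once for the
 * same glock; each examiner consumes the reference taken here.
 */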
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 */
static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */
static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 */
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
	gfs2_glock_put(gl);	/* drop the reference taken by glock_hash_walk */
}

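/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */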
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 */
static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

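/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it is possible to use spaces in them, and any consecutive
 * continuation lines start with a space.
 */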
void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
	struct gfs2_glock *gl = gi->gl;

	if (gl) {
		if (n == 0)
			return;
		if (!lockref_put_not_zero(&gl->gl_lockref))
			gfs2_glock_queue_put(gl);
	}
	for (;;) {
		gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR_OR_NULL(gl)) {
			if (gl == ERR_PTR(-EAGAIN)) {
				n = 1;
				continue;
			}
			gl = NULL;
			break;
		}
		if (gl->gl_name.ln_sbd != gi->sdp)
			continue;
		if (n <= 1) {
			if (!lockref_get_not_dead(&gl->gl_lockref))
				continue;
			break;
		} else {
			if (__lockref_is_dead(&gl->gl_lockref))
				continue;
			n--;
		}
	}
	gi->gl = gl;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry; the
		 * first call to rhashtable_walk_next gets us the first entry.
		 */
		gi->last_pos = -1;
		gi->gl = NULL;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

2078
2079static int gfs2_glocks_open(struct inode *inode, struct file *file)
2080{
2081 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2082}
2083
2084static int gfs2_glocks_release(struct inode *inode, struct file *file)
2085{
2086 struct seq_file *seq = file->private_data;
2087 struct gfs2_glock_iter *gi = seq->private;
2088
2089 if (gi->gl)
2090 gfs2_glock_put(gi->gl);
2091 rhashtable_walk_exit(&gi->hti);
2092 return seq_release_private(inode, file);
2093}
2094
2095static int gfs2_glstats_open(struct inode *inode, struct file *file)
2096{
2097 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2098}
2099
2100static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2101{
2102 int ret = seq_open(file, &gfs2_sbstats_seq_ops);
2103 if (ret == 0) {
2104 struct seq_file *seq = file->private_data;
2105 seq->private = inode->i_private;
2106 }
2107 return ret;
2108}
2109
2110static const struct file_operations gfs2_glocks_fops = {
2111 .owner = THIS_MODULE,
2112 .open = gfs2_glocks_open,
2113 .read = seq_read,
2114 .llseek = seq_lseek,
2115 .release = gfs2_glocks_release,
2116};
2117
2118static const struct file_operations gfs2_glstats_fops = {
2119 .owner = THIS_MODULE,
2120 .open = gfs2_glstats_open,
2121 .read = seq_read,
2122 .llseek = seq_lseek,
2123 .release = gfs2_glocks_release,
2124};
2125
2126static const struct file_operations gfs2_sbstats_fops = {
2127 .owner = THIS_MODULE,
2128 .open = gfs2_sbstats_open,
2129 .read = seq_read,
2130 .llseek = seq_lseek,
2131 .release = seq_release,
2132};
2133
2134int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2135{
2136 struct dentry *dent;
2137
2138 dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2139 if (IS_ERR_OR_NULL(dent))
2140 goto fail;
2141 sdp->debugfs_dir = dent;
2142
2143 dent = debugfs_create_file("glocks",
2144 S_IFREG | S_IRUGO,
2145 sdp->debugfs_dir, sdp,
2146 &gfs2_glocks_fops);
2147 if (IS_ERR_OR_NULL(dent))
2148 goto fail;
2149 sdp->debugfs_dentry_glocks = dent;
2150
2151 dent = debugfs_create_file("glstats",
2152 S_IFREG | S_IRUGO,
2153 sdp->debugfs_dir, sdp,
2154 &gfs2_glstats_fops);
2155 if (IS_ERR_OR_NULL(dent))
2156 goto fail;
2157 sdp->debugfs_dentry_glstats = dent;
2158
2159 dent = debugfs_create_file("sbstats",
2160 S_IFREG | S_IRUGO,
2161 sdp->debugfs_dir, sdp,
2162 &gfs2_sbstats_fops);
2163 if (IS_ERR_OR_NULL(dent))
2164 goto fail;
2165 sdp->debugfs_dentry_sbstats = dent;
2166
2167 return 0;
2168fail:
2169 gfs2_delete_debugfs_file(sdp);
2170 return dent ? PTR_ERR(dent) : -ENOMEM;
2171}
2172
2173void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2174{
2175 if (sdp->debugfs_dir) {
2176 if (sdp->debugfs_dentry_glocks) {
2177 debugfs_remove(sdp->debugfs_dentry_glocks);
2178 sdp->debugfs_dentry_glocks = NULL;
2179 }
2180 if (sdp->debugfs_dentry_glstats) {
2181 debugfs_remove(sdp->debugfs_dentry_glstats);
2182 sdp->debugfs_dentry_glstats = NULL;
2183 }
2184 if (sdp->debugfs_dentry_sbstats) {
2185 debugfs_remove(sdp->debugfs_dentry_sbstats);
2186 sdp->debugfs_dentry_sbstats = NULL;
2187 }
2188 debugfs_remove(sdp->debugfs_dir);
2189 sdp->debugfs_dir = NULL;
2190 }
2191}
2192
2193int gfs2_register_debugfs(void)
2194{
2195 gfs2_root = debugfs_create_dir("gfs2", NULL);
2196 if (IS_ERR(gfs2_root))
2197 return PTR_ERR(gfs2_root);
2198 return gfs2_root ? 0 : -ENOMEM;
2199}
2200
2201void gfs2_unregister_debugfs(void)
2202{
2203 debugfs_remove(gfs2_root);
2204 gfs2_root = NULL;
2205}
2206