linux/fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
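
/*
 * Illustrative sketch (not part of the original file): callers pair
 * gl_lock_addr() with the bucket's reader/writer lock like this, where
 * "hash" stands for a value produced by gl_hash() below. On !SMP builds
 * gl_lock_addr() returns NULL, which is harmless because the uniprocessor
 * read_lock()/read_unlock() implementations ignore their argument:
 *
 *	read_lock(gl_lock_addr(hash));
 *	...walk gl_hash_table[hash].hb_list...
 *	read_unlock(gl_lock_addr(hash));
 */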

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
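
/*
 * Illustrative sketch (not part of the original file): a few concrete
 * cases of the compatibility rules above:
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)           -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)    -> 0
 *	relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY)  -> 1
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)  -> 0
 */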

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
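
/*
 * Illustrative sketch (not part of the original file): a typical lookup
 * of an inode glock by disk block number. "no_addr" is a placeholder for
 * the inode's block number; CREATE asks for the glock to be instantiated
 * if it is not already in the hash table:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...use gl, e.g. enqueue a holder on it...
 *	gfs2_glock_put(gl);	// drop the reference gfs2_glock_get took
 */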

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }
        set_bit(GLF_LOCK, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }
        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}
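
/*
 * Illustrative sketch (not part of the original file): GLF_LOCK acts as
 * a sleeping mutex over the glock structure itself. A caller needing
 * exclusive access to manipulate a glock brackets the work like this:
 *
 *	gfs2_glmutex_lock(gl);
 *	...examine or modify gl with exclusive access...
 *	gfs2_glmutex_unlock(gl);
 *
 * The trylock variant below returns 1 on success and is used on paths
 * (reclaim, scanning) that must not sleep waiting for the mutex.
 */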

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from another node via the lock module
 * @delay: if nonzero, mark the demote as pending (GLF_PENDING_DEMOTE)
 *         rather than immediate (GLF_DEMOTE)
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the request, or NULL for a demote, in which
 *      case the target state is taken from gl->gl_demote_state
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                     (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
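
/*
 * Illustrative sketch (not part of the original file): the common
 * synchronous pattern built from the primitives above, assuming an
 * already-instantiated glock "gl". The gfs2_glock_nq_init() helper in
 * glock.h wraps this same holder-init-then-enqueue sequence:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	...the inter-node lock is held in at least LM_ST_SHARED here...
 *	gfs2_glock_dq_uninit(&gh);
 */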

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
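
/*
 * Illustrative sketch (not part of the original file): one plausible use
 * of the asynchronous interface. With GL_ASYNC set, gfs2_glock_nq()
 * queues the request and returns immediately; completion is detected by
 * polling, and gfs2_glock_wait() collects the final status:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	while (!gfs2_glock_poll(&gh))
 *		...overlap other work with the lock request...
 *	error = gfs2_glock_wait(&gh);
 */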

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                spin_lock(&gl->gl_spin);
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}
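
/*
 * Illustrative sketch (not part of the original file): acquiring two
 * glocks at once ("gl_a" and "gl_b" are placeholders). The holders are
 * initialized individually, then passed as an array so nq_m_sync() can
 * sort them by lock number and avoid AB-BA deadlocks with other
 * multi-lock callers:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		...both locks held...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */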

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize the holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
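
/*
 * Illustrative sketch (not part of the original file): the LVB (lock
 * value block) is a small blob of data the lock module keeps attached
 * to an inter-node lock, used e.g. to ship quota information between
 * nodes. Holds are counted, so hold/unhold calls must balance:
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (error)
 *		return error;
 *	...read or write the data at gl->gl_lvb...
 *	gfs2_lvb_unhold(gl);
 */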

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl)) {
                        up_read(&gfs2_umount_flush_sem);
                        return;
                }
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket number
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}
1694
1695/**
1696 * scan_glock - look at a glock and see if we can reclaim it
1697 * @gl: the glock to look at
1698 *
1699 */
1700
1701static void scan_glock(struct gfs2_glock *gl)
1702{
1703        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1704                return;
1705
1706        if (gfs2_glmutex_trylock(gl)) {
1707                if (list_empty(&gl->gl_holders) &&
1708                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1709                        goto out_schedule;
1710                gfs2_glmutex_unlock(gl);
1711        }
1712        return;
1713
1714out_schedule:
1715        gfs2_glmutex_unlock(gl);
1716        gfs2_glock_schedule_for_reclaim(gl);
1717}
1718
1719/**
1720 * clear_glock - look at a glock and see if we can free it from glock cache
1721 * @gl: the glock to look at
1722 *
1723 */
1724
1725static void clear_glock(struct gfs2_glock *gl)
1726{
1727        struct gfs2_sbd *sdp = gl->gl_sbd;
1728        int released;
1729
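        /*
         * Unlike scan_glock(), this removes the glock from the reclaim
         * list itself (dropping the reclaim reference) and demotes
         * without a demote_ok() check, since the whole table is being
         * emptied.
         */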
1730        spin_lock(&sdp->sd_reclaim_lock);
1731        if (!list_empty(&gl->gl_reclaim)) {
1732                list_del_init(&gl->gl_reclaim);
1733                atomic_dec(&sdp->sd_reclaim_count);
1734                spin_unlock(&sdp->sd_reclaim_lock);
1735                released = gfs2_glock_put(gl);
1736                gfs2_assert(sdp, !released);
1737        } else {
1738                spin_unlock(&sdp->sd_reclaim_lock);
1739        }
1740
1741        if (gfs2_glmutex_trylock(gl)) {
1742                if (list_empty(&gl->gl_holders) &&
1743                    gl->gl_state != LM_ST_UNLOCKED)
1744                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1745                gfs2_glmutex_unlock(gl);
1746        }
1747}
1748
1749/**
1750 * gfs2_gl_hash_clear - Empty out the glock hash table
1751 * @sdp: the filesystem
1752 * @wait: wait until it's all gone
1753 *
1754 * Called when unmounting the filesystem, or when the inter-node lock
1755 * manager requests DROPLOCKS because it is running out of capacity.
1756 */
1757
1758void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1759{
1760        unsigned long t;
1761        unsigned int x;
1762        int cont;
1763
1764        t = jiffies;
1765
1766        for (;;) {
1767                cont = 0;
1768                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1769                        if (examine_bucket(clear_glock, sdp, x))
1770                                cont = 1;
1771                }
1772
1773                if (!wait || !cont)
1774                        break;
1775
1776                if (time_after_eq(jiffies,
1777                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1778                        fs_warn(sdp, "Unmount seems to be stalled. "
1779                                     "Dumping lock state...\n");
1780                        gfs2_dump_lockstate(sdp);
1781                        t = jiffies;
1782                }
1783
1784                down_write(&gfs2_umount_flush_sem);
1785                invalidate_inodes(sdp->sd_vfs);
1786                up_write(&gfs2_umount_flush_sem);
1787                msleep(10);
1788        }
1789}
1790
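/*
 * Illustrative usage (hypothetical call sites): an unmount path waits
 * for the table to drain, while a DROPLOCKS callback makes a single
 * pass over every bucket and returns:
 *
 *	gfs2_gl_hash_clear(sdp, 1);	(block until the table is empty)
 *	gfs2_gl_hash_clear(sdp, 0);	(one sweep, then return)
 */
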
1791/*
1792 * Diagnostic routines to help debug distributed deadlocks
1793 */
1794
1795static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1796                              unsigned long address)
1797{
1798        char buffer[KSYM_SYMBOL_LEN];
1799
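        /*
         * The address is typically a return address recorded when a
         * holder was initialized (gh_ip); turn it back into a symbol
         * name for the dump.
         */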
1800        sprint_symbol(buffer, address);
1801        print_dbg(gi, fmt, buffer);
1802}
1803
1804/**
1805 * dump_holder - print information about a glock holder
1806 * @gi: the glock iterator (NULL to print to the console)
1807 * @str: a string naming the type of holder
1808 * @gh: the glock holder
1809 *
1810 * Returns: 0
1810 */
1811
1812static int dump_holder(struct glock_iter *gi, char *str,
1813                       struct gfs2_holder *gh)
1814{
1815        unsigned int x;
1816        struct task_struct *gh_owner;
1817
1818        print_dbg(gi, "  %s\n", str);
1819        if (gh->gh_owner_pid) {
1820                print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1821                gh_owner = find_task_by_pid(gh->gh_owner_pid);
1822                if (gh_owner)
1823                        print_dbg(gi, "(%s)\n", gh_owner->comm);
1824                else
1825                        print_dbg(gi, "(ended)\n");
1826        } else
1827                print_dbg(gi, "    owner = -1\n");
1828        print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1829        print_dbg(gi, "    gh_flags =");
1830        for (x = 0; x < 32; x++)
1831                if (gh->gh_flags & (1 << x))
1832                        print_dbg(gi, " %u", x);
1833        print_dbg(gi, " \n");
1834        print_dbg(gi, "    error = %d\n", gh->gh_error);
1835        print_dbg(gi, "    gh_iflags =");
1836        for (x = 0; x < 32; x++)
1837                if (test_bit(x, &gh->gh_iflags))
1838                        print_dbg(gi, " %u", x);
1839        print_dbg(gi, " \n");
1840        gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1841
1842        return 0;
1843}
1844
1845/**
1846 * dump_inode - print information about an inode
1847 * @gi: the glock iterator (NULL to print to the console)
1848 * @ip: the inode
1849 *
1850 * Returns: 0
1850 */
1851
1852static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1853{
1854        unsigned int x;
1855
1856        print_dbg(gi, "  Inode:\n");
1857        print_dbg(gi, "    num = %llu/%llu\n",
1858                  (unsigned long long)ip->i_no_formal_ino,
1859                  (unsigned long long)ip->i_no_addr);
1860        print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1861        print_dbg(gi, "    i_flags =");
1862        for (x = 0; x < 32; x++)
1863                if (test_bit(x, &ip->i_flags))
1864                        print_dbg(gi, " %u", x);
1865        print_dbg(gi, " \n");
1866        return 0;
1867}
1868
1869/**
1870 * dump_glock - print information about a glock
1871 * @gi: the glock iterator (NULL to print to the console)
1872 * @gl: the glock
1873 *
1874 * Returns: 0 on success, -ENOBUFS if the glock's inode is busy
1875 */
1876
1877static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1878{
1879        struct gfs2_holder *gh;
1880        unsigned int x;
1881        int error = -ENOBUFS;
1882        struct task_struct *gl_owner;
1883
1884        spin_lock(&gl->gl_spin);
1885
1886        print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1887                   (unsigned long long)gl->gl_name.ln_number);
1888        print_dbg(gi, "  gl_flags =");
1889        for (x = 0; x < 32; x++) {
1890                if (test_bit(x, &gl->gl_flags))
1891                        print_dbg(gi, " %u", x);
1892        }
1893        if (!test_bit(GLF_LOCK, &gl->gl_flags))
1894                print_dbg(gi, " (unlocked)");
1895        print_dbg(gi, " \n");
1896        print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1897        print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1898        if (gl->gl_owner_pid) {
1899                gl_owner = find_task_by_pid(gl->gl_owner_pid);
1900                if (gl_owner)
1901                        print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1902                                  gl->gl_owner_pid, gl_owner->comm);
1903                else
1904                        print_dbg(gi, "  gl_owner = %d (ended)\n",
1905                                  gl->gl_owner_pid);
1906        } else
1907                print_dbg(gi, "  gl_owner = -1\n");
1908        print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1909        print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1910        print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1911        print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1912        print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1913        print_dbg(gi, "  le = %s\n",
1914                   (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1915        print_dbg(gi, "  reclaim = %s\n",
1916                   (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1917        if (gl->gl_aspace)
1918                print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1919                           gl->gl_aspace->i_mapping->nrpages);
1920        else
1921                print_dbg(gi, "  aspace = no\n");
1922        print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1923        if (gl->gl_req_gh) {
1924                error = dump_holder(gi, "Request", gl->gl_req_gh);
1925                if (error)
1926                        goto out;
1927        }
1928        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1929                error = dump_holder(gi, "Holder", gh);
1930                if (error)
1931                        goto out;
1932        }
1933        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1934                error = dump_holder(gi, "Waiter1", gh);
1935                if (error)
1936                        goto out;
1937        }
1938        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1939                error = dump_holder(gi, "Waiter3", gh);
1940                if (error)
1941                        goto out;
1942        }
1943        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1944                print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1945                          gl->gl_demote_state, (unsigned long long)
1946                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
1947        }
1948        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1949                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1950                        list_empty(&gl->gl_holders)) {
1951                        error = dump_inode(gi, gl->gl_object);
1952                        if (error)
1953                                goto out;
1954                } else {
1955                        error = -ENOBUFS;
1956                        print_dbg(gi, "  Inode: busy\n");
1957                }
1958        }
1959
1960        error = 0;
1961
1962out:
1963        spin_unlock(&gl->gl_spin);
1964        return error;
1965}
1966
1967/**
1968 * gfs2_dump_lockstate - print out the current lockstate
1969 * @sdp: the filesystem
1970 *
1971 * Dumps the state of every glock belonging to @sdp to the console.
1973 *
1974 */
1975
1976static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1977{
1978        struct gfs2_glock *gl;
1979        struct hlist_node *h;
1980        unsigned int x;
1981        int error = 0;
1982
1983        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1984
1985                read_lock(gl_lock_addr(x));
1986
1987                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1988                        if (gl->gl_sbd != sdp)
1989                                continue;
1990
1991                        error = dump_glock(NULL, gl);
1992                        if (error)
1993                                break;
1994                }
1995
1996                read_unlock(gl_lock_addr(x));
1997
1998                if (error)
1999                        break;
2000        }
2001
2003        return error;
2004}
2005
2006/**
2007 * gfs2_scand - Look for cached glocks and inodes to toss from memory
2008 * @data: unused; the scan covers every mounted filesystem
2009 *
2010 * A single instance of this daemon runs system-wide, finding candidates
2011 * to add to each filesystem's sd_reclaim_list.  See gfs2_glockd()
2012 */
2013
2014static int gfs2_scand(void *data)
2015{
2016        unsigned x;
2017        unsigned delay;
2018
2019        while (!kthread_should_stop()) {
2020                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2021                        examine_bucket(scan_glock, NULL, x);
2022                if (freezing(current))
2023                        refrigerator();
2024                delay = scand_secs;
2025                if (delay < 1)
2026                        delay = 1;
2027                schedule_timeout_interruptible(delay * HZ);
2028        }
2029
2030        return 0;
2031}
2032
2035int __init gfs2_glock_init(void)
2036{
2037        unsigned i;
2038        for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2039                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2040        }
2041#ifdef GL_HASH_LOCK_SZ
2042        for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
2043                rwlock_init(&gl_hash_locks[i]);
2044        }
2045#endif
2046
2047        scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2048        if (IS_ERR(scand_process))
2049                return PTR_ERR(scand_process);
2050
2051        glock_workqueue = create_workqueue("glock_workqueue");
2052        if (!glock_workqueue) { /* returns NULL on failure, not an ERR_PTR */
2053                kthread_stop(scand_process);
2054                return -ENOMEM;
2055        }
2056
2057        return 0;
2058}
2059
2060void gfs2_glock_exit(void)
2061{
2062        destroy_workqueue(glock_workqueue);
2063        kthread_stop(scand_process);
2064}
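
/*
 * Illustrative call order (cf. init_gfs2_fs() in main.c, hypothetical
 * error handling): gfs2_glock_init() must succeed before any mount,
 * and gfs2_glock_exit() reverses it on module unload:
 *
 *	error = gfs2_glock_init();
 *	if (error)
 *		return error;
 */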
2065
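/*
 * With the module built as "gfs2", scand_secs can also be tuned at
 * runtime via /sys/module/gfs2/parameters/scand_secs (mode 0644);
 * gfs2_scand() clamps a value of zero up to one second.
 */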
2066module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2067MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
2068
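/*
 * gfs2_glock_iter_next - advance the iterator to the next glock
 * @gi: the iterator
 *
 * A reference is held on the last glock returned, which keeps it on
 * its hash chain so the chain can be walked onward under the bucket
 * lock on the next call.  Note this assumes gl_list is the first
 * member of struct gfs2_glock, so that hlist_entry() maps a NULL
 * next pointer back to NULL.
 *
 * Returns: 0 on success, 1 when the hash table is exhausted
 */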
2069static int gfs2_glock_iter_next(struct glock_iter *gi)
2070{
2071        struct gfs2_glock *gl;
2072
2073restart:
2074        read_lock(gl_lock_addr(gi->hash));
2075        gl = gi->gl;
2076        if (gl) {
2077                gi->gl = hlist_entry(gl->gl_list.next,
2078                                     struct gfs2_glock, gl_list);
2079                if (gi->gl)
2080                        gfs2_glock_hold(gi->gl);
2081        }
2082        read_unlock(gl_lock_addr(gi->hash));
2083        if (gl)
2084                gfs2_glock_put(gl);
2085        if (gl && gi->gl == NULL)
2086                gi->hash++;
2087        while (gi->gl == NULL) {
2088                if (gi->hash >= GFS2_GL_HASH_SIZE)
2089                        return 1;
2090                read_lock(gl_lock_addr(gi->hash));
2091                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2092                                     struct gfs2_glock, gl_list);
2093                if (gi->gl)
2094                        gfs2_glock_hold(gi->gl);
2095                read_unlock(gl_lock_addr(gi->hash));
2096                /*
2097                 * Only move past a bucket once it is exhausted, so the
2098                 * chain walk above keeps using this bucket's own lock.
2099                 */
2100                if (gi->gl == NULL)
2101                        gi->hash++;
2102        }
2098
2099        if (gi->sdp != gi->gl->gl_sbd)
2100                goto restart;
2101
2102        return 0;
2103}
2104
2105static void gfs2_glock_iter_free(struct glock_iter *gi)
2106{
2107        if (gi->gl)
2108                gfs2_glock_put(gi->gl);
2109        kfree(gi);
2110}
2111
2112static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2113{
2114        struct glock_iter *gi;
2115
2116        gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2117        if (!gi)
2118                return NULL;
2119
2120        gi->sdp = sdp;
2121        gi->hash = 0;
2122        gi->seq = NULL;
2123        gi->gl = NULL;
2124        memset(gi->string, 0, sizeof(gi->string));
2125
2126        if (gfs2_glock_iter_next(gi)) {
2127                gfs2_glock_iter_free(gi);
2128                return NULL;
2129        }
2130
2131        return gi;
2132}
2133
2134static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2135{
2136        struct glock_iter *gi;
2137        loff_t n = *pos;
2138
2139        gi = gfs2_glock_iter_init(file->private);
2140        if (!gi)
2141                return NULL;
2142
2143        while (n--) {
2144                if (gfs2_glock_iter_next(gi)) {
2145                        gfs2_glock_iter_free(gi);
2146                        return NULL;
2147                }
2148        }
2149
2150        return gi;
2151}
2152
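/*
 * Note that gfs2_glock_seq_start() above rebuilds the iterator and
 * re-walks *pos entries from the start on every call, so reading the
 * whole file is quadratic in the number of glocks; tolerable for a
 * debugging interface.
 */
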
2153static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2154                                 loff_t *pos)
2155{
2156        struct glock_iter *gi = iter_ptr;
2157
2158        (*pos)++;
2159
2160        if (gfs2_glock_iter_next(gi)) {
2161                gfs2_glock_iter_free(gi);
2162                return NULL;
2163        }
2164
2165        return gi;
2166}
2167
2168static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2169{
2170        struct glock_iter *gi = iter_ptr;
2171        if (gi)
2172                gfs2_glock_iter_free(gi);
2173}
2174
2175static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2176{
2177        struct glock_iter *gi = iter_ptr;
2178
2179        gi->seq = file;
2180        dump_glock(gi, gi->gl);
2181
2182        return 0;
2183}
2184
2185static const struct seq_operations gfs2_glock_seq_ops = {
2186        .start = gfs2_glock_seq_start,
2187        .next  = gfs2_glock_seq_next,
2188        .stop  = gfs2_glock_seq_stop,
2189        .show  = gfs2_glock_seq_show,
2190};
2191
2192static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2193{
2194        struct seq_file *seq;
2195        int ret;
2196
2197        ret = seq_open(file, &gfs2_glock_seq_ops);
2198        if (ret)
2199                return ret;
2200
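        /*
         * inode->i_private is the sdp that gfs2_create_debugfs_file()
         * passed to debugfs_create_file(), binding this seq_file to a
         * single filesystem.
         */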
2201        seq = file->private_data;
2202        seq->private = inode->i_private;
2203
2204        return 0;
2205}
2206
2207static const struct file_operations gfs2_debug_fops = {
2208        .owner   = THIS_MODULE,
2209        .open    = gfs2_debugfs_open,
2210        .read    = seq_read,
2211        .llseek  = seq_lseek,
2212        .release = seq_release
2213};
2214
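/*
 * With debugfs mounted at /sys/kernel/debug, each filesystem's glock
 * state appears at /sys/kernel/debug/gfs2/<locktable>/glocks, where
 * <locktable> is sdp->sd_table_name.
 */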
2215int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2216{
2217        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2218        if (!sdp->debugfs_dir)
2219                return -ENOMEM;
2220        sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2221                                                         S_IFREG | S_IRUGO,
2222                                                         sdp->debugfs_dir, sdp,
2223                                                         &gfs2_debug_fops);
2224        if (!sdp->debugfs_dentry_glocks)
2225                return -ENOMEM;
2226
2227        return 0;
2228}
2229
2230void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2231{
2232        if (sdp && sdp->debugfs_dir) {
2233                if (sdp->debugfs_dentry_glocks) {
2234                        debugfs_remove(sdp->debugfs_dentry_glocks);
2235                        sdp->debugfs_dentry_glocks = NULL;
2236                }
2237                debugfs_remove(sdp->debugfs_dir);
2238                sdp->debugfs_dir = NULL;
2239        }
2240}
2241
2242int gfs2_register_debugfs(void)
2243{
2244        gfs2_root = debugfs_create_dir("gfs2", NULL);
2245        return gfs2_root ? 0 : -ENOMEM;
2246}
2247
2248void gfs2_unregister_debugfs(void)
2249{
2250        debugfs_remove(gfs2_root);
2251        gfs2_root = NULL;
2252}
2253