/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY         NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int                      reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
        struct list_head        b_list;         /* linked list */
        wait_queue_head_t       b_wait;         /* wait queue to sleep on */
        struct nlm_host *       b_host;         /* host we are blocked on */
        struct file_lock *      b_lock;         /* local file lock */
        unsigned short          b_reclaim;      /* lock needs to be reclaimed */
        __be32                  b_status;       /* grant callback status */
};

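/*
 * All nlm_wait entries for requests currently blocked on a server live
 * on this global list.  Entries are added in nlmclnt_prepare_block()
 * and removed in nlmclnt_finish_block(); nlmclnt_grant() and
 * reclaimer() scan the list.  nlm_blocked_lock protects it.
 */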
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

/**
 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 *
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.
 */
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
        struct nlm_host *host;
        u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
        int status;

        status = lockd_up();
        if (status < 0)
                return ERR_PTR(status);

        host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen,
                                   nlm_init->protocol, nlm_version,
                                   nlm_init->hostname, nlm_init->noresvport);
        if (host == NULL) {
                lockd_down();
                return ERR_PTR(-ENOLCK);
        }

        return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);

/**
 * nlmclnt_done - Release resources allocated by nlmclnt_init()
 * @host: nlm_host structure reserved by nlmclnt_init()
 *
 */
void nlmclnt_done(struct nlm_host *host)
{
        nlmclnt_release_host(host);
        lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
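
/*
 * Illustration only (not part of this file): the NFS client pairs the
 * two exported calls above once per mount, roughly as sketched below.
 * The actual call sites live in the NFS mount/umount paths
 * (fs/nfs/client.c); error handling is elided here.
 *
 *	host = nlmclnt_init(&nlm_init);		-- at mount time
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	...
 *	nlmclnt_done(host);			-- at umount time
 */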

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_wait *block;

        block = kmalloc(sizeof(*block), GFP_KERNEL);
        if (block != NULL) {
                block->b_host = host;
                block->b_lock = fl;
                init_waitqueue_head(&block->b_wait);
                block->b_status = nlm_lck_blocked;

                spin_lock(&nlm_blocked_lock);
                list_add(&block->b_list, &nlm_blocked);
                spin_unlock(&nlm_blocked_lock);
        }
        return block;
}

void nlmclnt_finish_block(struct nlm_wait *block)
{
        if (block == NULL)
                return;
        spin_lock(&nlm_blocked_lock);
        list_del(&block->b_list);
        spin_unlock(&nlm_blocked_lock);
        kfree(block);
}

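/*
 * A rough, simplified sketch of how the helpers above are used by the
 * caller; see nlmclnt_lock() in fs/lockd/clntproc.c for the real
 * sequence (names below are illustrative and details are elided):
 *
 *	block = nlmclnt_prepare_block(host, fl);
 *	for (;;) {
 *		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
 *		if (resp->status != nlm_lck_blocked)
 *			break;
 *		-- server said "blocked": wait for the GRANTED callback,
 *		-- polling again after the timeout expires
 *		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
 *		if (status < 0)
 *			break;
 *	}
 *	nlmclnt_finish_block(block);
 */
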
/*
 * Block on a lock
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
        long ret;

        /* A broken server might ask us to block even if we didn't
         * request it. Just say no!
         */
        if (block == NULL)
                return -EAGAIN;

        /* Go to sleep waiting for GRANT callback. Some servers seem
         * to lose callbacks, however, so we're going to poll from
         * time to time just to make sure.
         *
         * For now, the retry frequency is pretty high; normally
         * a 1 minute timeout would do. See the comment before
         * nlmclnt_lock for an explanation.
         */
        ret = wait_event_interruptible_timeout(block->b_wait,
                        block->b_status != nlm_lck_blocked,
                        timeout);
        if (ret < 0)
                return -ERESTARTSYS;
        req->a_res.status = block->b_status;
        return 0;
}
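
/*
 * Note on the return value above: -ERESTARTSYS means the wait was
 * interrupted by a signal.  A return of 0 does not imply the lock was
 * granted; it only means b_status was copied into req->a_res.status.
 * On a timeout b_status is still nlm_lck_blocked, which is the cue for
 * the caller to poll the server again.
 */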

/*
 * The server lockd has called us back to tell us the lock was granted.
 * Scan the list of blocked requests for a match and wake up any waiters
 * we find; return nlm_granted if at least one request matched, and
 * nlm_lck_denied otherwise.
 */
__be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
{
        const struct file_lock *fl = &lock->fl;
        const struct nfs_fh *fh = &lock->fh;
        struct nlm_wait *block;
        __be32 res = nlm_lck_denied;

        /*
         * Look up blocked request based on arguments.
         * Warning: must not use cookie to match it!
         */
        spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                struct file_lock *fl_blocked = block->b_lock;

                if (fl_blocked->fl_start != fl->fl_start)
                        continue;
                if (fl_blocked->fl_end != fl->fl_end)
                        continue;
                /*
                 * Careful! The NLM server will return the 32-bit "pid" that
                 * we put on the wire: in this case the lockowner "pid".
                 */
                if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
                        continue;
                if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
                        continue;
                if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode), fh) != 0)
                        continue;
                /* Alright, we found a lock. Set the return status
                 * and wake up the caller
                 */
                block->b_status = nlm_granted;
                wake_up(&block->b_wait);
                res = nlm_granted;
        }
        spin_unlock(&nlm_blocked_lock);
        return res;
}

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
        struct task_struct *task;

        if (!host->h_reclaiming++) {
                nlm_get_host(host);
                task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
                if (IS_ERR(task))
                        printk(KERN_ERR "lockd: unable to spawn reclaimer "
                                "thread. Locks for %s won't be reclaimed! "
                                "(%ld)\n", host->h_name, PTR_ERR(task));
        }
}

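/*
 * Body of the per-host reclaimer thread spawned above.  It re-binds to
 * the server's (possibly new) lockd port, re-sends a LOCK request for
 * every lock on h_granted, and restarts from scratch if the server's
 * NSM state changes again mid-reclaim.  Finally it wakes every process
 * blocked on this host so their pending requests are retried.
 */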
static int
reclaimer(void *ptr)
{
        struct nlm_host   *host = (struct nlm_host *) ptr;
        struct nlm_wait   *block;
        struct file_lock *fl, *next;
        u32 nsmstate;

        allow_signal(SIGKILL);

        down_write(&host->h_rwsem);
        lockd_up();     /* note: this cannot fail as lockd is already running */

        dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
        nsmstate = host->h_nsmstate;

        /* Force a portmap getport - the peer's lockd will
         * most likely end up on a different port.
         */
        host->h_nextrebind = jiffies;
        nlm_rebind_host(host);

        /* First, reclaim all locks that have been granted. */
        list_splice_init(&host->h_granted, &host->h_reclaim);
        list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
                list_del_init(&fl->fl_u.nfs_fl.list);

                /*
                 * sending this thread a SIGKILL will result in any unreclaimed
                 * locks being removed from the h_granted list. This means that
                 * the kernel will not attempt to reclaim them again if a new
                 * reclaimer thread is spawned for this host.
                 */
                if (signalled())
                        continue;
                if (nlmclnt_reclaim(host, fl) != 0)
                        continue;
                list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
                if (host->h_nsmstate != nsmstate) {
                        /* Argh! The server rebooted again! */
                        goto restart;
                }
        }

        host->h_reclaiming = 0;
        up_write(&host->h_rwsem);
        dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

        /* Now, wake up all processes that sleep on a blocked lock */
        spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (block->b_host == host) {
                        block->b_status = nlm_lck_denied_grace_period;
                        wake_up(&block->b_wait);
                }
        }
        spin_unlock(&nlm_blocked_lock);

        /* Release host handle after use */
        nlmclnt_release_host(host);
        lockd_down();
        return 0;
}