linux/fs/lockd/clntlock.c
/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY         NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int                      reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
        struct list_head        b_list;         /* linked list */
        wait_queue_head_t       b_wait;         /* where to wait on */
        struct nlm_host *       b_host;
        struct file_lock *      b_lock;         /* local file lock */
        unsigned short          b_reclaim;      /* got to reclaim lock */
        __be32                  b_status;       /* grant callback status */
};

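/*
 * Global list of blocked lock requests that are waiting for a GRANTED
 * callback from the server.  Entries are added by nlmclnt_prepare_block()
 * and removed by nlmclnt_finish_block(); nlmclnt_grant() walks this list
 * to match an incoming callback against the request it belongs to.
 */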
static LIST_HEAD(nlm_blocked);

/**
 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 *
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.
 */
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
        struct nlm_host *host;
        u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
        int status;

        status = lockd_up();
        if (status < 0)
                return ERR_PTR(status);

        host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen,
                                   nlm_init->protocol, nlm_version,
                                   nlm_init->hostname, nlm_init->noresvport);
        if (host == NULL) {
                lockd_down();
                return ERR_PTR(-ENOLCK);
        }

        return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);

/**
 * nlmclnt_done - Release resources allocated by nlmclnt_init()
 * @host: nlm_host structure reserved by nlmclnt_init()
 */
void nlmclnt_done(struct nlm_host *host)
{
        nlm_release_host(host);
        lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
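
/*
 * Typical usage (a sketch, modelled on the in-kernel NFS client; the
 * variable names below are illustrative only): fill in a struct
 * nlmclnt_initdata at mount time, keep the returned nlm_host around for
 * the lifetime of the mount, and release it again at unmount time:
 *
 *      struct nlmclnt_initdata nlm_init = {
 *              .hostname       = server_hostname,
 *              .address        = (struct sockaddr *)&server_address,
 *              .addrlen        = server_addrlen,
 *              .protocol       = IPPROTO_TCP,
 *              .nfs_version    = 3,
 *      };
 *      struct nlm_host *host = nlmclnt_init(&nlm_init);
 *
 *      if (IS_ERR(host))
 *              return PTR_ERR(host);
 *      ...
 *      nlmclnt_done(host);
 */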

/*
 * Queue up a lock for blocking so that the GRANTED callback can find it.
 * Call this before transmitting the blocking LOCK request, sleep in
 * nlmclnt_block() until the lock is granted or the wait times out, and
 * release the entry with nlmclnt_finish_block() once the request has
 * completed.  Returns NULL if the allocation fails.
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_wait *block;

        block = kmalloc(sizeof(*block), GFP_KERNEL);
        if (block != NULL) {
                block->b_host = host;
                block->b_lock = fl;
                init_waitqueue_head(&block->b_wait);
                block->b_status = nlm_lck_blocked;
                list_add(&block->b_list, &nlm_blocked);
        }
        return block;
}

/*
 * Remove the wait entry from nlm_blocked and free it.  Safe to call with
 * a NULL block, e.g. when nlmclnt_prepare_block() failed.
 */
void nlmclnt_finish_block(struct nlm_wait *block)
{
        if (block == NULL)
                return;
        list_del(&block->b_list);
        kfree(block);
}

/*
 * Block on a lock: sleep until the GRANTED callback arrives or the
 * timeout expires.  Returns -EAGAIN if no wait entry was queued,
 * -ERESTARTSYS if the sleep was interrupted by a signal, and 0
 * otherwise, with the callback status copied into req->a_res.status
 * (still nlm_lck_blocked if we merely timed out).
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
        long ret;

        /* A broken server might ask us to block even if we didn't
         * request it. Just say no!
         */
        if (block == NULL)
                return -EAGAIN;

        /* Go to sleep waiting for the GRANT callback. Some servers seem
         * to lose callbacks, however, so we poll from time to time just
         * to make sure.
         *
         * For now, the retry frequency is pretty high; normally a
         * 1 minute timeout would do. See the comment before
         * nlmclnt_lock for an explanation.
         */
        ret = wait_event_interruptible_timeout(block->b_wait,
                        block->b_status != nlm_lck_blocked,
                        timeout);
        if (ret < 0)
                return -ERESTARTSYS;
        req->a_res.status = block->b_status;
        return 0;
}

/*
 * The server lockd has called us back to tell us the lock was granted
 */
__be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
{
        const struct file_lock *fl = &lock->fl;
        const struct nfs_fh *fh = &lock->fh;
        struct nlm_wait *block;
        __be32 res = nlm_lck_denied;

        /*
         * Look up blocked request based on arguments.
         * Warning: must not use cookie to match it!
         */
        list_for_each_entry(block, &nlm_blocked, b_list) {
                struct file_lock *fl_blocked = block->b_lock;

                if (fl_blocked->fl_start != fl->fl_start)
                        continue;
                if (fl_blocked->fl_end != fl->fl_end)
                        continue;
                /*
                 * Careful! The NLM server will return the 32-bit "pid" that
                 * we put on the wire: in this case the lockowner "pid".
                 */
                if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
                        continue;
                if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
                        continue;
                if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode), fh) != 0)
                        continue;
                /* Alright, we found a lock. Set the return status
                 * and wake up the caller
                 */
                block->b_status = nlm_granted;
                wake_up(&block->b_wait);
                res = nlm_granted;
        }
        return res;
}

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
        struct task_struct *task;

        /* Only spawn one reclaimer thread per host at a time */
        if (!host->h_reclaiming++) {
                nlm_get_host(host);
                task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
                if (IS_ERR(task))
                        printk(KERN_ERR "lockd: unable to spawn reclaimer "
                                "thread. Locks for %s won't be reclaimed! "
                                "(%ld)\n", host->h_name, PTR_ERR(task));
        }
}

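/*
 * The reclaimer thread: rebind to the rebooted server, reclaim every
 * lock on the host's h_granted list via nlmclnt_reclaim(), and finally
 * wake up all processes sleeping on blocked locks with
 * nlm_lck_denied_grace_period so that they can re-drive their requests.
 */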
static int
reclaimer(void *ptr)
{
        struct nlm_host   *host = (struct nlm_host *) ptr;
        struct nlm_wait   *block;
        struct file_lock *fl, *next;
        u32 nsmstate;

        allow_signal(SIGKILL);

        down_write(&host->h_rwsem);

        /* This one ensures that our parent doesn't terminate while the
         * reclaim is in progress */
        lock_kernel();
        lockd_up();     /* note: this cannot fail as lockd is already running */

        dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
        nsmstate = host->h_nsmstate;

        /* Force a portmap getport - the peer's lockd will
         * most likely end up on a different port.
         */
        host->h_nextrebind = jiffies;
        nlm_rebind_host(host);

        /* First, reclaim all locks that have been granted. */
        list_splice_init(&host->h_granted, &host->h_reclaim);
        list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
                list_del_init(&fl->fl_u.nfs_fl.list);

                /*
                 * Sending this thread a SIGKILL will result in any unreclaimed
                 * locks being removed from the h_granted list. This means that
                 * the kernel will not attempt to reclaim them again if a new
                 * reclaimer thread is spawned for this host.
                 */
                if (signalled())
                        continue;
                if (nlmclnt_reclaim(host, fl) != 0)
                        continue;
                list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
                if (host->h_nsmstate != nsmstate) {
                        /* Argh! The server rebooted again! */
                        goto restart;
                }
        }

        host->h_reclaiming = 0;
        up_write(&host->h_rwsem);
        dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

        /* Now, wake up all processes that sleep on a blocked lock */
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (block->b_host == host) {
                        block->b_status = nlm_lck_denied_grace_period;
                        wake_up(&block->b_wait);
                }
        }

        /* Release host handle after use */
        nlm_release_host(host);
        lockd_down();
        unlock_kernel();
        return 0;
}