/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through the per-file mutex. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY         NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock    nlm4_deadlock
#else
#define nlm_deadlock    nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void     nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void     nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
        struct nlm_block *b;
        struct list_head *pos;

        dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
        if (list_empty(&block->b_list)) {
                kref_get(&block->b_count);
        } else {
                list_del_init(&block->b_list);
        }

        pos = &nlm_blocked;
        if (when != NLM_NEVER) {
                if ((when += jiffies) == NLM_NEVER)
                        when++;
                list_for_each(pos, &nlm_blocked) {
                        b = list_entry(pos, struct nlm_block, b_list);
                        if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
                                break;
                }
                /* On normal exit from the loop, pos == &nlm_blocked,
                 * so we will be adding to the end of the list - good
                 */
        }

        list_add_tail(&block->b_list, pos);
        block->b_when = when;
}
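
/*
 * Note on the ordering invariant (added for clarity): nlm_blocked is kept
 * sorted by b_when, an absolute jiffies deadline, with NLM_NEVER entries
 * collected at the tail. Callers pass a relative timeout; e.g. with
 * jiffies == 1000 and when == 10 * HZ the block is stored with
 * b_when == 1000 + 10 * HZ and sorts before every block that expires later.
 * The "when++" above only guards against the rare case where jiffies + when
 * happens to collide with the NLM_NEVER sentinel value. This ordering is
 * what allows nlmsvc_retry_blocked() to stop scanning at the first block
 * that is not yet due.
 */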

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
        if (!list_empty(&block->b_list)) {
                list_del_init(&block->b_list);
                nlmsvc_release_block(block);
        }
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
        struct nlm_block        *block;
        struct file_lock        *fl;

        dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
                                file, lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end, lock->fl.fl_type);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                fl = &block->b_call->a_args.lock.fl;
                dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
                                block->b_file, fl->fl_pid,
                                (long long)fl->fl_start,
                                (long long)fl->fl_end, fl->fl_type,
                                nlmdbg_cookie2a(&block->b_call->a_args.cookie));
                if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
                        kref_get(&block->b_count);
                        return block;
                }
        }

        return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
        if (a->len != b->len)
                return 0;
        if (memcmp(a->data, b->data, a->len))
                return 0;
        return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
        struct nlm_block *block;

        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
                        goto found;
        }

        return NULL;

found:
        dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
        kref_get(&block->b_count);
        return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
                    struct nlm_file *file, struct nlm_lock *lock,
                    struct nlm_cookie *cookie)
{
        struct nlm_block        *block;
        struct nlm_rqst         *call = NULL;

        call = nlm_alloc_call(host);
        if (call == NULL)
                return NULL;

        /* Allocate memory for block, and initialize arguments */
        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (block == NULL)
                goto failed;
        kref_init(&block->b_count);
        INIT_LIST_HEAD(&block->b_list);
        INIT_LIST_HEAD(&block->b_flist);

        if (!nlmsvc_setgrantargs(call, lock))
                goto failed_free;

        /* Set notifier function for VFS, and init args */
        call->a_args.lock.fl.fl_flags |= FL_SLEEP;
        call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
        nlmclnt_next_cookie(&call->a_args.cookie);

        dprintk("lockd: created block %p...\n", block);

        /* Create and initialize the block */
        block->b_daemon = rqstp->rq_server;
        block->b_host   = host;
        block->b_file   = file;
        block->b_fl = NULL;
        file->f_count++;

        /* Add to file's list of blocks */
        list_add(&block->b_flist, &file->f_blocks);

        /* Set up RPC arguments for callback */
        block->b_call = call;
        call->a_flags   = RPC_TASK_ASYNC;
        call->a_block = block;

        return block;

failed_free:
        kfree(block);
failed:
        nlm_release_call(call);
        return NULL;
}
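
/*
 * Reference counting note (added for clarity): a freshly created block
 * starts with b_count == 1, owned by the caller of nlmsvc_create_block().
 * The block also pins its supporting objects: the file (f_count++ above),
 * the nlm_rqst stored in b_call, and, through that call, the host
 * reference the caller passed in. All of these are dropped again from
 * nlmsvc_free_block() once the last kref_put() on b_count fires.
 */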

/*
 * Unlink a block: remove it from the global list and take the pending
 * lock request off the VFS blocked-lock queue. The return value is the
 * status of posix_unblock_lock(), i.e. non-zero if the request was no
 * longer queued there (for instance because it had already been granted).
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
        int status;
        dprintk("lockd: unlinking block %p...\n", block);

        /* Remove block from list */
        status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
        nlmsvc_remove_block(block);
        return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
        struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
        struct nlm_file         *file = block->b_file;

        dprintk("lockd: freeing block %p...\n", block);

        /* Remove block from file's list of blocks */
        mutex_lock(&file->f_mutex);
        list_del_init(&block->b_flist);
        mutex_unlock(&file->f_mutex);

        nlmsvc_freegrantargs(block->b_call);
        nlm_release_call(block->b_call);
        nlm_release_file(block->b_file);
        kfree(block->b_fl);
        kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
        if (block != NULL)
                kref_put(&block->b_count, nlmsvc_free_block);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
                        struct nlm_file *file,
                        nlm_host_match_fn_t match)
{
        struct nlm_block *block, *next;

restart:
        mutex_lock(&file->f_mutex);
        list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
                if (!match(block->b_host, host))
                        continue;
                /* Do not destroy blocks that are not on
                 * the global retry list - why? */
                if (list_empty(&block->b_list))
                        continue;
                kref_get(&block->b_count);
                mutex_unlock(&file->f_mutex);
                nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
                goto restart;
        }
        mutex_unlock(&file->f_mutex);
}
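
/*
 * Note on the restart pattern above (added for clarity): once a matching
 * block is found we take a private reference and drop f_mutex before
 * unlinking, since dropping the last reference ends up in
 * nlmsvc_free_block(), which takes f_mutex itself. Because the list may
 * change while the mutex is released, the scan restarts from the top; the
 * extra passes are harmless, as blocks that were already unlinked are
 * skipped by the list_empty(&block->b_list) test.
 */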

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
        locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
        memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
        call->a_args.lock.caller = utsname()->nodename;
        call->a_args.lock.oh.len = lock->oh.len;

        /* set default data area */
        call->a_args.lock.oh.data = call->a_owner;
        call->a_args.lock.svid = lock->fl.fl_pid;

        if (lock->oh.len > NLMCLNT_OHSIZE) {
                void *data = kmalloc(lock->oh.len, GFP_KERNEL);
                if (!data)
                        return 0;
                call->a_args.lock.oh.data = (u8 *) data;
        }

        memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
        return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
        if (call->a_args.lock.oh.data != call->a_owner)
                kfree(call->a_args.lock.oh.data);
}
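
/*
 * Note (added for clarity): the client's owner handle (oh) is copied into
 * the small inline a_owner buffer when it fits within NLMCLNT_OHSIZE;
 * larger handles get their own kmalloc'd buffer. nlmsvc_freegrantargs()
 * only kfree()s the data pointer when it does not point at a_owner, so
 * both cases are cleaned up correctly.
 */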

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
        __be32 status = nlm_lck_denied_nolocks;

        block->b_flags |= B_QUEUED;

        nlmsvc_insert_block(block, NLM_TIMEOUT);

        block->b_cache_req = &rqstp->rq_chandle;
        if (rqstp->rq_chandle.defer) {
                block->b_deferred_req =
                        rqstp->rq_chandle.defer(block->b_cache_req);
                if (block->b_deferred_req != NULL)
                        status = nlm_drop_reply;
        }
        dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
                block, block->b_flags, ntohl(status));

        return status;
}
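
/*
 * Note (added for clarity): deferring marks the block B_QUEUED and parks it
 * on nlm_blocked with an NLM_TIMEOUT deadline. If the transport supports
 * request deferral (rq_chandle.defer), the original RPC request is captured
 * in b_deferred_req and nlm_drop_reply tells the caller not to send any
 * reply now; the request is replayed later via the deferred request's
 * revisit() method (see retry_deferred_block()). Without deferral support
 * the caller simply answers nlm_lck_denied_nolocks.
 */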

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
                        struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
        struct nlm_block        *block = NULL;
        struct nlm_host         *host;
        int                     error;
        __be32                  ret;

        dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
                                file->f_file->f_path.dentry->d_inode->i_sb->s_id,
                                file->f_file->f_path.dentry->d_inode->i_ino,
                                lock->fl.fl_type, lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end,
                                wait);

        /* Create host handle for callback */
        host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
        if (host == NULL)
                return nlm_lck_denied_nolocks;

        /* Lock file against concurrent access */
        mutex_lock(&file->f_mutex);
        /* Get existing block (in case client is busy-waiting)
         * or create new block
         */
        block = nlmsvc_lookup_block(file, lock);
        if (block == NULL) {
                block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
                                lock, cookie);
                ret = nlm_lck_denied_nolocks;
                if (block == NULL)
                        goto out;
                lock = &block->b_call->a_args.lock;
        } else
                lock->fl.fl_flags &= ~FL_SLEEP;

        if (block->b_flags & B_QUEUED) {
                dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
                                                        block, block->b_flags);
                if (block->b_granted) {
                        nlmsvc_unlink_block(block);
                        ret = nlm_granted;
                        goto out;
                }
                if (block->b_flags & B_TIMED_OUT) {
                        nlmsvc_unlink_block(block);
                        ret = nlm_lck_denied;
                        goto out;
                }
                ret = nlm_drop_reply;
                goto out;
        }

        if (!wait)
                lock->fl.fl_flags &= ~FL_SLEEP;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;

        dprintk("lockd: vfs_lock_file returned %d\n", error);
        switch (error) {
                case 0:
                        ret = nlm_granted;
                        goto out;
                case -EAGAIN:
                        ret = nlm_lck_denied;
                        break;
                case -EINPROGRESS:
                        if (wait)
                                break;
                        /* Filesystem lock operation is in progress
                           Add it to the queue waiting for callback */
                        ret = nlmsvc_defer_lock_rqst(rqstp, block);
                        goto out;
                case -EDEADLK:
                        ret = nlm_deadlock;
                        goto out;
                default:                        /* includes ENOLCK */
                        ret = nlm_lck_denied_nolocks;
                        goto out;
        }

        ret = nlm_lck_denied;
        if (!wait)
                goto out;

        ret = nlm_lck_blocked;

        /* Append to list of blocked */
        nlmsvc_insert_block(block, NLM_NEVER);
out:
        mutex_unlock(&file->f_mutex);
        nlmsvc_release_block(block);
        nlm_release_host(host);
        dprintk("lockd: nlmsvc_lock returned %u\n", ret);
        return ret;
}
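
/*
 * Summary of the status mapping above (added for clarity): a vfs_lock_file()
 * result of 0 becomes nlm_granted; -EAGAIN becomes nlm_lck_denied, or
 * nlm_lck_blocked when the client asked to wait, in which case the block is
 * queued with NLM_NEVER; -EDEADLK becomes nlm_deadlock (nlm4_deadlock for
 * NLMv4, plain nlm_lck_denied otherwise, per the #define at the top);
 * -EINPROGRESS defers the reply for a non-blocking request and is treated
 * like -EAGAIN for a blocking one; anything else, including -ENOLCK, is
 * reported as nlm_lck_denied_nolocks.
 */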

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
                struct nlm_lock *lock, struct nlm_lock *conflock,
                struct nlm_cookie *cookie)
{
        struct nlm_block        *block = NULL;
        int                     error;
        __be32                  ret;

        dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
                                file->f_file->f_path.dentry->d_inode->i_sb->s_id,
                                file->f_file->f_path.dentry->d_inode->i_ino,
                                lock->fl.fl_type,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);

        /* Get existing block (in case client is busy-waiting) */
        block = nlmsvc_lookup_block(file, lock);

        if (block == NULL) {
                struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
                struct nlm_host *host;

                if (conf == NULL)
                        return nlm_granted;
                /* Create host handle for callback */
                host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
                if (host == NULL) {
                        kfree(conf);
                        return nlm_lck_denied_nolocks;
                }
                block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
                if (block == NULL) {
                        kfree(conf);
                        return nlm_granted;
                }
                block->b_fl = conf;
        }
        if (block->b_flags & B_QUEUED) {
                dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
                        block, block->b_flags, block->b_fl);
                if (block->b_flags & B_TIMED_OUT) {
                        nlmsvc_unlink_block(block);
                        return nlm_lck_denied;
                }
                if (block->b_flags & B_GOT_CALLBACK) {
                        if (block->b_fl != NULL
                                        && block->b_fl->fl_type != F_UNLCK) {
                                lock->fl = *block->b_fl;
                                goto conf_lock;
                        } else {
                                nlmsvc_unlink_block(block);
                                return nlm_granted;
                        }
                }
                return nlm_drop_reply;
        }

        error = vfs_test_lock(file->f_file, &lock->fl);
        if (error == -EINPROGRESS)
                return nlmsvc_defer_lock_rqst(rqstp, block);
        if (error) {
                ret = nlm_lck_denied_nolocks;
                goto out;
        }
        if (lock->fl.fl_type == F_UNLCK) {
                ret = nlm_granted;
                goto out;
        }

conf_lock:
        dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
                lock->fl.fl_type, (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end);
        conflock->caller = "somehost";  /* FIXME */
        conflock->len = strlen(conflock->caller);
        conflock->oh.len = 0;           /* don't return OH info */
        conflock->svid = lock->fl.fl_pid;
        conflock->fl.fl_type = lock->fl.fl_type;
        conflock->fl.fl_start = lock->fl.fl_start;
        conflock->fl.fl_end = lock->fl.fl_end;
        ret = nlm_lck_denied;
out:
        if (block)
                nlmsvc_release_block(block);
        return ret;
}
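
/*
 * Note on the deferred GETLK path above (added for clarity): when the
 * filesystem cannot answer vfs_test_lock() immediately, a block is created
 * whose b_fl buffer will later receive the conflicting lock from the
 * fl_grant callback (see nlmsvc_update_deferred_block()). On the replayed
 * request, B_GOT_CALLBACK plus a non-F_UNLCK b_fl means a conflict was
 * reported, while an F_UNLCK (or missing) b_fl means the tested range is
 * free and nlm_granted is returned.
 */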

/*
 * Remove a lock.
 * This also acts as an implicit CANCEL: consider the case where we send
 * a GRANT_MSG, the client's GRANT_RES reply gets lost, and the client
 * calls UNLOCK immediately afterwards. In that case the block will still
 * be around and must be removed here.
 */
__be32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
        int     error;

        dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
                                file->f_file->f_path.dentry->d_inode->i_sb->s_id,
                                file->f_file->f_path.dentry->d_inode->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);

        /* First, cancel any lock that might be there */
        nlmsvc_cancel_blocked(file, lock);

        lock->fl.fl_type = F_UNLCK;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

        return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
        struct nlm_block        *block;
        int status = 0;

        dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
                                file->f_file->f_path.dentry->d_inode->i_sb->s_id,
                                file->f_file->f_path.dentry->d_inode->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);

        mutex_lock(&file->f_mutex);
        block = nlmsvc_lookup_block(file, lock);
        mutex_unlock(&file->f_mutex);
        if (block != NULL) {
                vfs_cancel_lock(block->b_file->f_file,
                                &block->b_call->a_args.lock.fl);
                status = nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
        }
        return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if fl_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For a GETLK request it will copy the reply into the nlm_block.
 * For a SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked queue,
 * where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
 * the deferred RPC for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
                             int result)
{
        block->b_flags |= B_GOT_CALLBACK;
        if (result == 0)
                block->b_granted = 1;
        else
                block->b_flags |= B_TIMED_OUT;
        if (conf) {
                if (block->b_fl)
                        locks_copy_lock(block->b_fl, conf);
        }
}

static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
                                        int result)
{
        struct nlm_block *block;
        int rc = -ENOENT;

        lock_kernel();
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
                        dprintk("lockd: nlmsvc_grant_deferred block %p flags %d\n",
                                                        block, block->b_flags);
                        if (block->b_flags & B_QUEUED) {
                                if (block->b_flags & B_TIMED_OUT) {
                                        rc = -ENOLCK;
                                        break;
                                }
                                nlmsvc_update_deferred_block(block, conf, result);
                        } else if (result == 0)
                                block->b_granted = 1;

                        nlmsvc_insert_block(block, 0);
                        svc_wake_up(block->b_daemon);
                        rc = 0;
                        break;
                }
        }
        unlock_kernel();
        if (rc == -ENOENT)
                printk(KERN_WARNING "lockd: grant for unknown block\n");
        return rc;
}
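
/*
 * Note on the return contract (added for clarity): nlmsvc_grant_deferred()
 * returns 0 when it found a matching block and queued it for lockd to act
 * on, -ENOLCK when the deferred request had already timed out, and -ENOENT
 * when no matching block exists any more. Inserting with a relative timeout
 * of 0 puts the block near the head of nlm_blocked, and svc_wake_up() prods
 * lockd so that nlmsvc_retry_blocked() runs promptly.
 */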

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
        struct nlm_block        *block;

        dprintk("lockd: VFS unblock notification for fl %p\n", fl);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
                        nlmsvc_insert_block(block, 0);
                        svc_wake_up(block->b_daemon);
                        return;
                }
        }

        printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
        return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}

struct lock_manager_operations nlmsvc_lock_operations = {
        .fl_compare_owner = nlmsvc_same_owner,
        .fl_notify = nlmsvc_notify_blocked,
        .fl_grant = nlmsvc_grant_deferred,
};
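
/*
 * Note (added for clarity): these lock_manager_operations are attached to
 * the file_lock embedded in each nlm_block (fl_lmops is set in
 * nlmsvc_create_block()). The VFS and the underlying filesystem call back
 * through them: fl_notify when a lock we are blocked on goes away,
 * fl_grant when a deferred filesystem lock request completes, and
 * fl_compare_owner when lock ownership needs to be compared.
 */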

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -   we don't want to use a synchronous RPC thread, otherwise
 *      we might find ourselves hanging on a dead portmapper.
 *  -   Some lockd implementations (e.g. HP) don't react to
 *      RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
        struct nlm_file         *file = block->b_file;
        struct nlm_lock         *lock = &block->b_call->a_args.lock;
        int                     error;

        dprintk("lockd: grant blocked lock %p\n", block);

        kref_get(&block->b_count);

        /* Unlink block request from list */
        nlmsvc_unlink_block(block);

        /* If b_granted is true this means we've been here before.
         * Just retry the grant callback, possibly refreshing the RPC
         * binding */
        if (block->b_granted) {
                nlm_rebind_host(block->b_host);
                goto callback;
        }

        /* Try the lock operation again */
        lock->fl.fl_flags |= FL_SLEEP;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;

        switch (error) {
        case 0:
                break;
        case -EAGAIN:
        case -EINPROGRESS:
                dprintk("lockd: lock still blocked error %d\n", error);
                nlmsvc_insert_block(block, NLM_NEVER);
                nlmsvc_release_block(block);
                return;
        default:
                printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
                                -error, __FUNCTION__);
                nlmsvc_insert_block(block, 10 * HZ);
                nlmsvc_release_block(block);
                return;
        }

callback:
        /* Lock was granted by VFS. */
        dprintk("lockd: GRANTing blocked lock.\n");
        block->b_granted = 1;

        /* Schedule next grant callback in 30 seconds */
        nlmsvc_insert_block(block, 30 * HZ);

        /* Call the client */
        nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops);
}
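
/*
 * Note on the retry schedule above (added for clarity): once the VFS lock
 * has been acquired the block stays on nlm_blocked with a 30 second
 * deadline, so if no GRANTED_RES arrives the GRANTED_MSG callback is simply
 * repeated (with the host rebound) the next time lockd scans the list. The
 * RPC completion handler below shortens or lengthens that deadline to
 * 10 or 60 seconds depending on whether the RPC itself failed or merely
 * awaits the client's response.
 */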

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * had better not sleep. Therefore, we put the blocked lock on the
 * nlm_blocked chain once more in order to have it removed by lockd itself
 * (which can then sleep on the file's f_mutex without disrupting e.g. the
 * nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst         *call = data;
        struct nlm_block        *block = call->a_block;
        unsigned long           timeout;

        dprintk("lockd: GRANT_MSG RPC callback\n");

        /* Technically, we should take the file's f_mutex here. Since we
         * move the block towards the head of the queue only, no harm
         * can be done, though. */
        if (task->tk_status < 0) {
                /* RPC error: Re-insert for retransmission */
                timeout = 10 * HZ;
        } else {
                /* Call was successful, now wait for client callback */
                timeout = 60 * HZ;
        }
        nlmsvc_insert_block(block, timeout);
        svc_wake_up(block->b_daemon);
}

static void nlmsvc_grant_release(void *data)
{
        struct nlm_rqst         *call = data;

        nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
        .rpc_call_done = nlmsvc_grant_callback,
        .rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
        struct nlm_block        *block;

        dprintk("grant_reply: looking for cookie %x, s=%d\n",
                *(unsigned int *)(cookie->data), status);
        if (!(block = nlmsvc_find_block(cookie)))
                return;

        if (status == nlm_lck_denied_grace_period) {
                /* Try again in a couple of seconds */
                nlmsvc_insert_block(block, 10 * HZ);
        } else {
                /* Lock is now held by client, or has been rejected.
                 * In both cases, the block should be removed. */
                nlmsvc_unlink_block(block);
        }
        nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
        if (!(block->b_flags & B_GOT_CALLBACK))
                block->b_flags |= B_TIMED_OUT;
        nlmsvc_insert_block(block, NLM_TIMEOUT);
        dprintk("revisit block %p flags %d\n", block, block->b_flags);
        if (block->b_deferred_req) {
                block->b_deferred_req->revisit(block->b_deferred_req, 0);
                block->b_deferred_req = NULL;
        }
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
        unsigned long   timeout = MAX_SCHEDULE_TIMEOUT;
        struct nlm_block *block;

        while (!list_empty(&nlm_blocked)) {
                block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

                if (block->b_when == NLM_NEVER)
                        break;
                if (time_after(block->b_when, jiffies)) {
                        timeout = block->b_when - jiffies;
                        break;
                }

                dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
                        block, block->b_when);
                if (block->b_flags & B_QUEUED) {
                        dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
                                block, block->b_granted, block->b_flags);
                        retry_deferred_block(block);
                } else
                        nlmsvc_grant_blocked(block);
        }

        return timeout;
}
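
/*
 * Note (added for clarity): the value returned by nlmsvc_retry_blocked()
 * is a relative timeout in jiffies, or MAX_SCHEDULE_TIMEOUT when nothing
 * is pending. Outside this file, lockd's main loop is expected to use it
 * as the maximum time to sleep before scanning nlm_blocked again; that is
 * an assumption about the caller rather than something enforced here.
 */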