linux/fs/nfs/nfs4state.c
   1/*
   2 *  fs/nfs/nfs4state.c
   3 *
   4 *  Client-side XDR for NFSv4.
   5 *
   6 *  Copyright (c) 2002 The Regents of the University of Michigan.
   7 *  All rights reserved.
   8 *
   9 *  Kendrick Smith <kmsmith@umich.edu>
  10 *
  11 *  Redistribution and use in source and binary forms, with or without
  12 *  modification, are permitted provided that the following conditions
  13 *  are met:
  14 *
  15 *  1. Redistributions of source code must retain the above copyright
  16 *     notice, this list of conditions and the following disclaimer.
  17 *  2. Redistributions in binary form must reproduce the above copyright
  18 *     notice, this list of conditions and the following disclaimer in the
  19 *     documentation and/or other materials provided with the distribution.
  20 *  3. Neither the name of the University nor the names of its
  21 *     contributors may be used to endorse or promote products derived
  22 *     from this software without specific prior written permission.
  23 *
  24 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  25 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  26 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  27 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  28 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  32 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  33 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  34 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35 *
  36 * Implementation of the NFSv4 state model.  For the time being,
  37 * this is minimal, but will be made much more complex in a
  38 * subsequent patch.
  39 */
  40
  41#include <linux/kernel.h>
  42#include <linux/slab.h>
  43#include <linux/fs.h>
  44#include <linux/nfs_fs.h>
  45#include <linux/kthread.h>
  46#include <linux/module.h>
  47#include <linux/random.h>
  48#include <linux/ratelimit.h>
  49#include <linux/workqueue.h>
  50#include <linux/bitops.h>
  51#include <linux/jiffies.h>
  52
  53#include <linux/sunrpc/clnt.h>
  54
  55#include "nfs4_fs.h"
  56#include "callback.h"
  57#include "delegation.h"
  58#include "internal.h"
  59#include "nfs4idmap.h"
  60#include "nfs4session.h"
  61#include "pnfs.h"
  62#include "netns.h"
  63
  64#define NFSDBG_FACILITY         NFSDBG_STATE
  65
  66#define OPENOWNER_POOL_SIZE     8
  67
  68const nfs4_stateid zero_stateid = {
  69        { .data = { 0 } },
  70        .type = NFS4_SPECIAL_STATEID_TYPE,
  71};
  72static DEFINE_MUTEX(nfs_clid_init_mutex);
  73
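/**
 * nfs4_init_clientid - establish and confirm an NFSv4.0 client ID
 * @clp: client state handle
 * @cred: credential to use for the SETCLIENTID operations
 *
 * Sends SETCLIENTID (unless a previous attempt is still awaiting
 * confirmation) followed by SETCLIENTID_CONFIRM, then schedules
 * lease renewal.
 *
 * Returns zero, a negative errno, or a negative NFS4ERR status.
 */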
  74int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
  75{
  76        struct nfs4_setclientid_res clid = {
  77                .clientid = clp->cl_clientid,
  78                .confirm = clp->cl_confirm,
  79        };
  80        unsigned short port;
  81        int status;
  82        struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
  83
  84        if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
  85                goto do_confirm;
  86        port = nn->nfs_callback_tcpport;
  87        if (clp->cl_addr.ss_family == AF_INET6)
  88                port = nn->nfs_callback_tcpport6;
  89
  90        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
  91        if (status != 0)
  92                goto out;
  93        clp->cl_clientid = clid.clientid;
  94        clp->cl_confirm = clid.confirm;
  95        set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
  96do_confirm:
  97        status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
  98        if (status != 0)
  99                goto out;
 100        clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 101        nfs4_schedule_state_renewal(clp);
 102out:
 103        return status;
 104}
 105
 106/**
 107 * nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
 108 *
 109 * @clp: nfs_client under test
 110 * @result: OUT: found nfs_client, or clp
 111 * @cred: credential to use for trunking test
 112 *
 113 * Returns zero, a negative errno, or a negative NFS4ERR status.
 114 * If zero is returned, an nfs_client pointer is planted in
 115 * "result".
 116 *
 117 * Note: The returned client may not yet be marked ready.
 118 */
 119int nfs40_discover_server_trunking(struct nfs_client *clp,
 120                                   struct nfs_client **result,
 121                                   struct rpc_cred *cred)
 122{
 123        struct nfs4_setclientid_res clid = {
 124                .clientid = clp->cl_clientid,
 125                .confirm = clp->cl_confirm,
 126        };
 127        struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 128        unsigned short port;
 129        int status;
 130
 131        port = nn->nfs_callback_tcpport;
 132        if (clp->cl_addr.ss_family == AF_INET6)
 133                port = nn->nfs_callback_tcpport6;
 134
 135        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
 136        if (status != 0)
 137                goto out;
 138        clp->cl_clientid = clid.clientid;
 139        clp->cl_confirm = clid.confirm;
 140
 141        status = nfs40_walk_client_list(clp, result, cred);
 142        if (status == 0) {
 143                /* Sustain the lease, even if it's empty.  If the clientid4
 144                 * goes stale it's of no use for trunking discovery. */
 145                nfs4_schedule_state_renewal(*result);
 146        }
 147out:
 148        return status;
 149}
 150
 151struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
 152{
 153        struct rpc_cred *cred = NULL;
 154
 155        if (clp->cl_machine_cred != NULL)
 156                cred = get_rpccred(clp->cl_machine_cred);
 157        return cred;
 158}
 159
 160static void nfs4_root_machine_cred(struct nfs_client *clp)
 161{
 162        struct rpc_cred *cred, *new;
 163
 164        new = rpc_lookup_machine_cred(NULL);
 165        spin_lock(&clp->cl_lock);
 166        cred = clp->cl_machine_cred;
 167        clp->cl_machine_cred = new;
 168        spin_unlock(&clp->cl_lock);
 169        if (cred != NULL)
 170                put_rpccred(cred);
 171}
 172
 173static struct rpc_cred *
 174nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 175{
 176        struct rpc_cred *cred = NULL;
 177        struct nfs4_state_owner *sp;
 178        struct rb_node *pos;
 179
 180        for (pos = rb_first(&server->state_owners);
 181             pos != NULL;
 182             pos = rb_next(pos)) {
 183                sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 184                if (list_empty(&sp->so_states))
 185                        continue;
 186                cred = get_rpccred(sp->so_cred);
 187                break;
 188        }
 189        return cred;
 190}
 191
 192/**
 193 * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
 194 * @clp: client state handle
 195 *
 196 * Returns an rpc_cred with reference count bumped, or NULL.
 197 * Caller must hold clp->cl_lock.
 198 */
 199struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
 200{
 201        struct rpc_cred *cred = NULL;
 202        struct nfs_server *server;
 203
 204        /* Use machine credentials if available */
 205        cred = nfs4_get_machine_cred_locked(clp);
 206        if (cred != NULL)
 207                goto out;
 208
 209        rcu_read_lock();
 210        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 211                cred = nfs4_get_renew_cred_server_locked(server);
 212                if (cred != NULL)
 213                        break;
 214        }
 215        rcu_read_unlock();
 216
 217out:
 218        return cred;
 219}
 220
 221static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl)
 222{
 223        if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
 224                spin_lock(&tbl->slot_tbl_lock);
 225                nfs41_wake_slot_table(tbl);
 226                spin_unlock(&tbl->slot_tbl_lock);
 227        }
 228}
 229
 230static void nfs4_end_drain_session(struct nfs_client *clp)
 231{
 232        struct nfs4_session *ses = clp->cl_session;
 233
 234        if (clp->cl_slot_tbl) {
 235                nfs4_end_drain_slot_table(clp->cl_slot_tbl);
 236                return;
 237        }
 238
 239        if (ses != NULL) {
 240                nfs4_end_drain_slot_table(&ses->bc_slot_table);
 241                nfs4_end_drain_slot_table(&ses->fc_slot_table);
 242        }
 243}
 244
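/*
 * Mark the slot table as draining and, if any slots are still in use,
 * wait (interruptibly) until the last outstanding slot has been freed.
 */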
 245static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 246{
 247        set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 248        spin_lock(&tbl->slot_tbl_lock);
 249        if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
 250                reinit_completion(&tbl->complete);
 251                spin_unlock(&tbl->slot_tbl_lock);
 252                return wait_for_completion_interruptible(&tbl->complete);
 253        }
 254        spin_unlock(&tbl->slot_tbl_lock);
 255        return 0;
 256}
 257
 258static int nfs4_begin_drain_session(struct nfs_client *clp)
 259{
 260        struct nfs4_session *ses = clp->cl_session;
 261        int ret = 0;
 262
 263        if (clp->cl_slot_tbl)
 264                return nfs4_drain_slot_tbl(clp->cl_slot_tbl);
 265
 266        /* back channel */
 267        ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
 268        if (ret)
 269                return ret;
 270        /* fore channel */
 271        return nfs4_drain_slot_tbl(&ses->fc_slot_table);
 272}
 273
 274#if defined(CONFIG_NFS_V4_1)
 275
 276static int nfs41_setup_state_renewal(struct nfs_client *clp)
 277{
 278        int status;
 279        struct nfs_fsinfo fsinfo;
 280        unsigned long now;
 281
 282        if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
 283                nfs4_schedule_state_renewal(clp);
 284                return 0;
 285        }
 286
 287        now = jiffies;
 288        status = nfs4_proc_get_lease_time(clp, &fsinfo);
 289        if (status == 0) {
 290                nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
 291                nfs4_schedule_state_renewal(clp);
 292        }
 293
 294        return status;
 295}
 296
 297static void nfs41_finish_session_reset(struct nfs_client *clp)
 298{
 299        clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 300        clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
 301        /* create_session negotiated new slot table */
 302        clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
 303        nfs41_setup_state_renewal(clp);
 304}
 305
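/**
 * nfs41_init_clientid - establish an NFSv4.1+ client ID and session
 * @clp: client state handle
 * @cred: credential to use for the EXCHANGE_ID operation
 *
 * Sends EXCHANGE_ID (unless a previous attempt is still awaiting
 * confirmation), then CREATE_SESSION; on success the session state is
 * reset and the client is marked ready.
 *
 * Returns zero, a negative errno, or a negative NFS4ERR status.
 */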
 306int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 307{
 308        int status;
 309
 310        if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
 311                goto do_confirm;
 312        status = nfs4_proc_exchange_id(clp, cred);
 313        if (status != 0)
 314                goto out;
 315        set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 316do_confirm:
 317        status = nfs4_proc_create_session(clp, cred);
 318        if (status != 0)
 319                goto out;
 320        nfs41_finish_session_reset(clp);
 321        nfs_mark_client_ready(clp, NFS_CS_READY);
 322out:
 323        return status;
 324}
 325
 326/**
 327 * nfs41_discover_server_trunking - Detect server IP address trunking (mv1)
 328 *
 329 * @clp: nfs_client under test
 330 * @result: OUT: found nfs_client, or clp
 331 * @cred: credential to use for trunking test
 332 *
 333 * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
 334 * If NFS4_OK is returned, an nfs_client pointer is planted in
 335 * "result".
 336 *
 337 * Note: The returned client may not yet be marked ready.
 338 */
 339int nfs41_discover_server_trunking(struct nfs_client *clp,
 340                                   struct nfs_client **result,
 341                                   struct rpc_cred *cred)
 342{
 343        int status;
 344
 345        status = nfs4_proc_exchange_id(clp, cred);
 346        if (status != NFS4_OK)
 347                return status;
 348
 349        status = nfs41_walk_client_list(clp, result, cred);
 350        if (status < 0)
 351                return status;
 352        if (clp != *result)
 353                return 0;
 354
 355        /* Purge state if the client id was established in a prior instance */
 356        if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
 357                set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
 358        else
 359                set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 360        nfs4_schedule_state_manager(clp);
 361        status = nfs_wait_client_init_complete(clp);
 362        if (status < 0)
 363                nfs_put_client(clp);
 364        return status;
 365}
 366
 367#endif /* CONFIG_NFS_V4_1 */
 368
 369/**
 370 * nfs4_get_clid_cred - Acquire credential for a setclientid operation
 371 * @clp: client state handle
 372 *
 373 * Returns an rpc_cred with reference count bumped, or NULL.
 374 */
 375struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp)
 376{
 377        struct rpc_cred *cred;
 378
 379        spin_lock(&clp->cl_lock);
 380        cred = nfs4_get_machine_cred_locked(clp);
 381        spin_unlock(&clp->cl_lock);
 382        return cred;
 383}
 384
 385static struct nfs4_state_owner *
 386nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 387{
 388        struct rb_node **p = &server->state_owners.rb_node,
 389                       *parent = NULL;
 390        struct nfs4_state_owner *sp;
 391
 392        while (*p != NULL) {
 393                parent = *p;
 394                sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 395
 396                if (cred < sp->so_cred)
 397                        p = &parent->rb_left;
 398                else if (cred > sp->so_cred)
 399                        p = &parent->rb_right;
 400                else {
 401                        if (!list_empty(&sp->so_lru))
 402                                list_del_init(&sp->so_lru);
 403                        atomic_inc(&sp->so_count);
 404                        return sp;
 405                }
 406        }
 407        return NULL;
 408}
 409
 410static struct nfs4_state_owner *
 411nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 412{
 413        struct nfs_server *server = new->so_server;
 414        struct rb_node **p = &server->state_owners.rb_node,
 415                       *parent = NULL;
 416        struct nfs4_state_owner *sp;
 417        int err;
 418
 419        while (*p != NULL) {
 420                parent = *p;
 421                sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 422
 423                if (new->so_cred < sp->so_cred)
 424                        p = &parent->rb_left;
 425                else if (new->so_cred > sp->so_cred)
 426                        p = &parent->rb_right;
 427                else {
 428                        if (!list_empty(&sp->so_lru))
 429                                list_del_init(&sp->so_lru);
 430                        atomic_inc(&sp->so_count);
 431                        return sp;
 432                }
 433        }
 434        err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
 435        if (err)
 436                return ERR_PTR(err);
 437        rb_link_node(&new->so_server_node, parent, p);
 438        rb_insert_color(&new->so_server_node, &server->state_owners);
 439        return new;
 440}
 441
 442static void
 443nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 444{
 445        struct nfs_server *server = sp->so_server;
 446
 447        if (!RB_EMPTY_NODE(&sp->so_server_node))
 448                rb_erase(&sp->so_server_node, &server->state_owners);
 449        ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
 450}
 451
 452static void
 453nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
 454{
 455        sc->create_time = ktime_get();
 456        sc->flags = 0;
 457        sc->counter = 0;
 458        spin_lock_init(&sc->lock);
 459        INIT_LIST_HEAD(&sc->list);
 460        rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
 461}
 462
 463static void
 464nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
 465{
 466        rpc_destroy_wait_queue(&sc->wait);
 467}
 468
  469/*
  470 * nfs4_alloc_state_owner(): called on the OPEN or CREATE path to
  471 * allocate and initialize a new state_owner for the given server
  472 * and credential.
  473 */
 474static struct nfs4_state_owner *
 475nfs4_alloc_state_owner(struct nfs_server *server,
 476                struct rpc_cred *cred,
 477                gfp_t gfp_flags)
 478{
 479        struct nfs4_state_owner *sp;
 480
 481        sp = kzalloc(sizeof(*sp), gfp_flags);
 482        if (!sp)
 483                return NULL;
 484        sp->so_server = server;
 485        sp->so_cred = get_rpccred(cred);
 486        spin_lock_init(&sp->so_lock);
 487        INIT_LIST_HEAD(&sp->so_states);
 488        nfs4_init_seqid_counter(&sp->so_seqid);
 489        atomic_set(&sp->so_count, 1);
 490        INIT_LIST_HEAD(&sp->so_lru);
 491        seqcount_init(&sp->so_reclaim_seqcount);
 492        mutex_init(&sp->so_delegreturn_mutex);
 493        return sp;
 494}
 495
 496static void
 497nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 498{
 499        struct rb_node *rb_node = &sp->so_server_node;
 500
 501        if (!RB_EMPTY_NODE(rb_node)) {
 502                struct nfs_server *server = sp->so_server;
 503                struct nfs_client *clp = server->nfs_client;
 504
 505                spin_lock(&clp->cl_lock);
 506                if (!RB_EMPTY_NODE(rb_node)) {
 507                        rb_erase(rb_node, &server->state_owners);
 508                        RB_CLEAR_NODE(rb_node);
 509                }
 510                spin_unlock(&clp->cl_lock);
 511        }
 512}
 513
 514static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
 515{
 516        nfs4_destroy_seqid_counter(&sp->so_seqid);
 517        put_rpccred(sp->so_cred);
 518        kfree(sp);
 519}
 520
 521static void nfs4_gc_state_owners(struct nfs_server *server)
 522{
 523        struct nfs_client *clp = server->nfs_client;
 524        struct nfs4_state_owner *sp, *tmp;
 525        unsigned long time_min, time_max;
 526        LIST_HEAD(doomed);
 527
 528        spin_lock(&clp->cl_lock);
 529        time_max = jiffies;
 530        time_min = (long)time_max - (long)clp->cl_lease_time;
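        /* Reap state owners that have sat unused on the LRU for longer
         * than one lease period. */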
 531        list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
 532                /* NB: LRU is sorted so that oldest is at the head */
 533                if (time_in_range(sp->so_expires, time_min, time_max))
 534                        break;
 535                list_move(&sp->so_lru, &doomed);
 536                nfs4_remove_state_owner_locked(sp);
 537        }
 538        spin_unlock(&clp->cl_lock);
 539
 540        list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
 541                list_del(&sp->so_lru);
 542                nfs4_free_state_owner(sp);
 543        }
 544}
 545
 546/**
 547 * nfs4_get_state_owner - Look up a state owner given a credential
 548 * @server: nfs_server to search
 549 * @cred: RPC credential to match
 550 *
 551 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
 552 */
 553struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
 554                                              struct rpc_cred *cred,
 555                                              gfp_t gfp_flags)
 556{
 557        struct nfs_client *clp = server->nfs_client;
 558        struct nfs4_state_owner *sp, *new;
 559
 560        spin_lock(&clp->cl_lock);
 561        sp = nfs4_find_state_owner_locked(server, cred);
 562        spin_unlock(&clp->cl_lock);
 563        if (sp != NULL)
 564                goto out;
 565        new = nfs4_alloc_state_owner(server, cred, gfp_flags);
 566        if (new == NULL)
 567                goto out;
 568        do {
 569                if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
 570                        break;
 571                spin_lock(&clp->cl_lock);
 572                sp = nfs4_insert_state_owner_locked(new);
 573                spin_unlock(&clp->cl_lock);
 574        } while (sp == ERR_PTR(-EAGAIN));
 575        if (sp != new)
 576                nfs4_free_state_owner(new);
 577out:
 578        nfs4_gc_state_owners(server);
 579        return sp;
 580}
 581
 582/**
 583 * nfs4_put_state_owner - Release a nfs4_state_owner
 584 * @sp: state owner data to release
 585 *
 586 * Note that we keep released state owners on an LRU
 587 * list.
 588 * This caches valid state owners so that they can be
 589 * reused, to avoid the OPEN_CONFIRM on minor version 0.
 590 * It also pins the uniquifier of dropped state owners for
 591 * a while, to ensure that those state owner names are
 592 * never reused.
 593 */
 594void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 595{
 596        struct nfs_server *server = sp->so_server;
 597        struct nfs_client *clp = server->nfs_client;
 598
 599        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 600                return;
 601
 602        sp->so_expires = jiffies;
 603        list_add_tail(&sp->so_lru, &server->state_owners_lru);
 604        spin_unlock(&clp->cl_lock);
 605}
 606
 607/**
 608 * nfs4_purge_state_owners - Release all cached state owners
 609 * @server: nfs_server with cached state owners to release
 610 *
 611 * Called at umount time.  Remaining state owners will be on
 612 * the LRU with ref count of zero.
 613 */
 614void nfs4_purge_state_owners(struct nfs_server *server)
 615{
 616        struct nfs_client *clp = server->nfs_client;
 617        struct nfs4_state_owner *sp, *tmp;
 618        LIST_HEAD(doomed);
 619
 620        spin_lock(&clp->cl_lock);
 621        list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
 622                list_move(&sp->so_lru, &doomed);
 623                nfs4_remove_state_owner_locked(sp);
 624        }
 625        spin_unlock(&clp->cl_lock);
 626
 627        list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
 628                list_del(&sp->so_lru);
 629                nfs4_free_state_owner(sp);
 630        }
 631}
 632
 633static struct nfs4_state *
 634nfs4_alloc_open_state(void)
 635{
 636        struct nfs4_state *state;
 637
 638        state = kzalloc(sizeof(*state), GFP_NOFS);
 639        if (!state)
 640                return NULL;
 641        atomic_set(&state->count, 1);
 642        INIT_LIST_HEAD(&state->lock_states);
 643        spin_lock_init(&state->state_lock);
 644        seqlock_init(&state->seqlock);
 645        return state;
 646}
 647
 648void
 649nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
 650{
 651        if (state->state == fmode)
 652                return;
 653        /* NB! List reordering - see the reclaim code for why.  */
 654        if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
 655                if (fmode & FMODE_WRITE)
 656                        list_move(&state->open_states, &state->owner->so_states);
 657                else
 658                        list_move_tail(&state->open_states, &state->owner->so_states);
 659        }
 660        state->state = fmode;
 661}
 662
 663static struct nfs4_state *
 664__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
 665{
 666        struct nfs_inode *nfsi = NFS_I(inode);
 667        struct nfs4_state *state;
 668
 669        list_for_each_entry(state, &nfsi->open_states, inode_states) {
 670                if (state->owner != owner)
 671                        continue;
 672                if (!nfs4_valid_open_stateid(state))
 673                        continue;
 674                if (atomic_inc_not_zero(&state->count))
 675                        return state;
 676        }
 677        return NULL;
 678}
 679
 680static void
 681nfs4_free_open_state(struct nfs4_state *state)
 682{
 683        kfree(state);
 684}
 685
 686struct nfs4_state *
 687nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
 688{
 689        struct nfs4_state *state, *new;
 690        struct nfs_inode *nfsi = NFS_I(inode);
 691
 692        spin_lock(&inode->i_lock);
 693        state = __nfs4_find_state_byowner(inode, owner);
 694        spin_unlock(&inode->i_lock);
 695        if (state)
 696                goto out;
 697        new = nfs4_alloc_open_state();
 698        spin_lock(&owner->so_lock);
 699        spin_lock(&inode->i_lock);
 700        state = __nfs4_find_state_byowner(inode, owner);
 701        if (state == NULL && new != NULL) {
 702                state = new;
 703                state->owner = owner;
 704                atomic_inc(&owner->so_count);
 705                list_add(&state->inode_states, &nfsi->open_states);
 706                ihold(inode);
 707                state->inode = inode;
 708                spin_unlock(&inode->i_lock);
 709                /* Note: The reclaim code dictates that we add stateless
 710                 * and read-only stateids to the end of the list */
 711                list_add_tail(&state->open_states, &owner->so_states);
 712                spin_unlock(&owner->so_lock);
 713        } else {
 714                spin_unlock(&inode->i_lock);
 715                spin_unlock(&owner->so_lock);
 716                if (new)
 717                        nfs4_free_open_state(new);
 718        }
 719out:
 720        return state;
 721}
 722
 723void nfs4_put_open_state(struct nfs4_state *state)
 724{
 725        struct inode *inode = state->inode;
 726        struct nfs4_state_owner *owner = state->owner;
 727
 728        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
 729                return;
 730        spin_lock(&inode->i_lock);
 731        list_del(&state->inode_states);
 732        list_del(&state->open_states);
 733        spin_unlock(&inode->i_lock);
 734        spin_unlock(&owner->so_lock);
 735        iput(inode);
 736        nfs4_free_open_state(state);
 737        nfs4_put_state_owner(owner);
 738}
 739
 740/*
 741 * Close the current file.
 742 */
 743static void __nfs4_close(struct nfs4_state *state,
 744                fmode_t fmode, gfp_t gfp_mask, int wait)
 745{
 746        struct nfs4_state_owner *owner = state->owner;
 747        int call_close = 0;
 748        fmode_t newstate;
 749
 750        atomic_inc(&owner->so_count);
 751        /* Protect against nfs4_find_state() */
 752        spin_lock(&owner->so_lock);
 753        switch (fmode & (FMODE_READ | FMODE_WRITE)) {
 754                case FMODE_READ:
 755                        state->n_rdonly--;
 756                        break;
 757                case FMODE_WRITE:
 758                        state->n_wronly--;
 759                        break;
 760                case FMODE_READ|FMODE_WRITE:
 761                        state->n_rdwr--;
 762        }
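        /* Work out which open modes are still in use, and whether we
         * still hold server-side open state that requires a CLOSE or
         * OPEN_DOWNGRADE on the wire. */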
 763        newstate = FMODE_READ|FMODE_WRITE;
 764        if (state->n_rdwr == 0) {
 765                if (state->n_rdonly == 0) {
 766                        newstate &= ~FMODE_READ;
 767                        call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
 768                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
 769                }
 770                if (state->n_wronly == 0) {
 771                        newstate &= ~FMODE_WRITE;
 772                        call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
 773                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
 774                }
 775                if (newstate == 0)
 776                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
 777        }
 778        nfs4_state_set_mode_locked(state, newstate);
 779        spin_unlock(&owner->so_lock);
 780
 781        if (!call_close) {
 782                nfs4_put_open_state(state);
 783                nfs4_put_state_owner(owner);
 784        } else
 785                nfs4_do_close(state, gfp_mask, wait);
 786}
 787
 788void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
 789{
 790        __nfs4_close(state, fmode, GFP_NOFS, 0);
 791}
 792
 793void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
 794{
 795        __nfs4_close(state, fmode, GFP_KERNEL, 1);
 796}
 797
 798/*
 799 * Search the state->lock_states for an existing lock_owner
 800 * that is compatible with current->files
 801 */
 802static struct nfs4_lock_state *
 803__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 804{
 805        struct nfs4_lock_state *pos;
 806        list_for_each_entry(pos, &state->lock_states, ls_locks) {
 807                if (pos->ls_owner != fl_owner)
 808                        continue;
 809                atomic_inc(&pos->ls_count);
 810                return pos;
 811        }
 812        return NULL;
 813}
 814
  815/*
  816 * Allocate and initialize a new lock_state structure for the given
  817 * open state and lock owner.  Returns NULL if allocation or lock
  818 * owner ID assignment fails.
  819 */
 820static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 821{
 822        struct nfs4_lock_state *lsp;
 823        struct nfs_server *server = state->owner->so_server;
 824
 825        lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 826        if (lsp == NULL)
 827                return NULL;
 828        nfs4_init_seqid_counter(&lsp->ls_seqid);
 829        atomic_set(&lsp->ls_count, 1);
 830        lsp->ls_state = state;
 831        lsp->ls_owner = fl_owner;
 832        lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
 833        if (lsp->ls_seqid.owner_id < 0)
 834                goto out_free;
 835        INIT_LIST_HEAD(&lsp->ls_locks);
 836        return lsp;
 837out_free:
 838        kfree(lsp);
 839        return NULL;
 840}
 841
 842void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
 843{
 844        ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
 845        nfs4_destroy_seqid_counter(&lsp->ls_seqid);
 846        kfree(lsp);
 847}
 848
  849/*
  850 * Return the lock_state for this open state and lock owner, allocating
  851 * and inserting a new one if none exists yet.  A new lock_state is not
  852 * yet confirmed by the server (NFS_LOCK_INITIALIZED is clear).
  853 */
 854static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 855{
 856        struct nfs4_lock_state *lsp, *new = NULL;
 857        
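        /* Search for an existing lock_state under the state lock; if we
         * have to drop the lock to allocate a new one, loop and re-check
         * in case another task beat us to it. */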
 858        for(;;) {
 859                spin_lock(&state->state_lock);
 860                lsp = __nfs4_find_lock_state(state, owner);
 861                if (lsp != NULL)
 862                        break;
 863                if (new != NULL) {
 864                        list_add(&new->ls_locks, &state->lock_states);
 865                        set_bit(LK_STATE_IN_USE, &state->flags);
 866                        lsp = new;
 867                        new = NULL;
 868                        break;
 869                }
 870                spin_unlock(&state->state_lock);
 871                new = nfs4_alloc_lock_state(state, owner);
 872                if (new == NULL)
 873                        return NULL;
 874        }
 875        spin_unlock(&state->state_lock);
 876        if (new != NULL)
 877                nfs4_free_lock_state(state->owner->so_server, new);
 878        return lsp;
 879}
 880
 881/*
 882 * Release reference to lock_state, and free it if we see that
 883 * it is no longer in use
 884 */
 885void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 886{
 887        struct nfs_server *server;
 888        struct nfs4_state *state;
 889
 890        if (lsp == NULL)
 891                return;
 892        state = lsp->ls_state;
 893        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
 894                return;
 895        list_del(&lsp->ls_locks);
 896        if (list_empty(&state->lock_states))
 897                clear_bit(LK_STATE_IN_USE, &state->flags);
 898        spin_unlock(&state->state_lock);
 899        server = state->owner->so_server;
 900        if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
 901                struct nfs_client *clp = server->nfs_client;
 902
 903                clp->cl_mvops->free_lock_state(server, lsp);
 904        } else
 905                nfs4_free_lock_state(server, lsp);
 906}
 907
 908static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 909{
 910        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 911
 912        dst->fl_u.nfs4_fl.owner = lsp;
 913        atomic_inc(&lsp->ls_count);
 914}
 915
 916static void nfs4_fl_release_lock(struct file_lock *fl)
 917{
 918        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
 919}
 920
 921static const struct file_lock_operations nfs4_fl_lock_ops = {
 922        .fl_copy_lock = nfs4_fl_copy_lock,
 923        .fl_release_private = nfs4_fl_release_lock,
 924};
 925
 926int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 927{
 928        struct nfs4_lock_state *lsp;
 929
 930        if (fl->fl_ops != NULL)
 931                return 0;
 932        lsp = nfs4_get_lock_state(state, fl->fl_owner);
 933        if (lsp == NULL)
 934                return -ENOMEM;
 935        fl->fl_u.nfs4_fl.owner = lsp;
 936        fl->fl_ops = &nfs4_fl_lock_ops;
 937        return 0;
 938}
 939
 940static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
 941                struct nfs4_state *state,
 942                const struct nfs_lockowner *lockowner)
 943{
 944        struct nfs4_lock_state *lsp;
 945        fl_owner_t fl_owner;
 946        int ret = -ENOENT;
 947
 948
 949        if (lockowner == NULL)
 950                goto out;
 951
 952        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
 953                goto out;
 954
 955        fl_owner = lockowner->l_owner;
 956        spin_lock(&state->state_lock);
 957        lsp = __nfs4_find_lock_state(state, fl_owner);
 958        if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
 959                ret = -EIO;
 960        else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
 961                nfs4_stateid_copy(dst, &lsp->ls_stateid);
 962                ret = 0;
 963        }
 964        spin_unlock(&state->state_lock);
 965        nfs4_put_lock_state(lsp);
 966out:
 967        return ret;
 968}
 969
 970static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
 971{
 972        const nfs4_stateid *src;
 973        int seq;
 974
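        /* Take a seqlock-protected snapshot: copy the open stateid if one
         * is held, otherwise fall back to the zero stateid. */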
 975        do {
 976                src = &zero_stateid;
 977                seq = read_seqbegin(&state->seqlock);
 978                if (test_bit(NFS_OPEN_STATE, &state->flags))
 979                        src = &state->open_stateid;
 980                nfs4_stateid_copy(dst, src);
 981        } while (read_seqretry(&state->seqlock, seq));
 982}
 983
 984/*
 985 * Byte-range lock aware utility to initialize the stateid of read/write
 986 * requests.
 987 */
 988int nfs4_select_rw_stateid(struct nfs4_state *state,
 989                fmode_t fmode, const struct nfs_lockowner *lockowner,
 990                nfs4_stateid *dst, struct rpc_cred **cred)
 991{
 992        int ret;
 993
 994        if (!nfs4_valid_open_stateid(state))
 995                return -EIO;
 996        if (cred != NULL)
 997                *cred = NULL;
 998        ret = nfs4_copy_lock_stateid(dst, state, lockowner);
 999        if (ret == -EIO)
1000                /* A lost lock - don't even consider delegations */
1001                goto out;
1002        /* returns true if delegation stateid found and copied */
1003        if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
1004                ret = 0;
1005                goto out;
1006        }
1007        if (ret != -ENOENT)
1008                /* nfs4_copy_delegation_stateid() didn't over-write
1009                 * dst, so it still has the lock stateid which we now
1010                 * choose to use.
1011                 */
1012                goto out;
1013        nfs4_copy_open_stateid(dst, state);
1014        ret = 0;
1015out:
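        /* A zero seqid tells an NFSv4.1 server to use the most recent
         * version of the stateid. */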
1016        if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
1017                dst->seqid = 0;
1018        return ret;
1019}
1020
1021struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
1022{
1023        struct nfs_seqid *new;
1024
1025        new = kmalloc(sizeof(*new), gfp_mask);
1026        if (new == NULL)
1027                return ERR_PTR(-ENOMEM);
1028        new->sequence = counter;
1029        INIT_LIST_HEAD(&new->list);
1030        new->task = NULL;
1031        return new;
1032}
1033
1034void nfs_release_seqid(struct nfs_seqid *seqid)
1035{
1036        struct nfs_seqid_counter *sequence;
1037
1038        if (seqid == NULL || list_empty(&seqid->list))
1039                return;
1040        sequence = seqid->sequence;
1041        spin_lock(&sequence->lock);
1042        list_del_init(&seqid->list);
1043        if (!list_empty(&sequence->list)) {
1044                struct nfs_seqid *next;
1045
1046                next = list_first_entry(&sequence->list,
1047                                struct nfs_seqid, list);
1048                rpc_wake_up_queued_task(&sequence->wait, next->task);
1049        }
1050        spin_unlock(&sequence->lock);
1051}
1052
1053void nfs_free_seqid(struct nfs_seqid *seqid)
1054{
1055        nfs_release_seqid(seqid);
1056        kfree(seqid);
1057}
1058
1059/*
1060 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
1061 * failed with a seqid incrementing error -
1062 * see comments nfs4.h:seqid_mutating_error()
1063 */
1064static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
1065{
1066        switch (status) {
1067                case 0:
1068                        break;
1069                case -NFS4ERR_BAD_SEQID:
1070                        if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
1071                                return;
1072                        pr_warn_ratelimited("NFS: v4 server returned a bad"
1073                                        " sequence-id error on an"
1074                                        " unconfirmed sequence %p!\n",
1075                                        seqid->sequence);
1076                case -NFS4ERR_STALE_CLIENTID:
1077                case -NFS4ERR_STALE_STATEID:
1078                case -NFS4ERR_BAD_STATEID:
1079                case -NFS4ERR_BADXDR:
1080                case -NFS4ERR_RESOURCE:
1081                case -NFS4ERR_NOFILEHANDLE:
1082                        /* Non-seqid mutating errors */
1083                        return;
 1084        }
1085        /*
1086         * Note: no locking needed as we are guaranteed to be first
1087         * on the sequence list
1088         */
1089        seqid->sequence->counter++;
1090}
1091
1092void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
1093{
1094        struct nfs4_state_owner *sp;
1095
1096        if (seqid == NULL)
1097                return;
1098
1099        sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
1100        if (status == -NFS4ERR_BAD_SEQID)
1101                nfs4_drop_state_owner(sp);
1102        if (!nfs4_has_session(sp->so_server->nfs_client))
1103                nfs_increment_seqid(status, seqid);
1104}
1105
1106/*
1107 * Increment the seqid if the LOCK/LOCKU succeeded, or
1108 * failed with a seqid incrementing error -
1109 * see comments nfs4.h:seqid_mutating_error()
1110 */
1111void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
1112{
1113        if (seqid != NULL)
1114                nfs_increment_seqid(status, seqid);
1115}
1116
1117int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
1118{
1119        struct nfs_seqid_counter *sequence;
1120        int status = 0;
1121
1122        if (seqid == NULL)
1123                goto out;
1124        sequence = seqid->sequence;
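        /* Only the seqid at the head of the sequence list may proceed;
         * later requests sleep here until woken by nfs_release_seqid(). */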
1125        spin_lock(&sequence->lock);
1126        seqid->task = task;
1127        if (list_empty(&seqid->list))
1128                list_add_tail(&seqid->list, &sequence->list);
1129        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
1130                goto unlock;
1131        rpc_sleep_on(&sequence->wait, task, NULL);
1132        status = -EAGAIN;
1133unlock:
1134        spin_unlock(&sequence->lock);
1135out:
1136        return status;
1137}
1138
1139static int nfs4_run_state_manager(void *);
1140
1141static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
1142{
1143        smp_mb__before_atomic();
1144        clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
1145        smp_mb__after_atomic();
1146        wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
1147        rpc_wake_up(&clp->cl_rpcwaitq);
1148}
1149
1150/*
1151 * Schedule the nfs_client asynchronous state management routine
1152 */
1153void nfs4_schedule_state_manager(struct nfs_client *clp)
1154{
1155        struct task_struct *task;
1156        char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
1157
1158        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1159                return;
1160        __module_get(THIS_MODULE);
1161        atomic_inc(&clp->cl_count);
1162
1163        /* The rcu_read_lock() is not strictly necessary, as the state
1164         * manager is the only thread that ever changes the rpc_xprt
1165         * after it's initialized.  At this point, we're single threaded. */
1166        rcu_read_lock();
1167        snprintf(buf, sizeof(buf), "%s-manager",
1168                        rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
1169        rcu_read_unlock();
1170        task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
1171        if (IS_ERR(task)) {
1172                printk(KERN_ERR "%s: kthread_run: %ld\n",
1173                        __func__, PTR_ERR(task));
1174                nfs4_clear_state_manager_bit(clp);
1175                nfs_put_client(clp);
1176                module_put(THIS_MODULE);
1177        }
1178}
1179
1180/*
1181 * Schedule a lease recovery attempt
1182 */
1183void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1184{
1185        if (!clp)
1186                return;
1187        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1188                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1189        dprintk("%s: scheduling lease recovery for server %s\n", __func__,
1190                        clp->cl_hostname);
1191        nfs4_schedule_state_manager(clp);
1192}
1193EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
1194
1195/**
1196 * nfs4_schedule_migration_recovery - trigger migration recovery
1197 *
1198 * @server: FSID that is migrating
1199 *
1200 * Returns zero if recovery has started, otherwise a negative NFS4ERR
1201 * value is returned.
1202 */
1203int nfs4_schedule_migration_recovery(const struct nfs_server *server)
1204{
1205        struct nfs_client *clp = server->nfs_client;
1206
1207        if (server->fh_expire_type != NFS4_FH_PERSISTENT) {
1208                pr_err("NFS: volatile file handles not supported (server %s)\n",
1209                                clp->cl_hostname);
1210                return -NFS4ERR_IO;
1211        }
1212
1213        if (test_bit(NFS_MIG_FAILED, &server->mig_status))
1214                return -NFS4ERR_IO;
1215
1216        dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n",
1217                        __func__,
1218                        (unsigned long long)server->fsid.major,
1219                        (unsigned long long)server->fsid.minor,
1220                        clp->cl_hostname);
1221
1222        set_bit(NFS_MIG_IN_TRANSITION,
1223                        &((struct nfs_server *)server)->mig_status);
1224        set_bit(NFS4CLNT_MOVED, &clp->cl_state);
1225
1226        nfs4_schedule_state_manager(clp);
1227        return 0;
1228}
1229EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery);
1230
1231/**
1232 * nfs4_schedule_lease_moved_recovery - start lease-moved recovery
1233 *
1234 * @clp: server to check for moved leases
1235 *
1236 */
1237void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp)
1238{
1239        dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n",
1240                __func__, clp->cl_clientid, clp->cl_hostname);
1241
1242        set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state);
1243        nfs4_schedule_state_manager(clp);
1244}
1245EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery);
1246
1247int nfs4_wait_clnt_recover(struct nfs_client *clp)
1248{
1249        int res;
1250
1251        might_sleep();
1252
1253        atomic_inc(&clp->cl_count);
1254        res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
1255                                 nfs_wait_bit_killable, TASK_KILLABLE);
1256        if (res)
1257                goto out;
1258        if (clp->cl_cons_state < 0)
1259                res = clp->cl_cons_state;
1260out:
1261        nfs_put_client(clp);
1262        return res;
1263}
1264
1265int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1266{
1267        unsigned int loop;
1268        int ret;
1269
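        /* Wait for any running state manager to finish; if the lease is
         * still expired or needs checking, kick the state manager again
         * and retry a bounded number of times. */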
1270        for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1271                ret = nfs4_wait_clnt_recover(clp);
1272                if (ret != 0)
1273                        break;
1274                if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1275                    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1276                        break;
1277                nfs4_schedule_state_manager(clp);
1278                ret = -EIO;
1279        }
1280        return ret;
1281}
1282
1283/*
1284 * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
1285 * @clp: client to process
1286 *
1287 * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
1288 * resend of the SETCLIENTID and hence re-establish the
1289 * callback channel. Then return all existing delegations.
1290 */
1291static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
1292{
1293        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1294        nfs_expire_all_delegations(clp);
1295        dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
1296                        clp->cl_hostname);
1297}
1298
1299void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
1300{
1301        nfs40_handle_cb_pathdown(clp);
1302        nfs4_schedule_state_manager(clp);
1303}
1304
1305static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
1306{
1307
1308        if (!nfs4_valid_open_stateid(state))
1309                return 0;
1310        set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1311        /* Don't recover state that expired before the reboot */
1312        if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
1313                clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1314                return 0;
1315        }
1316        set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
1317        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1318        return 1;
1319}
1320
1321int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
1322{
1323        if (!nfs4_valid_open_stateid(state))
1324                return 0;
1325        set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
1326        clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1327        set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
1328        set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
1329        return 1;
1330}
1331
1332int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
1333{
1334        struct nfs_client *clp = server->nfs_client;
1335
1336        if (!nfs4_state_mark_reclaim_nograce(clp, state))
1337                return -EBADF;
1338        dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
1339                        clp->cl_hostname);
1340        nfs4_schedule_state_manager(clp);
1341        return 0;
1342}
1343EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
1344
1345static struct nfs4_lock_state *
1346nfs_state_find_lock_state_by_stateid(struct nfs4_state *state,
1347                const nfs4_stateid *stateid)
1348{
1349        struct nfs4_lock_state *pos;
1350
1351        list_for_each_entry(pos, &state->lock_states, ls_locks) {
1352                if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags))
1353                        continue;
1354                if (nfs4_stateid_match_other(&pos->ls_stateid, stateid))
1355                        return pos;
1356        }
1357        return NULL;
1358}
1359
1360static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state,
1361                const nfs4_stateid *stateid)
1362{
1363        bool found = false;
1364
1365        if (test_bit(LK_STATE_IN_USE, &state->flags)) {
1366                spin_lock(&state->state_lock);
1367                if (nfs_state_find_lock_state_by_stateid(state, stateid))
1368                        found = true;
1369                spin_unlock(&state->state_lock);
1370        }
1371        return found;
1372}
1373
1374void nfs_inode_find_state_and_recover(struct inode *inode,
1375                const nfs4_stateid *stateid)
1376{
1377        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1378        struct nfs_inode *nfsi = NFS_I(inode);
1379        struct nfs_open_context *ctx;
1380        struct nfs4_state *state;
1381        bool found = false;
1382
1383        spin_lock(&inode->i_lock);
1384        list_for_each_entry(ctx, &nfsi->open_files, list) {
1385                state = ctx->state;
1386                if (state == NULL)
1387                        continue;
1388                if (nfs4_stateid_match_other(&state->stateid, stateid) &&
1389                    nfs4_state_mark_reclaim_nograce(clp, state)) {
1390                        found = true;
1391                        continue;
1392                }
1393                if (nfs_state_lock_state_matches_stateid(state, stateid) &&
1394                    nfs4_state_mark_reclaim_nograce(clp, state))
1395                        found = true;
1396        }
1397        spin_unlock(&inode->i_lock);
1398
1399        nfs_inode_find_delegation_state_and_recover(inode, stateid);
1400        if (found)
1401                nfs4_schedule_state_manager(clp);
1402}
1403
1404static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
1405{
1406        struct inode *inode = state->inode;
1407        struct nfs_inode *nfsi = NFS_I(inode);
1408        struct nfs_open_context *ctx;
1409
1410        spin_lock(&inode->i_lock);
1411        list_for_each_entry(ctx, &nfsi->open_files, list) {
1412                if (ctx->state != state)
1413                        continue;
1414                set_bit(NFS_CONTEXT_BAD, &ctx->flags);
1415        }
1416        spin_unlock(&inode->i_lock);
1417}
1418
1419static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
1420{
1421        set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
1422        nfs4_state_mark_open_context_bad(state);
1423}
1424
1425
1426static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
1427{
1428        struct inode *inode = state->inode;
1429        struct nfs_inode *nfsi = NFS_I(inode);
1430        struct file_lock *fl;
1431        int status = 0;
1432        struct file_lock_context *flctx = inode->i_flctx;
1433        struct list_head *list;
1434
1435        if (flctx == NULL)
1436                return 0;
1437
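        /* Reclaim POSIX locks first; a second pass at the bottom of the
         * loop handles the flock list. */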
1438        list = &flctx->flc_posix;
1439
1440        /* Guard against delegation returns and new lock/unlock calls */
1441        down_write(&nfsi->rwsem);
1442        spin_lock(&flctx->flc_lock);
1443restart:
1444        list_for_each_entry(fl, list, fl_list) {
1445                if (nfs_file_open_context(fl->fl_file)->state != state)
1446                        continue;
1447                spin_unlock(&flctx->flc_lock);
1448                status = ops->recover_lock(state, fl);
1449                switch (status) {
1450                case 0:
1451                        break;
1452                case -ESTALE:
1453                case -NFS4ERR_ADMIN_REVOKED:
1454                case -NFS4ERR_STALE_STATEID:
1455                case -NFS4ERR_BAD_STATEID:
1456                case -NFS4ERR_EXPIRED:
1457                case -NFS4ERR_NO_GRACE:
1458                case -NFS4ERR_STALE_CLIENTID:
1459                case -NFS4ERR_BADSESSION:
1460                case -NFS4ERR_BADSLOT:
1461                case -NFS4ERR_BAD_HIGH_SLOT:
1462                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1463                        goto out;
1464                default:
1465                        pr_err("NFS: %s: unhandled error %d\n",
1466                                        __func__, status);
1467                case -ENOMEM:
1468                case -NFS4ERR_DENIED:
1469                case -NFS4ERR_RECLAIM_BAD:
1470                case -NFS4ERR_RECLAIM_CONFLICT:
1471                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1472                        status = 0;
1473                }
1474                spin_lock(&flctx->flc_lock);
1475        }
1476        if (list == &flctx->flc_posix) {
1477                list = &flctx->flc_flock;
1478                goto restart;
1479        }
1480        spin_unlock(&flctx->flc_lock);
1481out:
1482        up_write(&nfsi->rwsem);
1483        return status;
1484}
1485
1486static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
1487{
1488        struct nfs4_state *state;
1489        struct nfs4_lock_state *lock;
1490        int status = 0;
1491
1492        /* Note: we rely on the sp->so_states list being ordered 
1493         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
1494         * states first.
1495         * This is needed to ensure that the server won't give us any
1496         * read delegations that we have to return if, say, we are
1497         * recovering after a network partition or a reboot from a
1498         * server that doesn't support a grace period.
1499         */
1500        spin_lock(&sp->so_lock);
1501        raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
1502restart:
1503        list_for_each_entry(state, &sp->so_states, open_states) {
1504                if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
1505                        continue;
1506                if (!nfs4_valid_open_stateid(state))
1507                        continue;
1508                if (state->state == 0)
1509                        continue;
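                /* Take a reference and drop so_lock for the recovery RPCs;
                 * the list may change while the lock is dropped, so the
                 * walk restarts after each state is processed. */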
1510                atomic_inc(&state->count);
1511                spin_unlock(&sp->so_lock);
1512                status = ops->recover_open(sp, state);
1513                if (status >= 0) {
1514                        status = nfs4_reclaim_locks(state, ops);
1515                        if (status >= 0) {
1516                                if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
1517                                        spin_lock(&state->state_lock);
1518                                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
1519                                                if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
1520                                                        pr_warn_ratelimited("NFS: "
1521                                                                            "%s: Lock reclaim "
1522                                                                            "failed!\n", __func__);
1523                                        }
1524                                        spin_unlock(&state->state_lock);
1525                                }
1526                                clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1527                                        &state->flags);
1528                                nfs4_put_open_state(state);
1529                                spin_lock(&sp->so_lock);
1530                                goto restart;
1531                        }
1532                }
1533                switch (status) {
1534                default:
1535                        printk(KERN_ERR "NFS: %s: unhandled error %d\n",
1536                                __func__, status);
                        /* Fall through */
1537                case -ENOENT:
1538                case -ENOMEM:
1539                case -EACCES:
1540                case -EROFS:
1541                case -EIO:
1542                case -ESTALE:
1543                        /* Open state on this file cannot be recovered */
1544                        nfs4_state_mark_recovery_failed(state, status);
1545                        break;
1546                case -EAGAIN:
1547                        ssleep(1);
                        /* Fall through */
1548                case -NFS4ERR_ADMIN_REVOKED:
1549                case -NFS4ERR_STALE_STATEID:
1550                case -NFS4ERR_OLD_STATEID:
1551                case -NFS4ERR_BAD_STATEID:
1552                case -NFS4ERR_RECLAIM_BAD:
1553                case -NFS4ERR_RECLAIM_CONFLICT:
1554                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
1555                        break;
1556                case -NFS4ERR_EXPIRED:
1557                case -NFS4ERR_NO_GRACE:
1558                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
                        /* Fall through */
1559                case -NFS4ERR_STALE_CLIENTID:
1560                case -NFS4ERR_BADSESSION:
1561                case -NFS4ERR_BADSLOT:
1562                case -NFS4ERR_BAD_HIGH_SLOT:
1563                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1564                        goto out_err;
1565                }
1566                nfs4_put_open_state(state);
1567                spin_lock(&sp->so_lock);
1568                goto restart;
1569        }
1570        raw_write_seqcount_end(&sp->so_reclaim_seqcount);
1571        spin_unlock(&sp->so_lock);
1572        return 0;
1573out_err:
1574        nfs4_put_open_state(state);
1575        spin_lock(&sp->so_lock);
1576        raw_write_seqcount_end(&sp->so_reclaim_seqcount);
1577        spin_unlock(&sp->so_lock);
1578        return status;
1579}
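
/*
 * Note: the "goto restart" pattern above exists because sp->so_lock is
 * dropped around each recover_open()/nfs4_reclaim_locks() call, so the
 * so_states list may change while we sleep in an RPC.  Re-walking the
 * list from the head after every completed entry (with the entry pinned
 * via atomic_inc(&state->count) and released with nfs4_put_open_state())
 * keeps the iteration safe without holding a spinlock across blocking
 * calls.
 */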
1580
1581static void nfs4_clear_open_state(struct nfs4_state *state)
1582{
1583        struct nfs4_lock_state *lock;
1584
1585        clear_bit(NFS_DELEGATED_STATE, &state->flags);
1586        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1587        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1588        clear_bit(NFS_O_RDWR_STATE, &state->flags);
1589        spin_lock(&state->state_lock);
1590        list_for_each_entry(lock, &state->lock_states, ls_locks) {
1591                lock->ls_seqid.flags = 0;
1592                clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags);
1593        }
1594        spin_unlock(&state->state_lock);
1595}
1596
1597static void nfs4_reset_seqids(struct nfs_server *server,
1598        int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1599{
1600        struct nfs_client *clp = server->nfs_client;
1601        struct nfs4_state_owner *sp;
1602        struct rb_node *pos;
1603        struct nfs4_state *state;
1604
1605        spin_lock(&clp->cl_lock);
1606        for (pos = rb_first(&server->state_owners);
1607             pos != NULL;
1608             pos = rb_next(pos)) {
1609                sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1610                sp->so_seqid.flags = 0;
1611                spin_lock(&sp->so_lock);
1612                list_for_each_entry(state, &sp->so_states, open_states) {
1613                        if (mark_reclaim(clp, state))
1614                                nfs4_clear_open_state(state);
1615                }
1616                spin_unlock(&sp->so_lock);
1617        }
1618        spin_unlock(&clp->cl_lock);
1619}
1620
1621static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
1622        int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1623{
1624        struct nfs_server *server;
1625
1626        rcu_read_lock();
1627        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1628                nfs4_reset_seqids(server, mark_reclaim);
1629        rcu_read_unlock();
1630}
1631
1632static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1633{
1634        /* Mark all delegations for reclaim */
1635        nfs_delegation_mark_reclaim(clp);
1636        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1637}
1638
1639static void nfs4_reclaim_complete(struct nfs_client *clp,
1640                                 const struct nfs4_state_recovery_ops *ops,
1641                                 struct rpc_cred *cred)
1642{
1643        /* Notify the server we're done reclaiming our state */
1644        if (ops->reclaim_complete)
1645                (void)ops->reclaim_complete(clp, cred);
1646}
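
/*
 * Note: ->reclaim_complete is an optional recovery op, which is why it is
 * NULL-checked above; RECLAIM_COMPLETE only exists from NFSv4.1 onwards,
 * so minor versions without it simply leave the hook unset.
 */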
1647
1648static void nfs4_clear_reclaim_server(struct nfs_server *server)
1649{
1650        struct nfs_client *clp = server->nfs_client;
1651        struct nfs4_state_owner *sp;
1652        struct rb_node *pos;
1653        struct nfs4_state *state;
1654
1655        spin_lock(&clp->cl_lock);
1656        for (pos = rb_first(&server->state_owners);
1657             pos != NULL;
1658             pos = rb_next(pos)) {
1659                sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1660                spin_lock(&sp->so_lock);
1661                list_for_each_entry(state, &sp->so_states, open_states) {
1662                        if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
1663                                                &state->flags))
1664                                continue;
1665                        nfs4_state_mark_reclaim_nograce(clp, state);
1666                }
1667                spin_unlock(&sp->so_lock);
1668        }
1669        spin_unlock(&clp->cl_lock);
1670}
1671
1672static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
1673{
1674        struct nfs_server *server;
1675
1676        if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1677                return 0;
1678
1679        rcu_read_lock();
1680        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1681                nfs4_clear_reclaim_server(server);
1682        rcu_read_unlock();
1683
1684        nfs_delegation_reap_unclaimed(clp);
1685        return 1;
1686}
1687
1688static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1689{
1690        const struct nfs4_state_recovery_ops *ops;
1691        struct rpc_cred *cred;
1692
1693        if (!nfs4_state_clear_reclaim_reboot(clp))
1694                return;
1695        ops = clp->cl_mvops->reboot_recovery_ops;
1696        cred = nfs4_get_clid_cred(clp);
1697        nfs4_reclaim_complete(clp, ops, cred);
1698        put_rpccred(cred);
1699}
1700
1701static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1702{
1703        nfs_mark_test_expired_all_delegations(clp);
1704        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1705}
1706
1707static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1708{
1709        switch (error) {
1710        case 0:
1711                break;
1712        case -NFS4ERR_CB_PATH_DOWN:
1713                nfs40_handle_cb_pathdown(clp);
1714                break;
1715        case -NFS4ERR_NO_GRACE:
1716                nfs4_state_end_reclaim_reboot(clp);
1717                break;
1718        case -NFS4ERR_STALE_CLIENTID:
1719                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1720                nfs4_state_clear_reclaim_reboot(clp);
1721                nfs4_state_start_reclaim_reboot(clp);
1722                break;
1723        case -NFS4ERR_EXPIRED:
1724                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1725                nfs4_state_start_reclaim_nograce(clp);
1726                break;
1727        case -NFS4ERR_BADSESSION:
1728        case -NFS4ERR_BADSLOT:
1729        case -NFS4ERR_BAD_HIGH_SLOT:
1730        case -NFS4ERR_DEADSESSION:
1731        case -NFS4ERR_SEQ_FALSE_RETRY:
1732        case -NFS4ERR_SEQ_MISORDERED:
1733                set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1734                /* Zero session reset errors */
1735                break;
1736        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1737                set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1738                break;
1739        default:
1740                dprintk("%s: failed to handle error %d for server %s\n",
1741                                __func__, error, clp->cl_hostname);
1742                return error;
1743        }
1744        dprintk("%s: handled error %d for server %s\n", __func__, error,
1745                        clp->cl_hostname);
1746        return 0;
1747}
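
/*
 * Note: for the errors recognised above, this helper mostly just records
 * what kind of recovery is needed (NFS4CLNT_* state bits, reclaim marks)
 * and returns 0; the actual recovery work is done later by the
 * nfs4_state_manager() loop.  Errors it does not recognise are handed
 * back to the caller unchanged.
 */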
1748
1749static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1750{
1751        struct nfs4_state_owner *sp;
1752        struct nfs_server *server;
1753        struct rb_node *pos;
1754        int status = 0;
1755
1756restart:
1757        rcu_read_lock();
1758        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1759                nfs4_purge_state_owners(server);
1760                spin_lock(&clp->cl_lock);
1761                for (pos = rb_first(&server->state_owners);
1762                     pos != NULL;
1763                     pos = rb_next(pos)) {
1764                        sp = rb_entry(pos,
1765                                struct nfs4_state_owner, so_server_node);
1766                        if (!test_and_clear_bit(ops->owner_flag_bit,
1767                                                        &sp->so_flags))
1768                                continue;
1769                        if (!atomic_inc_not_zero(&sp->so_count))
1770                                continue;
1771                        spin_unlock(&clp->cl_lock);
1772                        rcu_read_unlock();
1773
1774                        status = nfs4_reclaim_open_state(sp, ops);
1775                        if (status < 0) {
1776                                set_bit(ops->owner_flag_bit, &sp->so_flags);
1777                                nfs4_put_state_owner(sp);
1778                                status = nfs4_recovery_handle_error(clp, status);
1779                                return (status != 0) ? status : -EAGAIN;
1780                        }
1781
1782                        nfs4_put_state_owner(sp);
1783                        goto restart;
1784                }
1785                spin_unlock(&clp->cl_lock);
1786        }
1787        rcu_read_unlock();
1788        return 0;
1789}
1790
1791static int nfs4_check_lease(struct nfs_client *clp)
1792{
1793        struct rpc_cred *cred;
1794        const struct nfs4_state_maintenance_ops *ops =
1795                clp->cl_mvops->state_renewal_ops;
1796        int status;
1797
1798        /* Is the client already known to have an expired lease? */
1799        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1800                return 0;
1801        spin_lock(&clp->cl_lock);
1802        cred = ops->get_state_renewal_cred_locked(clp);
1803        spin_unlock(&clp->cl_lock);
1804        if (cred == NULL) {
1805                cred = nfs4_get_clid_cred(clp);
1806                status = -ENOKEY;
1807                if (cred == NULL)
1808                        goto out;
1809        }
1810        status = ops->renew_lease(clp, cred);
1811        put_rpccred(cred);
1812        if (status == -ETIMEDOUT) {
1813                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1814                return 0;
1815        }
1816out:
1817        return nfs4_recovery_handle_error(clp, status);
1818}
1819
1820/* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors
1821 * and for recoverable errors on EXCHANGE_ID for v4.1
1822 */
1823static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
1824{
1825        switch (status) {
1826        case -NFS4ERR_SEQ_MISORDERED:
1827                if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
1828                        return -ESERVERFAULT;
1829                /* Lease confirmation error: retry after purging the lease */
1830                ssleep(1);
1831                clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1832                break;
1833        case -NFS4ERR_STALE_CLIENTID:
1834                clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1835                nfs4_state_start_reclaim_reboot(clp);
1836                break;
1837        case -NFS4ERR_CLID_INUSE:
1838                pr_err("NFS: Server %s reports our clientid is in use\n",
1839                        clp->cl_hostname);
1840                nfs_mark_client_ready(clp, -EPERM);
1841                clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1842                return -EPERM;
1843        case -EACCES:
1844        case -NFS4ERR_DELAY:
1845        case -ETIMEDOUT:
1846        case -EAGAIN:
1847                ssleep(1);
1848                break;
1849
1850        case -NFS4ERR_MINOR_VERS_MISMATCH:
1851                if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
1852                        nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
1853                dprintk("%s: exit with error %d for server %s\n",
1854                                __func__, -EPROTONOSUPPORT, clp->cl_hostname);
1855                return -EPROTONOSUPPORT;
1856        case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1857                                 * in nfs4_exchange_id */
1858        default:
1859                dprintk("%s: exit with error %d for server %s\n", __func__,
1860                                status, clp->cl_hostname);
1861                return status;
1862        }
1863        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1864        dprintk("%s: handled error %d for server %s\n", __func__, status,
1865                        clp->cl_hostname);
1866        return 0;
1867}
1868
1869static int nfs4_establish_lease(struct nfs_client *clp)
1870{
1871        struct rpc_cred *cred;
1872        const struct nfs4_state_recovery_ops *ops =
1873                clp->cl_mvops->reboot_recovery_ops;
1874        int status;
1875
1876        nfs4_begin_drain_session(clp);
1877        cred = nfs4_get_clid_cred(clp);
1878        if (cred == NULL)
1879                return -ENOENT;
1880        status = ops->establish_clid(clp, cred);
1881        put_rpccred(cred);
1882        if (status != 0)
1883                return status;
1884        pnfs_destroy_all_layouts(clp);
1885        return 0;
1886}
1887
1888/*
1889 * Returns zero or a negative errno.  NFS4ERR values are converted
1890 * to local errno values.
1891 */
1892static int nfs4_reclaim_lease(struct nfs_client *clp)
1893{
1894        int status;
1895
1896        status = nfs4_establish_lease(clp);
1897        if (status < 0)
1898                return nfs4_handle_reclaim_lease_error(clp, status);
1899        if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state))
1900                nfs4_state_start_reclaim_nograce(clp);
1901        if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
1902                set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1903        clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1904        clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1905        return 0;
1906}
1907
1908static int nfs4_purge_lease(struct nfs_client *clp)
1909{
1910        int status;
1911
1912        status = nfs4_establish_lease(clp);
1913        if (status < 0)
1914                return nfs4_handle_reclaim_lease_error(clp, status);
1915        clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
1916        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1917        nfs4_state_start_reclaim_nograce(clp);
1918        return 0;
1919}
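
/*
 * Note: the difference between nfs4_reclaim_lease() and nfs4_purge_lease()
 * is what happens once the new lease is established: nfs4_reclaim_lease()
 * normally schedules a RECLAIM_REBOOT pass (reclaim under the server's
 * grace period), whereas nfs4_purge_lease() assumes all previous state is
 * gone and forces a full no-grace reclaim instead.
 */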
1920
1921/*
1922 * Try remote migration of one FSID from a source server to a
1923 * destination server.  The source server provides a list of
1924 * potential destinations.
1925 *
1926 * Returns zero or a negative NFS4ERR status code.
1927 */
1928static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
1929{
1930        struct nfs_client *clp = server->nfs_client;
1931        struct nfs4_fs_locations *locations = NULL;
1932        struct inode *inode;
1933        struct page *page;
1934        int status, result;
1935
1936        dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__,
1937                        (unsigned long long)server->fsid.major,
1938                        (unsigned long long)server->fsid.minor,
1939                        clp->cl_hostname);
1940
1941        result = 0;
1942        page = alloc_page(GFP_KERNEL);
1943        locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
1944        if (page == NULL || locations == NULL) {
1945                dprintk("<-- %s: no memory\n", __func__);
1946                goto out;
1947        }
1948
1949        inode = d_inode(server->super->s_root);
1950        result = nfs4_proc_get_locations(inode, locations, page, cred);
1951        if (result) {
1952                dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
1953                        __func__, result);
1954                goto out;
1955        }
1956
1957        result = -NFS4ERR_NXIO;
1958        if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
1959                dprintk("<-- %s: No fs_locations data, migration skipped\n",
1960                        __func__);
1961                goto out;
1962        }
1963
1964        nfs4_begin_drain_session(clp);
1965
1966        status = nfs4_replace_transport(server, locations);
1967        if (status != 0) {
1968                dprintk("<-- %s: failed to replace transport: %d\n",
1969                        __func__, status);
1970                goto out;
1971        }
1972
1973        result = 0;
1974        dprintk("<-- %s: migration succeeded\n", __func__);
1975
1976out:
1977        if (page != NULL)
1978                __free_page(page);
1979        kfree(locations);
1980        if (result) {
1981                pr_err("NFS: migration recovery failed (server %s)\n",
1982                                clp->cl_hostname);
1983                set_bit(NFS_MIG_FAILED, &server->mig_status);
1984        }
1985        return result;
1986}
1987
1988/*
1989 * Returns zero or a negative NFS4ERR status code.
1990 */
1991static int nfs4_handle_migration(struct nfs_client *clp)
1992{
1993        const struct nfs4_state_maintenance_ops *ops =
1994                                clp->cl_mvops->state_renewal_ops;
1995        struct nfs_server *server;
1996        struct rpc_cred *cred;
1997
1998        dprintk("%s: migration reported on \"%s\"\n", __func__,
1999                        clp->cl_hostname);
2000
2001        spin_lock(&clp->cl_lock);
2002        cred = ops->get_state_renewal_cred_locked(clp);
2003        spin_unlock(&clp->cl_lock);
2004        if (cred == NULL)
2005                return -NFS4ERR_NOENT;
2006
2007        clp->cl_mig_gen++;
2008restart:
2009        rcu_read_lock();
2010        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2011                int status;
2012
2013                if (server->mig_gen == clp->cl_mig_gen)
2014                        continue;
2015                server->mig_gen = clp->cl_mig_gen;
2016
2017                if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION,
2018                                                &server->mig_status))
2019                        continue;
2020
2021                rcu_read_unlock();
2022                status = nfs4_try_migration(server, cred);
2023                if (status < 0) {
2024                        put_rpccred(cred);
2025                        return status;
2026                }
2027                goto restart;
2028        }
2029        rcu_read_unlock();
2030        put_rpccred(cred);
2031        return 0;
2032}
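
/*
 * Note: cl_mig_gen is bumped once per invocation and each nfs_server is
 * stamped with it as the list is walked, so the "goto restart" above can
 * drop the RCU read lock around nfs4_try_migration() and then rescan
 * cl_superblocks without processing the same server twice.
 * nfs4_handle_lease_moved() below relies on the same generation trick.
 */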
2033
2034/*
2035 * Test each nfs_server on the clp's cl_superblocks list to see
2036 * if it has been moved to another server.  Stop when the server no longer
2037 * returns NFS4ERR_LEASE_MOVED.
2038 */
2039static int nfs4_handle_lease_moved(struct nfs_client *clp)
2040{
2041        const struct nfs4_state_maintenance_ops *ops =
2042                                clp->cl_mvops->state_renewal_ops;
2043        struct nfs_server *server;
2044        struct rpc_cred *cred;
2045
2046        dprintk("%s: lease moved reported on \"%s\"\n", __func__,
2047                        clp->cl_hostname);
2048
2049        spin_lock(&clp->cl_lock);
2050        cred = ops->get_state_renewal_cred_locked(clp);
2051        spin_unlock(&clp->cl_lock);
2052        if (cred == NULL)
2053                return -NFS4ERR_NOENT;
2054
2055        clp->cl_mig_gen++;
2056restart:
2057        rcu_read_lock();
2058        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2059                struct inode *inode;
2060                int status;
2061
2062                if (server->mig_gen == clp->cl_mig_gen)
2063                        continue;
2064                server->mig_gen = clp->cl_mig_gen;
2065
2066                rcu_read_unlock();
2067
2068                inode = d_inode(server->super->s_root);
2069                status = nfs4_proc_fsid_present(inode, cred);
2070                if (status != -NFS4ERR_MOVED)
2071                        goto restart;   /* wasn't this one */
2072                if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED)
2073                        goto restart;   /* there are more */
2074                goto out;
2075        }
2076        rcu_read_unlock();
2077
2078out:
2079        put_rpccred(cred);
2080        return 0;
2081}
2082
2083/**
2084 * nfs4_discover_server_trunking - Detect server IP address trunking
2085 *
2086 * @clp: nfs_client under test
2087 * @result: OUT: found nfs_client, or clp
2088 *
2089 * Returns zero or a negative errno.  If zero is returned,
2090 * an nfs_client pointer is planted in "result".
2091 *
2092 * Note: since we are invoked in process context, and
2093 * not from inside the state manager, we cannot use
2094 * nfs4_handle_reclaim_lease_error().
2095 */
2096int nfs4_discover_server_trunking(struct nfs_client *clp,
2097                                  struct nfs_client **result)
2098{
2099        const struct nfs4_state_recovery_ops *ops =
2100                                clp->cl_mvops->reboot_recovery_ops;
2101        struct rpc_clnt *clnt;
2102        struct rpc_cred *cred;
2103        int i, status;
2104
2105        dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
2106
2107        clnt = clp->cl_rpcclient;
2108        i = 0;
2109
2110        mutex_lock(&nfs_clid_init_mutex);
2111again:
2112        status  = -ENOENT;
2113        cred = nfs4_get_clid_cred(clp);
2114        if (cred == NULL)
2115                goto out_unlock;
2116
2117        status = ops->detect_trunking(clp, result, cred);
2118        put_rpccred(cred);
2119        switch (status) {
2120        case 0:
2121                break;
2122        case -ETIMEDOUT:
2123                if (clnt->cl_softrtry)
2124                        break;
                /* Fall through */
2125        case -NFS4ERR_DELAY:
2126        case -EAGAIN:
2127                ssleep(1);
                /* Fall through */
2128        case -NFS4ERR_STALE_CLIENTID:
2129                dprintk("NFS: %s after status %d, retrying\n",
2130                        __func__, status);
2131                goto again;
2132        case -EACCES:
2133                if (i++ == 0) {
2134                        nfs4_root_machine_cred(clp);
2135                        goto again;
2136                }
2137                if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
2138                        break;
                /* Fall through */
2139        case -NFS4ERR_CLID_INUSE:
2140        case -NFS4ERR_WRONGSEC:
2141                /* No point in retrying if we already used RPC_AUTH_UNIX */
2142                if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) {
2143                        status = -EPERM;
2144                        break;
2145                }
2146                clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
2147                if (IS_ERR(clnt)) {
2148                        status = PTR_ERR(clnt);
2149                        break;
2150                }
2151                /* Note: this is safe because we haven't yet marked the
2152                 * client as ready, so we are the only user of
2153                 * clp->cl_rpcclient
2154                 */
2155                clnt = xchg(&clp->cl_rpcclient, clnt);
2156                rpc_shutdown_client(clnt);
2157                clnt = clp->cl_rpcclient;
2158                goto again;
2159
2160        case -NFS4ERR_MINOR_VERS_MISMATCH:
2161                status = -EPROTONOSUPPORT;
2162                break;
2163
2164        case -EKEYEXPIRED:
2165        case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
2166                                 * in nfs4_exchange_id */
2167                status = -EKEYEXPIRED;
2168                break;
2169        default:
2170                pr_warn("NFS: %s unhandled error %d. Exiting with error EIO\n",
2171                                __func__, status);
2172                status = -EIO;
2173        }
2174
2175out_unlock:
2176        mutex_unlock(&nfs_clid_init_mutex);
2177        dprintk("NFS: %s: status = %d\n", __func__, status);
2178        return status;
2179}
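
/*
 * Illustrative example (not taken verbatim from any caller): client
 * initialisation code would typically invoke trunking discovery roughly
 * like
 *
 *	struct nfs_client *old;
 *	int error;
 *
 *	error = nfs4_discover_server_trunking(clp, &old);
 *	if (error < 0)
 *		goto error;
 *	if (old != clp) {
 *		... "old" is an already-known nfs_client for the same
 *		    server; the caller would switch to it and release
 *		    the freshly created clp ...
 *	}
 *
 * i.e. on success the caller must be prepared for the OUT pointer to name
 * a different, already-established nfs_client.
 */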
2180
2181#ifdef CONFIG_NFS_V4_1
2182void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
2183{
2184        struct nfs_client *clp = session->clp;
2185
2186        switch (err) {
2187        default:
2188                set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2189                break;
2190        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2191                set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2192        }
2193        nfs4_schedule_lease_recovery(clp);
2194}
2195EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
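
/*
 * Illustrative use (hypothetical call site): a v4.1 error handler that has
 * decided the current session is unusable would do something like
 *
 *	nfs4_schedule_session_recovery(clp->cl_session, -NFS4ERR_BADSESSION);
 *
 * which, per the switch above, sets NFS4CLNT_SESSION_RESET and then kicks
 * lease recovery so the state manager rebuilds the session.
 */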
2196
2197void nfs41_notify_server(struct nfs_client *clp)
2198{
2199        /* Use CHECK_LEASE to ping the server with a SEQUENCE */
2200        set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
2201        nfs4_schedule_state_manager(clp);
2202}
2203
2204static void nfs4_reset_all_state(struct nfs_client *clp)
2205{
2206        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
2207                set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
2208                clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
2209                nfs4_state_start_reclaim_nograce(clp);
2210                dprintk("%s: scheduling reset of all state for server %s!\n",
2211                                __func__, clp->cl_hostname);
2212                nfs4_schedule_state_manager(clp);
2213        }
2214}
2215
2216static void nfs41_handle_server_reboot(struct nfs_client *clp)
2217{
2218        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
2219                nfs4_state_start_reclaim_reboot(clp);
2220                dprintk("%s: server %s rebooted!\n", __func__,
2221                                clp->cl_hostname);
2222                nfs4_schedule_state_manager(clp);
2223        }
2224}
2225
2226static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2227{
2228        nfs4_reset_all_state(clp);
2229        dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2230}
2231
2232static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2233{
2234        nfs4_state_start_reclaim_nograce(clp);
2235        nfs4_schedule_state_manager(clp);
2236
2237        dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2238}
2239
2240static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2241{
2242        /* FIXME: For now, we destroy all layouts. */
2243        pnfs_destroy_all_layouts(clp);
2244        /* FIXME: For now, we test all delegations+open state+locks. */
2245        nfs41_handle_some_state_revoked(clp);
2246        dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2247                        clp->cl_hostname);
2248}
2249
2250static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2251{
2252        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2253        nfs4_schedule_state_manager(clp);
2254
2255        dprintk("%s: server %s declared a backchannel fault\n", __func__,
2256                        clp->cl_hostname);
2257}
2258
2259static void nfs41_handle_cb_path_down(struct nfs_client *clp)
2260{
2261        if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
2262                &clp->cl_state) == 0)
2263                nfs4_schedule_state_manager(clp);
2264}
2265
2266void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags,
2267                bool recovery)
2268{
2269        if (!flags)
2270                return;
2271
2272        dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
2273                __func__, clp->cl_hostname, clp->cl_clientid, flags);
2274        /*
2275         * If we're called from the state manager thread, then assume we're
2276         * already handling the RECLAIM_NEEDED and/or STATE_REVOKED flags.
2277         * Those flags are expected to remain set until we're done
2278         * recovering (see RFC5661, section 18.46.3).
2279         */
2280        if (recovery)
2281                goto out_recovery;
2282
2283        if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2284                nfs41_handle_server_reboot(clp);
2285        if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2286                nfs41_handle_all_state_revoked(clp);
2287        if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2288                            SEQ4_STATUS_ADMIN_STATE_REVOKED))
2289                nfs41_handle_some_state_revoked(clp);
2290        if (flags & SEQ4_STATUS_LEASE_MOVED)
2291                nfs4_schedule_lease_moved_recovery(clp);
2292        if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2293                nfs41_handle_recallable_state_revoked(clp);
2294out_recovery:
2295        if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
2296                nfs41_handle_backchannel_fault(clp);
2297        else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2298                                SEQ4_STATUS_CB_PATH_DOWN_SESSION))
2299                nfs41_handle_cb_path_down(clp);
2300}
2301
2302static int nfs4_reset_session(struct nfs_client *clp)
2303{
2304        struct rpc_cred *cred;
2305        int status;
2306
2307        if (!nfs4_has_session(clp))
2308                return 0;
2309        nfs4_begin_drain_session(clp);
2310        cred = nfs4_get_clid_cred(clp);
2311        status = nfs4_proc_destroy_session(clp->cl_session, cred);
2312        switch (status) {
2313        case 0:
2314        case -NFS4ERR_BADSESSION:
2315        case -NFS4ERR_DEADSESSION:
2316                break;
2317        case -NFS4ERR_BACK_CHAN_BUSY:
2318        case -NFS4ERR_DELAY:
2319                set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2320                status = 0;
2321                ssleep(1);
2322                goto out;
2323        default:
2324                status = nfs4_recovery_handle_error(clp, status);
2325                goto out;
2326        }
2327
2328        memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
2329        status = nfs4_proc_create_session(clp, cred);
2330        if (status) {
2331                dprintk("%s: session reset failed with status %d for server %s!\n",
2332                        __func__, status, clp->cl_hostname);
2333                status = nfs4_handle_reclaim_lease_error(clp, status);
2334                goto out;
2335        }
2336        nfs41_finish_session_reset(clp);
2337        dprintk("%s: session reset was successful for server %s!\n",
2338                        __func__, clp->cl_hostname);
2339out:
2340        if (cred)
2341                put_rpccred(cred);
2342        return status;
2343}
2344
2345static int nfs4_bind_conn_to_session(struct nfs_client *clp)
2346{
2347        struct rpc_cred *cred;
2348        int ret;
2349
2350        if (!nfs4_has_session(clp))
2351                return 0;
2352        nfs4_begin_drain_session(clp);
2353        cred = nfs4_get_clid_cred(clp);
2354        ret = nfs4_proc_bind_conn_to_session(clp, cred);
2355        if (cred)
2356                put_rpccred(cred);
2357        clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2358        switch (ret) {
2359        case 0:
2360                dprintk("%s: bind_conn_to_session was successful for server %s!\n",
2361                        __func__, clp->cl_hostname);
2362                break;
2363        case -NFS4ERR_DELAY:
2364                ssleep(1);
2365                set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2366                break;
2367        default:
2368                return nfs4_recovery_handle_error(clp, ret);
2369        }
2370        return 0;
2371}
2372#else /* CONFIG_NFS_V4_1 */
2373static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
2374
2375static int nfs4_bind_conn_to_session(struct nfs_client *clp)
2376{
2377        return 0;
2378}
2379#endif /* CONFIG_NFS_V4_1 */
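
/*
 * Note: the !CONFIG_NFS_V4_1 stubs above exist so that nfs4_state_manager()
 * can call nfs4_reset_session() and nfs4_bind_conn_to_session()
 * unconditionally; on NFSv4.0-only builds both simply report success.
 */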
2380
2381static void nfs4_state_manager(struct nfs_client *clp)
2382{
2383        int status = 0;
2384        const char *section = "", *section_sep = "";
2385
2386        /* Ensure exclusive access to NFSv4 state */
2387        do {
2388                if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
2389                        section = "purge state";
2390                        status = nfs4_purge_lease(clp);
2391                        if (status < 0)
2392                                goto out_error;
2393                        continue;
2394                }
2395
2396                if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
2397                        section = "lease expired";
2398                        /* We're going to have to re-establish a clientid */
2399                        status = nfs4_reclaim_lease(clp);
2400                        if (status < 0)
2401                                goto out_error;
2402                        continue;
2403                }
2404
2405                /* Initialize or reset the session */
2406                if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
2407                        section = "reset session";
2408                        status = nfs4_reset_session(clp);
2409                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
2410                                continue;
2411                        if (status < 0)
2412                                goto out_error;
2413                }
2414
2415                /* Send BIND_CONN_TO_SESSION */
2416                if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
2417                                &clp->cl_state)) {
2418                        section = "bind conn to session";
2419                        status = nfs4_bind_conn_to_session(clp);
2420                        if (status < 0)
2421                                goto out_error;
2422                        continue;
2423                }
2424
2425                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
2426                        section = "check lease";
2427                        status = nfs4_check_lease(clp);
2428                        if (status < 0)
2429                                goto out_error;
2430                        continue;
2431                }
2432
2433                if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
2434                        section = "migration";
2435                        status = nfs4_handle_migration(clp);
2436                        if (status < 0)
2437                                goto out_error;
2438                }
2439
2440                if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) {
2441                        section = "lease moved";
2442                        status = nfs4_handle_lease_moved(clp);
2443                        if (status < 0)
2444                                goto out_error;
2445                }
2446
2447                /* First recover reboot state... */
2448                if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
2449                        section = "reclaim reboot";
2450                        status = nfs4_do_reclaim(clp,
2451                                clp->cl_mvops->reboot_recovery_ops);
2452                        if (status == -EAGAIN)
2453                                continue;
2454                        if (status < 0)
2455                                goto out_error;
2456                        nfs4_state_end_reclaim_reboot(clp);
2457                }
2458
2459                /* Detect expired delegations... */
2460                if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
2461                        section = "detect expired delegations";
2462                        nfs_reap_expired_delegations(clp);
2463                        continue;
2464                }
2465
2466                /* Now recover expired state... */
2467                if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
2468                        section = "reclaim nograce";
2469                        status = nfs4_do_reclaim(clp,
2470                                clp->cl_mvops->nograce_recovery_ops);
2471                        if (status == -EAGAIN)
2472                                continue;
2473                        if (status < 0)
2474                                goto out_error;
2475                }
2476
2477                nfs4_end_drain_session(clp);
2478                if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
2479                        nfs_client_return_marked_delegations(clp);
2480                        continue;
2481                }
2482
2483                nfs4_clear_state_manager_bit(clp);
2484                /* Did we race with an attempt to give us more work? */
2485                if (clp->cl_state == 0)
2486                        break;
2487                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
2488                        break;
2489        } while (atomic_read(&clp->cl_count) > 1);
2490        return;
2491out_error:
2492        if (strlen(section))
2493                section_sep = ": ";
2494        pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
2495                        " with error %d\n", section_sep, section,
2496                        clp->cl_hostname, -status);
2497        ssleep(1);
2498        nfs4_end_drain_session(clp);
2499        nfs4_clear_state_manager_bit(clp);
2500}
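
/*
 * Note on ordering: the loop above deliberately repairs the lease first
 * (purge state, lease expired), then the session (reset and
 * BIND_CONN_TO_SESSION), then checks the lease and handles migration and
 * lease-moved events, and only then reclaims state: reboot (grace) reclaim,
 * expired-delegation detection, no-grace reclaim, and finally any
 * delegations queued for return.  Most branches "continue" back to the top
 * because each step may set further NFS4CLNT_* bits that must be handled
 * before later steps make sense.
 */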
2501
2502static int nfs4_run_state_manager(void *ptr)
2503{
2504        struct nfs_client *clp = ptr;
2505
2506        allow_signal(SIGKILL);
2507        nfs4_state_manager(clp);
2508        nfs_put_client(clp);
2509        module_put_and_exit(0);
2510        return 0;
2511}
2512
2513/*
2514 * Local variables:
2515 *  c-basic-offset: 8
2516 * End:
2517 */
2518