linux/net/sunrpc/clnt.c
   1/*
   2 *  linux/net/sunrpc/clnt.c
   3 *
   4 *  This file contains the high-level RPC interface.
   5 *  It is modeled as a finite state machine to support both synchronous
   6 *  and asynchronous requests.
   7 *
   8 *  -   RPC header generation and argument serialization.
   9 *  -   Credential refresh.
  10 *  -   TCP connect handling.
  11 *  -   Retry of operation when it is suspected the operation failed because
  12 *      of uid squashing on the server, or when the credentials were stale
  13 *      and need to be refreshed, or when a packet was damaged in transit.
   14 *      This may have to be moved to the VFS layer.
  15 *
  16 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
  17 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
  18 */
  19
  20
  21#include <linux/module.h>
  22#include <linux/types.h>
  23#include <linux/kallsyms.h>
  24#include <linux/mm.h>
  25#include <linux/namei.h>
  26#include <linux/mount.h>
  27#include <linux/slab.h>
  28#include <linux/rcupdate.h>
  29#include <linux/utsname.h>
  30#include <linux/workqueue.h>
  31#include <linux/in.h>
  32#include <linux/in6.h>
  33#include <linux/un.h>
  34
  35#include <linux/sunrpc/clnt.h>
  36#include <linux/sunrpc/addr.h>
  37#include <linux/sunrpc/rpc_pipe_fs.h>
  38#include <linux/sunrpc/metrics.h>
  39#include <linux/sunrpc/bc_xprt.h>
  40#include <trace/events/sunrpc.h>
  41
  42#include "sunrpc.h"
  43#include "netns.h"
  44
  45#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  46# define RPCDBG_FACILITY        RPCDBG_CALL
  47#endif
  48
  49#define dprint_status(t)                                        \
  50        dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
  51                        __func__, t->tk_status)
  52
  53/*
   54 * All RPC clients are linked into the per-network-namespace client list
  55 */
  56
  57static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
  58
  59
  60static void     call_start(struct rpc_task *task);
  61static void     call_reserve(struct rpc_task *task);
  62static void     call_reserveresult(struct rpc_task *task);
  63static void     call_allocate(struct rpc_task *task);
  64static void     call_decode(struct rpc_task *task);
  65static void     call_bind(struct rpc_task *task);
  66static void     call_bind_status(struct rpc_task *task);
  67static void     call_transmit(struct rpc_task *task);
  68#if defined(CONFIG_SUNRPC_BACKCHANNEL)
  69static void     call_bc_transmit(struct rpc_task *task);
  70#endif /* CONFIG_SUNRPC_BACKCHANNEL */
  71static void     call_status(struct rpc_task *task);
  72static void     call_transmit_status(struct rpc_task *task);
  73static void     call_refresh(struct rpc_task *task);
  74static void     call_refreshresult(struct rpc_task *task);
  75static void     call_timeout(struct rpc_task *task);
  76static void     call_connect(struct rpc_task *task);
  77static void     call_connect_status(struct rpc_task *task);
  78
  79static __be32   *rpc_encode_header(struct rpc_task *task);
  80static __be32   *rpc_verify_header(struct rpc_task *task);
  81static int      rpc_ping(struct rpc_clnt *clnt);
  82
  83static void rpc_register_client(struct rpc_clnt *clnt)
  84{
  85        struct net *net = rpc_net_ns(clnt);
  86        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
  87
  88        spin_lock(&sn->rpc_client_lock);
  89        list_add(&clnt->cl_clients, &sn->all_clients);
  90        spin_unlock(&sn->rpc_client_lock);
  91}
  92
  93static void rpc_unregister_client(struct rpc_clnt *clnt)
  94{
  95        struct net *net = rpc_net_ns(clnt);
  96        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
  97
  98        spin_lock(&sn->rpc_client_lock);
  99        list_del(&clnt->cl_clients);
 100        spin_unlock(&sn->rpc_client_lock);
 101}
 102
 103static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
 104{
 105        rpc_remove_client_dir(clnt);
 106}
 107
 108static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
 109{
 110        struct net *net = rpc_net_ns(clnt);
 111        struct super_block *pipefs_sb;
 112
 113        pipefs_sb = rpc_get_sb_net(net);
 114        if (pipefs_sb) {
 115                __rpc_clnt_remove_pipedir(clnt);
 116                rpc_put_sb_net(net);
 117        }
 118}
 119
 120static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
 121                                    struct rpc_clnt *clnt)
 122{
 123        static uint32_t clntid;
 124        const char *dir_name = clnt->cl_program->pipe_dir_name;
 125        char name[15];
 126        struct dentry *dir, *dentry;
 127
 128        dir = rpc_d_lookup_sb(sb, dir_name);
 129        if (dir == NULL) {
 130                pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
 131                return dir;
 132        }
 133        for (;;) {
 134                snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
 135                name[sizeof(name) - 1] = '\0';
 136                dentry = rpc_create_client_dir(dir, name, clnt);
 137                if (!IS_ERR(dentry))
 138                        break;
 139                if (dentry == ERR_PTR(-EEXIST))
 140                        continue;
 141                printk(KERN_INFO "RPC: Couldn't create pipefs entry"
 142                                " %s/%s, error %ld\n",
 143                                dir_name, name, PTR_ERR(dentry));
 144                break;
 145        }
 146        dput(dir);
 147        return dentry;
 148}
 149
 150static int
 151rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
 152{
 153        struct dentry *dentry;
 154
 155        if (clnt->cl_program->pipe_dir_name != NULL) {
 156                dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
 157                if (IS_ERR(dentry))
 158                        return PTR_ERR(dentry);
 159        }
 160        return 0;
 161}
 162
 163static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
 164{
 165        if (clnt->cl_program->pipe_dir_name == NULL)
 166                return 1;
 167
 168        switch (event) {
 169        case RPC_PIPEFS_MOUNT:
 170                if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
 171                        return 1;
 172                if (atomic_read(&clnt->cl_count) == 0)
 173                        return 1;
 174                break;
 175        case RPC_PIPEFS_UMOUNT:
 176                if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
 177                        return 1;
 178                break;
 179        }
 180        return 0;
 181}
 182
 183static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
 184                                   struct super_block *sb)
 185{
 186        struct dentry *dentry;
 187        int err = 0;
 188
 189        switch (event) {
 190        case RPC_PIPEFS_MOUNT:
 191                dentry = rpc_setup_pipedir_sb(sb, clnt);
 192                if (!dentry)
 193                        return -ENOENT;
 194                if (IS_ERR(dentry))
 195                        return PTR_ERR(dentry);
 196                break;
 197        case RPC_PIPEFS_UMOUNT:
 198                __rpc_clnt_remove_pipedir(clnt);
 199                break;
 200        default:
 201                printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
 202                return -ENOTSUPP;
 203        }
 204        return err;
 205}
 206
 207static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
 208                                struct super_block *sb)
 209{
 210        int error = 0;
 211
 212        for (;; clnt = clnt->cl_parent) {
 213                if (!rpc_clnt_skip_event(clnt, event))
 214                        error = __rpc_clnt_handle_event(clnt, event, sb);
 215                if (error || clnt == clnt->cl_parent)
 216                        break;
 217        }
 218        return error;
 219}
 220
 221static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 222{
 223        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 224        struct rpc_clnt *clnt;
 225
 226        spin_lock(&sn->rpc_client_lock);
 227        list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
 228                if (rpc_clnt_skip_event(clnt, event))
 229                        continue;
 230                spin_unlock(&sn->rpc_client_lock);
 231                return clnt;
 232        }
 233        spin_unlock(&sn->rpc_client_lock);
 234        return NULL;
 235}
 236
 237static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
 238                            void *ptr)
 239{
 240        struct super_block *sb = ptr;
 241        struct rpc_clnt *clnt;
 242        int error = 0;
 243
 244        while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
 245                error = __rpc_pipefs_event(clnt, event, sb);
 246                if (error)
 247                        break;
 248        }
 249        return error;
 250}
 251
 252static struct notifier_block rpc_clients_block = {
 253        .notifier_call  = rpc_pipefs_event,
 254        .priority       = SUNRPC_PIPEFS_RPC_PRIO,
 255};
 256
 257int rpc_clients_notifier_register(void)
 258{
 259        return rpc_pipefs_notifier_register(&rpc_clients_block);
 260}
 261
 262void rpc_clients_notifier_unregister(void)
 263{
 264        return rpc_pipefs_notifier_unregister(&rpc_clients_block);
 265}
 266
 267static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
 268                struct rpc_xprt *xprt,
 269                const struct rpc_timeout *timeout)
 270{
 271        struct rpc_xprt *old;
 272
 273        spin_lock(&clnt->cl_lock);
 274        old = rcu_dereference_protected(clnt->cl_xprt,
 275                        lockdep_is_held(&clnt->cl_lock));
 276
 277        if (!xprt_bound(xprt))
 278                clnt->cl_autobind = 1;
 279
 280        clnt->cl_timeout = timeout;
 281        rcu_assign_pointer(clnt->cl_xprt, xprt);
 282        spin_unlock(&clnt->cl_lock);
 283
 284        return old;
 285}
 286
 287static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
 288{
 289        clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
 290                        nodename, sizeof(clnt->cl_nodename));
 291}
 292
 293static int rpc_client_register(struct rpc_clnt *clnt,
 294                               rpc_authflavor_t pseudoflavor,
 295                               const char *client_name)
 296{
 297        struct rpc_auth_create_args auth_args = {
 298                .pseudoflavor = pseudoflavor,
 299                .target_name = client_name,
 300        };
 301        struct rpc_auth *auth;
 302        struct net *net = rpc_net_ns(clnt);
 303        struct super_block *pipefs_sb;
 304        int err;
 305
 306        rpc_clnt_debugfs_register(clnt);
 307
 308        pipefs_sb = rpc_get_sb_net(net);
 309        if (pipefs_sb) {
 310                err = rpc_setup_pipedir(pipefs_sb, clnt);
 311                if (err)
 312                        goto out;
 313        }
 314
 315        rpc_register_client(clnt);
 316        if (pipefs_sb)
 317                rpc_put_sb_net(net);
 318
 319        auth = rpcauth_create(&auth_args, clnt);
 320        if (IS_ERR(auth)) {
 321                dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
 322                                pseudoflavor);
 323                err = PTR_ERR(auth);
 324                goto err_auth;
 325        }
 326        return 0;
 327err_auth:
 328        pipefs_sb = rpc_get_sb_net(net);
 329        rpc_unregister_client(clnt);
 330        __rpc_clnt_remove_pipedir(clnt);
 331out:
 332        if (pipefs_sb)
 333                rpc_put_sb_net(net);
 334        rpc_clnt_debugfs_unregister(clnt);
 335        return err;
 336}
 337
 338static DEFINE_IDA(rpc_clids);
 339
 340void rpc_cleanup_clids(void)
 341{
 342        ida_destroy(&rpc_clids);
 343}
 344
 345static int rpc_alloc_clid(struct rpc_clnt *clnt)
 346{
 347        int clid;
 348
 349        clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
 350        if (clid < 0)
 351                return clid;
 352        clnt->cl_clid = clid;
 353        return 0;
 354}
 355
 356static void rpc_free_clid(struct rpc_clnt *clnt)
 357{
 358        ida_simple_remove(&rpc_clids, clnt->cl_clid);
 359}
 360
 361static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
 362                struct rpc_xprt_switch *xps,
 363                struct rpc_xprt *xprt,
 364                struct rpc_clnt *parent)
 365{
 366        const struct rpc_program *program = args->program;
 367        const struct rpc_version *version;
 368        struct rpc_clnt *clnt = NULL;
 369        const struct rpc_timeout *timeout;
 370        const char *nodename = args->nodename;
 371        int err;
 372
 373        /* sanity check the name before trying to print it */
 374        dprintk("RPC:       creating %s client for %s (xprt %p)\n",
 375                        program->name, args->servername, xprt);
 376
 377        err = rpciod_up();
 378        if (err)
 379                goto out_no_rpciod;
 380
 381        err = -EINVAL;
 382        if (args->version >= program->nrvers)
 383                goto out_err;
 384        version = program->version[args->version];
 385        if (version == NULL)
 386                goto out_err;
 387
 388        err = -ENOMEM;
 389        clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
 390        if (!clnt)
 391                goto out_err;
 392        clnt->cl_parent = parent ? : clnt;
 393
 394        err = rpc_alloc_clid(clnt);
 395        if (err)
 396                goto out_no_clid;
 397
 398        clnt->cl_procinfo = version->procs;
 399        clnt->cl_maxproc  = version->nrprocs;
 400        clnt->cl_prog     = args->prognumber ? : program->number;
 401        clnt->cl_vers     = version->number;
 402        clnt->cl_stats    = program->stats;
 403        clnt->cl_metrics  = rpc_alloc_iostats(clnt);
 404        rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
 405        err = -ENOMEM;
 406        if (clnt->cl_metrics == NULL)
 407                goto out_no_stats;
 408        clnt->cl_program  = program;
 409        INIT_LIST_HEAD(&clnt->cl_tasks);
 410        spin_lock_init(&clnt->cl_lock);
 411
 412        timeout = xprt->timeout;
 413        if (args->timeout != NULL) {
 414                memcpy(&clnt->cl_timeout_default, args->timeout,
 415                                sizeof(clnt->cl_timeout_default));
 416                timeout = &clnt->cl_timeout_default;
 417        }
 418
 419        rpc_clnt_set_transport(clnt, xprt, timeout);
 420        xprt_iter_init(&clnt->cl_xpi, xps);
 421        xprt_switch_put(xps);
 422
 423        clnt->cl_rtt = &clnt->cl_rtt_default;
 424        rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
 425
 426        atomic_set(&clnt->cl_count, 1);
 427
 428        if (nodename == NULL)
 429                nodename = utsname()->nodename;
 430        /* save the nodename */
 431        rpc_clnt_set_nodename(clnt, nodename);
 432
 433        err = rpc_client_register(clnt, args->authflavor, args->client_name);
 434        if (err)
 435                goto out_no_path;
 436        if (parent)
 437                atomic_inc(&parent->cl_count);
 438        return clnt;
 439
 440out_no_path:
 441        rpc_free_iostats(clnt->cl_metrics);
 442out_no_stats:
 443        rpc_free_clid(clnt);
 444out_no_clid:
 445        kfree(clnt);
 446out_err:
 447        rpciod_down();
 448out_no_rpciod:
 449        xprt_switch_put(xps);
 450        xprt_put(xprt);
 451        return ERR_PTR(err);
 452}
 453
 454static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
 455                                        struct rpc_xprt *xprt)
 456{
 457        struct rpc_clnt *clnt = NULL;
 458        struct rpc_xprt_switch *xps;
 459
 460        if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
 461                WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
 462                xps = args->bc_xprt->xpt_bc_xps;
 463                xprt_switch_get(xps);
 464        } else {
 465                xps = xprt_switch_alloc(xprt, GFP_KERNEL);
 466                if (xps == NULL) {
 467                        xprt_put(xprt);
 468                        return ERR_PTR(-ENOMEM);
 469                }
 470                if (xprt->bc_xprt) {
 471                        xprt_switch_get(xps);
 472                        xprt->bc_xprt->xpt_bc_xps = xps;
 473                }
 474        }
 475        clnt = rpc_new_client(args, xps, xprt, NULL);
 476        if (IS_ERR(clnt))
 477                return clnt;
 478
 479        if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
 480                int err = rpc_ping(clnt);
 481                if (err != 0) {
 482                        rpc_shutdown_client(clnt);
 483                        return ERR_PTR(err);
 484                }
 485        }
 486
 487        clnt->cl_softrtry = 1;
 488        if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
 489                clnt->cl_softrtry = 0;
 490
 491        if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
 492                clnt->cl_autobind = 1;
 493        if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
 494                clnt->cl_noretranstimeo = 1;
 495        if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
 496                clnt->cl_discrtry = 1;
 497        if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 498                clnt->cl_chatty = 1;
 499
 500        return clnt;
 501}
 502
 503/**
 504 * rpc_create - create an RPC client and transport with one call
 505 * @args: rpc_clnt create argument structure
 506 *
 507 * Creates and initializes an RPC transport and an RPC client.
 508 *
 509 * It can ping the server in order to determine if it is up, and to see if
 510 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 511 * this behavior so asynchronous tasks can also use rpc_create.
 512 */
 513struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 514{
 515        struct rpc_xprt *xprt;
 516        struct xprt_create xprtargs = {
 517                .net = args->net,
 518                .ident = args->protocol,
 519                .srcaddr = args->saddress,
 520                .dstaddr = args->address,
 521                .addrlen = args->addrsize,
 522                .servername = args->servername,
 523                .bc_xprt = args->bc_xprt,
 524        };
 525        char servername[48];
 526
 527        if (args->bc_xprt) {
 528                WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
 529                xprt = args->bc_xprt->xpt_bc_xprt;
 530                if (xprt) {
 531                        xprt_get(xprt);
 532                        return rpc_create_xprt(args, xprt);
 533                }
 534        }
 535
 536        if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
 537                xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
 538        if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
 539                xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
 540        /*
 541         * If the caller chooses not to specify a hostname, whip
 542         * up a string representation of the passed-in address.
 543         */
 544        if (xprtargs.servername == NULL) {
 545                struct sockaddr_un *sun =
 546                                (struct sockaddr_un *)args->address;
 547                struct sockaddr_in *sin =
 548                                (struct sockaddr_in *)args->address;
 549                struct sockaddr_in6 *sin6 =
 550                                (struct sockaddr_in6 *)args->address;
 551
 552                servername[0] = '\0';
 553                switch (args->address->sa_family) {
 554                case AF_LOCAL:
 555                        snprintf(servername, sizeof(servername), "%s",
 556                                 sun->sun_path);
 557                        break;
 558                case AF_INET:
 559                        snprintf(servername, sizeof(servername), "%pI4",
 560                                 &sin->sin_addr.s_addr);
 561                        break;
 562                case AF_INET6:
 563                        snprintf(servername, sizeof(servername), "%pI6",
 564                                 &sin6->sin6_addr);
 565                        break;
 566                default:
 567                        /* caller wants default server name, but
 568                         * address family isn't recognized. */
 569                        return ERR_PTR(-EINVAL);
 570                }
 571                xprtargs.servername = servername;
 572        }
 573
 574        xprt = xprt_create_transport(&xprtargs);
 575        if (IS_ERR(xprt))
 576                return (struct rpc_clnt *)xprt;
 577
 578        /*
 579         * By default, kernel RPC client connects from a reserved port.
 580         * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
 581         * but it is always enabled for rpciod, which handles the connect
 582         * operation.
 583         */
 584        xprt->resvport = 1;
 585        if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
 586                xprt->resvport = 0;
 587
 588        return rpc_create_xprt(args, xprt);
 589}
 590EXPORT_SYMBOL_GPL(rpc_create);
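
/*
 * A minimal usage sketch for rpc_create(): the program descriptor
 * ("example_program"), the server address variable and the auth flavor
 * chosen below are illustrative assumptions, not definitions from this
 * file.  Errors follow the usual ERR_PTR convention.
 *
 *	struct sockaddr_in server_addr;		// filled in by the caller
 *	struct rpc_create_args args = {
 *		.net		= &init_net,	// initial netns, for simplicity
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&server_addr,
 *		.addrsize	= sizeof(server_addr),
 *		.servername	= "server.example.org",
 *		.program	= &example_program,
 *		.version	= 1,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	...
 *	rpc_shutdown_client(clnt);	// kills outstanding tasks, drops ref
 */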
 591
 592/*
 593 * This function clones the RPC client structure. It allows us to share the
 594 * same transport while varying parameters such as the authentication
 595 * flavour.
 596 */
 597static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
 598                                           struct rpc_clnt *clnt)
 599{
 600        struct rpc_xprt_switch *xps;
 601        struct rpc_xprt *xprt;
 602        struct rpc_clnt *new;
 603        int err;
 604
 605        err = -ENOMEM;
 606        rcu_read_lock();
 607        xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
 608        xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
 609        rcu_read_unlock();
 610        if (xprt == NULL || xps == NULL) {
 611                xprt_put(xprt);
 612                xprt_switch_put(xps);
 613                goto out_err;
 614        }
 615        args->servername = xprt->servername;
 616        args->nodename = clnt->cl_nodename;
 617
 618        new = rpc_new_client(args, xps, xprt, clnt);
 619        if (IS_ERR(new)) {
 620                err = PTR_ERR(new);
 621                goto out_err;
 622        }
 623
 624        /* Turn off autobind on clones */
 625        new->cl_autobind = 0;
 626        new->cl_softrtry = clnt->cl_softrtry;
 627        new->cl_noretranstimeo = clnt->cl_noretranstimeo;
 628        new->cl_discrtry = clnt->cl_discrtry;
 629        new->cl_chatty = clnt->cl_chatty;
 630        return new;
 631
 632out_err:
 633        dprintk("RPC:       %s: returned error %d\n", __func__, err);
 634        return ERR_PTR(err);
 635}
 636
 637/**
 638 * rpc_clone_client - Clone an RPC client structure
 639 *
 640 * @clnt: RPC client whose parameters are copied
 641 *
 642 * Returns a fresh RPC client or an ERR_PTR.
 643 */
 644struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
 645{
 646        struct rpc_create_args args = {
 647                .program        = clnt->cl_program,
 648                .prognumber     = clnt->cl_prog,
 649                .version        = clnt->cl_vers,
 650                .authflavor     = clnt->cl_auth->au_flavor,
 651        };
 652        return __rpc_clone_client(&args, clnt);
 653}
 654EXPORT_SYMBOL_GPL(rpc_clone_client);
 655
 656/**
 657 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 658 *
 659 * @clnt: RPC client whose parameters are copied
 660 * @flavor: security flavor for new client
 661 *
 662 * Returns a fresh RPC client or an ERR_PTR.
 663 */
 664struct rpc_clnt *
 665rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
 666{
 667        struct rpc_create_args args = {
 668                .program        = clnt->cl_program,
 669                .prognumber     = clnt->cl_prog,
 670                .version        = clnt->cl_vers,
 671                .authflavor     = flavor,
 672        };
 673        return __rpc_clone_client(&args, clnt);
 674}
 675EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
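
/*
 * Sketch of the cloning helpers above: a caller holding an existing
 * client can obtain a second rpc_clnt that shares the same transport but
 * authenticates with a different flavor.  RPC_AUTH_GSS_KRB5 is only an
 * illustrative choice here.
 *
 *	struct rpc_clnt *krb5_clnt =
 *		rpc_clone_client_set_auth(clnt, RPC_AUTH_GSS_KRB5);
 *
 *	if (IS_ERR(krb5_clnt))
 *		return PTR_ERR(krb5_clnt);
 *	...
 *	rpc_shutdown_client(krb5_clnt);
 */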
 676
 677/**
 678 * rpc_switch_client_transport: switch the RPC transport on the fly
 679 * @clnt: pointer to a struct rpc_clnt
 680 * @args: pointer to the new transport arguments
 681 * @timeout: pointer to the new timeout parameters
 682 *
 683 * This function allows the caller to switch the RPC transport for the
 684 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 685 * server, for instance.  It assumes that the caller has ensured that
 686 * there are no active RPC tasks by using some form of locking.
 687 *
 688 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 689 * negative errno is returned, and "clnt" continues to use the old
 690 * xprt.
 691 */
 692int rpc_switch_client_transport(struct rpc_clnt *clnt,
 693                struct xprt_create *args,
 694                const struct rpc_timeout *timeout)
 695{
 696        const struct rpc_timeout *old_timeo;
 697        rpc_authflavor_t pseudoflavor;
 698        struct rpc_xprt_switch *xps, *oldxps;
 699        struct rpc_xprt *xprt, *old;
 700        struct rpc_clnt *parent;
 701        int err;
 702
 703        xprt = xprt_create_transport(args);
 704        if (IS_ERR(xprt)) {
 705                dprintk("RPC:       failed to create new xprt for clnt %p\n",
 706                        clnt);
 707                return PTR_ERR(xprt);
 708        }
 709
 710        xps = xprt_switch_alloc(xprt, GFP_KERNEL);
 711        if (xps == NULL) {
 712                xprt_put(xprt);
 713                return -ENOMEM;
 714        }
 715
 716        pseudoflavor = clnt->cl_auth->au_flavor;
 717
 718        old_timeo = clnt->cl_timeout;
 719        old = rpc_clnt_set_transport(clnt, xprt, timeout);
 720        oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);
 721
 722        rpc_unregister_client(clnt);
 723        __rpc_clnt_remove_pipedir(clnt);
 724        rpc_clnt_debugfs_unregister(clnt);
 725
 726        /*
 727         * A new transport was created.  "clnt" therefore
 728         * becomes the root of a new cl_parent tree.  clnt's
 729         * children, if it has any, still point to the old xprt.
 730         */
 731        parent = clnt->cl_parent;
 732        clnt->cl_parent = clnt;
 733
 734        /*
 735         * The old rpc_auth cache cannot be re-used.  GSS
 736         * contexts in particular are between a single
 737         * client and server.
 738         */
 739        err = rpc_client_register(clnt, pseudoflavor, NULL);
 740        if (err)
 741                goto out_revert;
 742
 743        synchronize_rcu();
 744        if (parent != clnt)
 745                rpc_release_client(parent);
 746        xprt_switch_put(oldxps);
 747        xprt_put(old);
 748        dprintk("RPC:       replaced xprt for clnt %p\n", clnt);
 749        return 0;
 750
 751out_revert:
 752        xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
 753        rpc_clnt_set_transport(clnt, old, old_timeo);
 754        clnt->cl_parent = parent;
 755        rpc_client_register(clnt, pseudoflavor, NULL);
 756        xprt_switch_put(xps);
 757        xprt_put(xprt);
 758        dprintk("RPC:       failed to switch xprt for clnt %p\n", clnt);
 759        return err;
 760}
 761EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
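
/*
 * Sketch of a transport switch: the caller fills in a struct xprt_create
 * for the replacement transport and, as documented above, must itself
 * ensure that no RPC tasks are active.  The new address and server name
 * are illustrative assumptions.
 *
 *	struct xprt_create xprtargs = {
 *		.net		= rpc_net_ns(clnt),
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.dstaddr	= (struct sockaddr *)&mirror_addr,
 *		.addrlen	= sizeof(mirror_addr),
 *		.servername	= "mirror.example.org",
 *	};
 *	int err;
 *
 *	err = rpc_switch_client_transport(clnt, &xprtargs, clnt->cl_timeout);
 *	if (err)
 *		return err;	// clnt still uses the old transport
 */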
 762
 763static
 764int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
 765{
 766        struct rpc_xprt_switch *xps;
 767
 768        rcu_read_lock();
 769        xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
 770        rcu_read_unlock();
 771        if (xps == NULL)
 772                return -EAGAIN;
 773        xprt_iter_init_listall(xpi, xps);
 774        xprt_switch_put(xps);
 775        return 0;
 776}
 777
 778/**
 779 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 780 * @clnt: pointer to client
 781 * @fn: function to apply
 782 * @data: void pointer to function data
 783 *
 784 * Iterates through the list of RPC transports currently attached to the
 785 * client and applies the function fn(clnt, xprt, data).
 786 *
 787 * On error, the iteration stops, and the function returns the error value.
 788 */
 789int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
 790                int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
 791                void *data)
 792{
 793        struct rpc_xprt_iter xpi;
 794        int ret;
 795
 796        ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
 797        if (ret)
 798                return ret;
 799        for (;;) {
 800                struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);
 801
 802                if (!xprt)
 803                        break;
 804                ret = fn(clnt, xprt, data);
 805                xprt_put(xprt);
 806                if (ret < 0)
 807                        break;
 808        }
 809        xprt_iter_destroy(&xpi);
 810        return ret;
 811}
 812EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
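
/*
 * Sketch of an iterator callback matching the fn() signature above: this
 * hypothetical helper merely counts the transports attached to a client.
 * Returning a negative value from the callback stops the walk early.
 *
 *	static int count_one_xprt(struct rpc_clnt *clnt,
 *				  struct rpc_xprt *xprt, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_xprts = 0;
 *
 *	rpc_clnt_iterate_for_each_xprt(clnt, count_one_xprt, &nr_xprts);
 */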
 813
 814/*
 815 * Kill all tasks for the given client.
 816 * XXX: kill their descendants as well?
 817 */
 818void rpc_killall_tasks(struct rpc_clnt *clnt)
 819{
 820        struct rpc_task *rovr;
 821
 822
 823        if (list_empty(&clnt->cl_tasks))
 824                return;
 825        dprintk("RPC:       killing all tasks for client %p\n", clnt);
 826        /*
 827         * Spin lock all_tasks to prevent changes...
 828         */
 829        spin_lock(&clnt->cl_lock);
 830        list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
 831                if (!RPC_IS_ACTIVATED(rovr))
 832                        continue;
 833                if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
 834                        rovr->tk_flags |= RPC_TASK_KILLED;
 835                        rpc_exit(rovr, -EIO);
 836                        if (RPC_IS_QUEUED(rovr))
 837                                rpc_wake_up_queued_task(rovr->tk_waitqueue,
 838                                                        rovr);
 839                }
 840        }
 841        spin_unlock(&clnt->cl_lock);
 842}
 843EXPORT_SYMBOL_GPL(rpc_killall_tasks);
 844
 845/*
 846 * Properly shut down an RPC client, terminating all outstanding
 847 * requests.
 848 */
 849void rpc_shutdown_client(struct rpc_clnt *clnt)
 850{
 851        might_sleep();
 852
 853        dprintk_rcu("RPC:       shutting down %s client for %s\n",
 854                        clnt->cl_program->name,
 855                        rcu_dereference(clnt->cl_xprt)->servername);
 856
 857        while (!list_empty(&clnt->cl_tasks)) {
 858                rpc_killall_tasks(clnt);
 859                wait_event_timeout(destroy_wait,
 860                        list_empty(&clnt->cl_tasks), 1*HZ);
 861        }
 862
 863        rpc_release_client(clnt);
 864}
 865EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 866
 867/*
 868 * Free an RPC client
 869 */
 870static struct rpc_clnt *
 871rpc_free_client(struct rpc_clnt *clnt)
 872{
 873        struct rpc_clnt *parent = NULL;
 874
 875        dprintk_rcu("RPC:       destroying %s client for %s\n",
 876                        clnt->cl_program->name,
 877                        rcu_dereference(clnt->cl_xprt)->servername);
 878        if (clnt->cl_parent != clnt)
 879                parent = clnt->cl_parent;
 880        rpc_clnt_debugfs_unregister(clnt);
 881        rpc_clnt_remove_pipedir(clnt);
 882        rpc_unregister_client(clnt);
 883        rpc_free_iostats(clnt->cl_metrics);
 884        clnt->cl_metrics = NULL;
 885        xprt_put(rcu_dereference_raw(clnt->cl_xprt));
 886        xprt_iter_destroy(&clnt->cl_xpi);
 887        rpciod_down();
 888        rpc_free_clid(clnt);
 889        kfree(clnt);
 890        return parent;
 891}
 892
 893/*
  894 * Release the RPC client's auth; free the client on the final reference
 895 */
 896static struct rpc_clnt * 
 897rpc_free_auth(struct rpc_clnt *clnt)
 898{
 899        if (clnt->cl_auth == NULL)
 900                return rpc_free_client(clnt);
 901
 902        /*
 903         * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
 904         *       release remaining GSS contexts. This mechanism ensures
 905         *       that it can do so safely.
 906         */
 907        atomic_inc(&clnt->cl_count);
 908        rpcauth_release(clnt->cl_auth);
 909        clnt->cl_auth = NULL;
 910        if (atomic_dec_and_test(&clnt->cl_count))
 911                return rpc_free_client(clnt);
 912        return NULL;
 913}
 914
 915/*
 916 * Release reference to the RPC client
 917 */
 918void
 919rpc_release_client(struct rpc_clnt *clnt)
 920{
 921        dprintk("RPC:       rpc_release_client(%p)\n", clnt);
 922
 923        do {
 924                if (list_empty(&clnt->cl_tasks))
 925                        wake_up(&destroy_wait);
 926                if (!atomic_dec_and_test(&clnt->cl_count))
 927                        break;
 928                clnt = rpc_free_auth(clnt);
 929        } while (clnt != NULL);
 930}
 931EXPORT_SYMBOL_GPL(rpc_release_client);
 932
 933/**
 934 * rpc_bind_new_program - bind a new RPC program to an existing client
 935 * @old: old rpc_client
 936 * @program: rpc program to set
 937 * @vers: rpc program version
 938 *
 939 * Clones the rpc client and sets up a new RPC program. This is mainly
 940 * of use for enabling different RPC programs to share the same transport.
 941 * The Sun NFSv2/v3 ACL protocol can do this.
 942 */
 943struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 944                                      const struct rpc_program *program,
 945                                      u32 vers)
 946{
 947        struct rpc_create_args args = {
 948                .program        = program,
 949                .prognumber     = program->number,
 950                .version        = vers,
 951                .authflavor     = old->cl_auth->au_flavor,
 952        };
 953        struct rpc_clnt *clnt;
 954        int err;
 955
 956        clnt = __rpc_clone_client(&args, old);
 957        if (IS_ERR(clnt))
 958                goto out;
 959        err = rpc_ping(clnt);
 960        if (err != 0) {
 961                rpc_shutdown_client(clnt);
 962                clnt = ERR_PTR(err);
 963        }
 964out:
 965        return clnt;
 966}
 967EXPORT_SYMBOL_GPL(rpc_bind_new_program);
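
/*
 * Sketch of binding a second program: given a hypothetical "acl_program"
 * rpc_program descriptor, a caller can reuse an existing client's
 * transport for the side protocol (the descriptor name and version 3
 * below are assumptions).
 *
 *	struct rpc_clnt *acl_clnt =
 *		rpc_bind_new_program(nfs_clnt, &acl_program, 3);
 *
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */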
 968
 969void rpc_task_release_client(struct rpc_task *task)
 970{
 971        struct rpc_clnt *clnt = task->tk_client;
 972        struct rpc_xprt *xprt = task->tk_xprt;
 973
 974        if (clnt != NULL) {
 975                /* Remove from client task list */
 976                spin_lock(&clnt->cl_lock);
 977                list_del(&task->tk_task);
 978                spin_unlock(&clnt->cl_lock);
 979                task->tk_client = NULL;
 980
 981                rpc_release_client(clnt);
 982        }
 983
 984        if (xprt != NULL) {
 985                task->tk_xprt = NULL;
 986
 987                xprt_put(xprt);
 988        }
 989}
 990
 991static
 992void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
 993{
 994
 995        if (clnt != NULL) {
 996                if (task->tk_xprt == NULL)
 997                        task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
 998                task->tk_client = clnt;
 999                atomic_inc(&clnt->cl_count);
1000                if (clnt->cl_softrtry)
1001                        task->tk_flags |= RPC_TASK_SOFT;
1002                if (clnt->cl_noretranstimeo)
1003                        task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
1004                if (atomic_read(&clnt->cl_swapper))
1005                        task->tk_flags |= RPC_TASK_SWAPPER;
1006                /* Add to the client's list of all tasks */
1007                spin_lock(&clnt->cl_lock);
1008                list_add_tail(&task->tk_task, &clnt->cl_tasks);
1009                spin_unlock(&clnt->cl_lock);
1010        }
1011}
1012
1013static void
1014rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
1015{
1016        if (msg != NULL) {
1017                task->tk_msg.rpc_proc = msg->rpc_proc;
1018                task->tk_msg.rpc_argp = msg->rpc_argp;
1019                task->tk_msg.rpc_resp = msg->rpc_resp;
1020                if (msg->rpc_cred != NULL)
1021                        task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
1022        }
1023}
1024
1025/*
1026 * Default callback for async RPC calls
1027 */
1028static void
1029rpc_default_callback(struct rpc_task *task, void *data)
1030{
1031}
1032
1033static const struct rpc_call_ops rpc_default_ops = {
1034        .rpc_call_done = rpc_default_callback,
1035};
1036
1037/**
1038 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
1039 * @task_setup_data: pointer to task initialisation data
1040 */
1041struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
1042{
1043        struct rpc_task *task;
1044
1045        task = rpc_new_task(task_setup_data);
1046        if (IS_ERR(task))
1047                goto out;
1048
1049        rpc_task_set_client(task, task_setup_data->rpc_client);
1050        rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
1051
1052        if (task->tk_action == NULL)
1053                rpc_call_start(task);
1054
1055        atomic_inc(&task->tk_count);
1056        rpc_execute(task);
1057out:
1058        return task;
1059}
1060EXPORT_SYMBOL_GPL(rpc_run_task);
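
/*
 * Sketch of driving a task directly via rpc_run_task(): the client and
 * message below are assumed to exist already; rpc_default_ops is used
 * just as rpc_call_sync() does.  The returned task carries an extra
 * reference that the caller must drop with rpc_put_task().
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &rpc_default_ops,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *	int status;
 *
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = task->tk_status;
 *	rpc_put_task(task);
 */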
1061
1062/**
1063 * rpc_call_sync - Perform a synchronous RPC call
1064 * @clnt: pointer to RPC client
1065 * @msg: RPC call parameters
1066 * @flags: RPC call flags
1067 */
1068int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
1069{
1070        struct rpc_task *task;
1071        struct rpc_task_setup task_setup_data = {
1072                .rpc_client = clnt,
1073                .rpc_message = msg,
1074                .callback_ops = &rpc_default_ops,
1075                .flags = flags,
1076        };
1077        int status;
1078
1079        WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
1080        if (flags & RPC_TASK_ASYNC) {
1081                rpc_release_calldata(task_setup_data.callback_ops,
1082                        task_setup_data.callback_data);
1083                return -EINVAL;
1084        }
1085
1086        task = rpc_run_task(&task_setup_data);
1087        if (IS_ERR(task))
1088                return PTR_ERR(task);
1089        status = task->tk_status;
1090        rpc_put_task(task);
1091        return status;
1092}
1093EXPORT_SYMBOL_GPL(rpc_call_sync);
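
/*
 * Sketch of a synchronous call: the procedure table entry and the
 * argument/result structures are placeholders for whatever the caller's
 * RPC program defines.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procedures[EXAMPLEPROC_DOIT],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status;
 *
 *	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */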
1094
1095/**
1096 * rpc_call_async - Perform an asynchronous RPC call
1097 * @clnt: pointer to RPC client
1098 * @msg: RPC call parameters
1099 * @flags: RPC call flags
1100 * @tk_ops: RPC call ops
1101 * @data: user call data
1102 */
1103int
1104rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
1105               const struct rpc_call_ops *tk_ops, void *data)
1106{
1107        struct rpc_task *task;
1108        struct rpc_task_setup task_setup_data = {
1109                .rpc_client = clnt,
1110                .rpc_message = msg,
1111                .callback_ops = tk_ops,
1112                .callback_data = data,
1113                .flags = flags|RPC_TASK_ASYNC,
1114        };
1115
1116        task = rpc_run_task(&task_setup_data);
1117        if (IS_ERR(task))
1118                return PTR_ERR(task);
1119        rpc_put_task(task);
1120        return 0;
1121}
1122EXPORT_SYMBOL_GPL(rpc_call_async);
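
/*
 * Sketch of an asynchronous call: completion is reported through the
 * rpc_call_done callback, which receives the same calldata pointer that
 * was passed in here.  The ops and callback names are illustrative.
 *
 *	static void example_rpc_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status != 0)
 *			...	// handle the error
 *	}
 *
 *	static const struct rpc_call_ops example_rpc_ops = {
 *		.rpc_call_done	= example_rpc_done,
 *	};
 *
 *	rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &example_rpc_ops, calldata);
 */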
1123
1124#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1125/**
1126 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
1127 * rpc_execute against it
1128 * @req: RPC request
1129 */
1130struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
1131{
1132        struct rpc_task *task;
1133        struct xdr_buf *xbufp = &req->rq_snd_buf;
1134        struct rpc_task_setup task_setup_data = {
1135                .callback_ops = &rpc_default_ops,
1136                .flags = RPC_TASK_SOFTCONN,
1137        };
1138
1139        dprintk("RPC: rpc_run_bc_task req= %p\n", req);
1140        /*
1141         * Create an rpc_task to send the data
1142         */
1143        task = rpc_new_task(&task_setup_data);
1144        if (IS_ERR(task)) {
1145                xprt_free_bc_request(req);
1146                goto out;
1147        }
1148        task->tk_rqstp = req;
1149
1150        /*
1151         * Set up the xdr_buf length.
1152         * This also indicates that the buffer is XDR encoded already.
1153         */
1154        xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1155                        xbufp->tail[0].iov_len;
1156
1157        task->tk_action = call_bc_transmit;
1158        atomic_inc(&task->tk_count);
1159        WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
1160        rpc_execute(task);
1161
1162out:
1163        dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
1164        return task;
1165}
1166#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1167
1168void
1169rpc_call_start(struct rpc_task *task)
1170{
1171        task->tk_action = call_start;
1172}
1173EXPORT_SYMBOL_GPL(rpc_call_start);
1174
1175/**
1176 * rpc_peeraddr - extract remote peer address from clnt's xprt
1177 * @clnt: RPC client structure
1178 * @buf: target buffer
1179 * @bufsize: length of target buffer
1180 *
1181 * Returns the number of bytes that are actually in the stored address.
1182 */
1183size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
1184{
1185        size_t bytes;
1186        struct rpc_xprt *xprt;
1187
1188        rcu_read_lock();
1189        xprt = rcu_dereference(clnt->cl_xprt);
1190
1191        bytes = xprt->addrlen;
1192        if (bytes > bufsize)
1193                bytes = bufsize;
1194        memcpy(buf, &xprt->addr, bytes);
1195        rcu_read_unlock();
1196
1197        return bytes;
1198}
1199EXPORT_SYMBOL_GPL(rpc_peeraddr);
1200
1201/**
1202 * rpc_peeraddr2str - return remote peer address in printable format
1203 * @clnt: RPC client structure
1204 * @format: address format
1205 *
1206 * NB: the lifetime of the memory referenced by the returned pointer is
1207 * the same as the rpc_xprt itself.  As long as the caller uses this
1208 * pointer, it must hold the RCU read lock.
1209 */
1210const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
1211                             enum rpc_display_format_t format)
1212{
1213        struct rpc_xprt *xprt;
1214
1215        xprt = rcu_dereference(clnt->cl_xprt);
1216
1217        if (xprt->address_strings[format] != NULL)
1218                return xprt->address_strings[format];
1219        else
1220                return "unprintable";
1221}
1222EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
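
/*
 * Usage sketch honouring the RCU rule above: copy the string out while
 * the read lock is held instead of caching the returned pointer.  The
 * buffer size is an arbitrary illustrative choice.
 *
 *	char buf[64];
 *
 *	rcu_read_lock();
 *	snprintf(buf, sizeof(buf), "%s",
 *		 rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();
 */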
1223
1224static const struct sockaddr_in rpc_inaddr_loopback = {
1225        .sin_family             = AF_INET,
1226        .sin_addr.s_addr        = htonl(INADDR_ANY),
1227};
1228
1229static const struct sockaddr_in6 rpc_in6addr_loopback = {
1230        .sin6_family            = AF_INET6,
1231        .sin6_addr              = IN6ADDR_ANY_INIT,
1232};
1233
1234/*
1235 * Try a getsockname() on a connected datagram socket.  Using a
1236 * connected datagram socket prevents leaving a socket in TIME_WAIT.
1237 * This conserves the ephemeral port number space.
1238 *
1239 * Returns zero and fills in "buf" if successful; otherwise, a
1240 * negative errno is returned.
1241 */
1242static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1243                        struct sockaddr *buf, int buflen)
1244{
1245        struct socket *sock;
1246        int err;
1247
1248        err = __sock_create(net, sap->sa_family,
1249                                SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1250        if (err < 0) {
1251                dprintk("RPC:       can't create UDP socket (%d)\n", err);
1252                goto out;
1253        }
1254
1255        switch (sap->sa_family) {
1256        case AF_INET:
1257                err = kernel_bind(sock,
1258                                (struct sockaddr *)&rpc_inaddr_loopback,
1259                                sizeof(rpc_inaddr_loopback));
1260                break;
1261        case AF_INET6:
1262                err = kernel_bind(sock,
1263                                (struct sockaddr *)&rpc_in6addr_loopback,
1264                                sizeof(rpc_in6addr_loopback));
1265                break;
1266        default:
1267                err = -EAFNOSUPPORT;
1268                goto out;
1269        }
1270        if (err < 0) {
1271                dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1272                goto out_release;
1273        }
1274
1275        err = kernel_connect(sock, sap, salen, 0);
1276        if (err < 0) {
1277                dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1278                goto out_release;
1279        }
1280
1281        err = kernel_getsockname(sock, buf, &buflen);
1282        if (err < 0) {
1283                dprintk("RPC:       getsockname failed (%d)\n", err);
1284                goto out_release;
1285        }
1286
1287        err = 0;
1288        if (buf->sa_family == AF_INET6) {
1289                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1290                sin6->sin6_scope_id = 0;
1291        }
1292        dprintk("RPC:       %s succeeded\n", __func__);
1293
1294out_release:
1295        sock_release(sock);
1296out:
1297        return err;
1298}
1299
1300/*
 1301 * Scraping a connected socket failed, so we don't have a usable
1302 * local address.  Fallback: generate an address that will prevent
1303 * the server from calling us back.
1304 *
1305 * Returns zero and fills in "buf" if successful; otherwise, a
1306 * negative errno is returned.
1307 */
1308static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1309{
1310        switch (family) {
1311        case AF_INET:
1312                if (buflen < sizeof(rpc_inaddr_loopback))
1313                        return -EINVAL;
1314                memcpy(buf, &rpc_inaddr_loopback,
1315                                sizeof(rpc_inaddr_loopback));
1316                break;
1317        case AF_INET6:
1318                if (buflen < sizeof(rpc_in6addr_loopback))
1319                        return -EINVAL;
1320                memcpy(buf, &rpc_in6addr_loopback,
1321                                sizeof(rpc_in6addr_loopback));
1322                break;
1323        default:
1324                dprintk("RPC:       %s: address family not supported\n",
1325                        __func__);
1326                return -EAFNOSUPPORT;
1327        }
1328        dprintk("RPC:       %s: succeeded\n", __func__);
1329        return 0;
1330}
1331
1332/**
1333 * rpc_localaddr - discover local endpoint address for an RPC client
1334 * @clnt: RPC client structure
1335 * @buf: target buffer
1336 * @buflen: size of target buffer, in bytes
1337 *
1338 * Returns zero and fills in "buf" and "buflen" if successful;
1339 * otherwise, a negative errno is returned.
1340 *
1341 * This works even if the underlying transport is not currently connected,
1342 * or if the upper layer never previously provided a source address.
1343 *
1344 * The result of this function call is transient: multiple calls in
1345 * succession may give different results, depending on how local
1346 * networking configuration changes over time.
1347 */
1348int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1349{
1350        struct sockaddr_storage address;
1351        struct sockaddr *sap = (struct sockaddr *)&address;
1352        struct rpc_xprt *xprt;
1353        struct net *net;
1354        size_t salen;
1355        int err;
1356
1357        rcu_read_lock();
1358        xprt = rcu_dereference(clnt->cl_xprt);
1359        salen = xprt->addrlen;
1360        memcpy(sap, &xprt->addr, salen);
1361        net = get_net(xprt->xprt_net);
1362        rcu_read_unlock();
1363
1364        rpc_set_port(sap, 0);
1365        err = rpc_sockname(net, sap, salen, buf, buflen);
1366        put_net(net);
1367        if (err != 0)
1368                /* Couldn't discover local address, return ANYADDR */
1369                return rpc_anyaddr(sap->sa_family, buf, buflen);
1370        return 0;
1371}
1372EXPORT_SYMBOL_GPL(rpc_localaddr);
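
/*
 * Usage sketch: a sockaddr_storage is large enough for any address
 * family the transport may be using.
 *
 *	struct sockaddr_storage ss;
 *	int err;
 *
 *	err = rpc_localaddr(clnt, (struct sockaddr *)&ss, sizeof(ss));
 *	if (err)
 *		return err;	// not even the ANYADDR fallback applied
 */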
1373
1374void
1375rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1376{
1377        struct rpc_xprt *xprt;
1378
1379        rcu_read_lock();
1380        xprt = rcu_dereference(clnt->cl_xprt);
1381        if (xprt->ops->set_buffer_size)
1382                xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1383        rcu_read_unlock();
1384}
1385EXPORT_SYMBOL_GPL(rpc_setbufsize);
1386
1387/**
1388 * rpc_protocol - Get transport protocol number for an RPC client
1389 * @clnt: RPC client to query
1390 *
1391 */
1392int rpc_protocol(struct rpc_clnt *clnt)
1393{
1394        int protocol;
1395
1396        rcu_read_lock();
1397        protocol = rcu_dereference(clnt->cl_xprt)->prot;
1398        rcu_read_unlock();
1399        return protocol;
1400}
1401EXPORT_SYMBOL_GPL(rpc_protocol);
1402
1403/**
1404 * rpc_net_ns - Get the network namespace for this RPC client
1405 * @clnt: RPC client to query
1406 *
1407 */
1408struct net *rpc_net_ns(struct rpc_clnt *clnt)
1409{
1410        struct net *ret;
1411
1412        rcu_read_lock();
1413        ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1414        rcu_read_unlock();
1415        return ret;
1416}
1417EXPORT_SYMBOL_GPL(rpc_net_ns);
1418
1419/**
1420 * rpc_max_payload - Get maximum payload size for a transport, in bytes
1421 * @clnt: RPC client to query
1422 *
1423 * For stream transports, this is one RPC record fragment (see RFC
1424 * 1831), as we don't support multi-record requests yet.  For datagram
1425 * transports, this is the size of an IP packet minus the IP, UDP, and
1426 * RPC header sizes.
1427 */
1428size_t rpc_max_payload(struct rpc_clnt *clnt)
1429{
1430        size_t ret;
1431
1432        rcu_read_lock();
1433        ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1434        rcu_read_unlock();
1435        return ret;
1436}
1437EXPORT_SYMBOL_GPL(rpc_max_payload);
1438
1439/**
1440 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
1441 * @clnt: RPC client to query
1442 */
1443size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
1444{
1445        struct rpc_xprt *xprt;
1446        size_t ret;
1447
1448        rcu_read_lock();
1449        xprt = rcu_dereference(clnt->cl_xprt);
1450        ret = xprt->ops->bc_maxpayload(xprt);
1451        rcu_read_unlock();
1452        return ret;
1453}
1454EXPORT_SYMBOL_GPL(rpc_max_bc_payload);
1455
1456/**
1457 * rpc_get_timeout - Get timeout for transport in units of HZ
1458 * @clnt: RPC client to query
1459 */
1460unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1461{
1462        unsigned long ret;
1463
1464        rcu_read_lock();
1465        ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1466        rcu_read_unlock();
1467        return ret;
1468}
1469EXPORT_SYMBOL_GPL(rpc_get_timeout);
1470
1471/**
1472 * rpc_force_rebind - force transport to check that remote port is unchanged
1473 * @clnt: client to rebind
1474 *
1475 */
1476void rpc_force_rebind(struct rpc_clnt *clnt)
1477{
1478        if (clnt->cl_autobind) {
1479                rcu_read_lock();
1480                xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1481                rcu_read_unlock();
1482        }
1483}
1484EXPORT_SYMBOL_GPL(rpc_force_rebind);
1485
1486/*
1487 * Restart an (async) RPC call from the call_prepare state.
1488 * Usually called from within the exit handler.
1489 */
1490int
1491rpc_restart_call_prepare(struct rpc_task *task)
1492{
1493        if (RPC_ASSASSINATED(task))
1494                return 0;
1495        task->tk_action = call_start;
1496        task->tk_status = 0;
1497        if (task->tk_ops->rpc_call_prepare != NULL)
1498                task->tk_action = rpc_prepare_task;
1499        return 1;
1500}
1501EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1502
1503/*
1504 * Restart an (async) RPC call. Usually called from within the
1505 * exit handler.
1506 */
1507int
1508rpc_restart_call(struct rpc_task *task)
1509{
1510        if (RPC_ASSASSINATED(task))
1511                return 0;
1512        task->tk_action = call_start;
1513        task->tk_status = 0;
1514        return 1;
1515}
1516EXPORT_SYMBOL_GPL(rpc_restart_call);
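
/*
 * Sketch of restarting from an exit handler: an rpc_call_done callback
 * can re-queue the call instead of letting it complete, optionally after
 * a delay.  The -EJUKEBOX condition below is illustrative of the pattern.
 *
 *	static void example_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status == -EJUKEBOX) {
 *			rpc_restart_call(task);
 *			rpc_delay(task, 2 * HZ);
 *		}
 *	}
 */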
1517
1518#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
1519const char
1520*rpc_proc_name(const struct rpc_task *task)
1521{
1522        const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1523
1524        if (proc) {
1525                if (proc->p_name)
1526                        return proc->p_name;
1527                else
1528                        return "NULL";
1529        } else
1530                return "no proc";
1531}
1532#endif
1533
1534/*
1535 * 0.  Initial state
1536 *
1537 *     Other FSM states can be visited zero or more times, but
1538 *     this state is visited exactly once for each RPC.
1539 */
1540static void
1541call_start(struct rpc_task *task)
1542{
1543        struct rpc_clnt *clnt = task->tk_client;
1544
1545        dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1546                        clnt->cl_program->name, clnt->cl_vers,
1547                        rpc_proc_name(task),
1548                        (RPC_IS_ASYNC(task) ? "async" : "sync"));
1549
1550        /* Increment call count */
1551        task->tk_msg.rpc_proc->p_count++;
1552        clnt->cl_stats->rpccnt++;
1553        task->tk_action = call_reserve;
1554}
1555
1556/*
1557 * 1.   Reserve an RPC call slot
1558 */
1559static void
1560call_reserve(struct rpc_task *task)
1561{
1562        dprint_status(task);
1563
1564        task->tk_status  = 0;
1565        task->tk_action  = call_reserveresult;
1566        xprt_reserve(task);
1567}
1568
1569static void call_retry_reserve(struct rpc_task *task);
1570
1571/*
1572 * 1b.  Grok the result of xprt_reserve()
1573 */
1574static void
1575call_reserveresult(struct rpc_task *task)
1576{
1577        int status = task->tk_status;
1578
1579        dprint_status(task);
1580
1581        /*
1582         * After a call to xprt_reserve(), we must have either
1583         * a request slot or else an error status.
1584         */
1585        task->tk_status = 0;
1586        if (status >= 0) {
1587                if (task->tk_rqstp) {
1588                        task->tk_action = call_refresh;
1589                        return;
1590                }
1591
1592                printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1593                                __func__, status);
1594                rpc_exit(task, -EIO);
1595                return;
1596        }
1597
1598        /*
1599         * Even though there was an error, we may have acquired
1600         * a request slot somehow.  Make sure not to leak it.
1601         */
1602        if (task->tk_rqstp) {
1603                printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1604                                __func__, status);
1605                xprt_release(task);
1606        }
1607
1608        switch (status) {
1609        case -ENOMEM:
1610                rpc_delay(task, HZ >> 2);
1611        case -EAGAIN:   /* woken up; retry */
1612                task->tk_action = call_retry_reserve;
1613                return;
1614        case -EIO:      /* probably a shutdown */
1615                break;
1616        default:
1617                printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1618                                __func__, status);
1619                break;
1620        }
1621        rpc_exit(task, status);
1622}
1623
1624/*
1625 * 1c.  Retry reserving an RPC call slot
1626 */
1627static void
1628call_retry_reserve(struct rpc_task *task)
1629{
1630        dprint_status(task);
1631
1632        task->tk_status  = 0;
1633        task->tk_action  = call_reserveresult;
1634        xprt_retry_reserve(task);
1635}
1636
1637/*
1638 * 2.   Bind and/or refresh the credentials
1639 */
1640static void
1641call_refresh(struct rpc_task *task)
1642{
1643        dprint_status(task);
1644
1645        task->tk_action = call_refreshresult;
1646        task->tk_status = 0;
1647        task->tk_client->cl_stats->rpcauthrefresh++;
1648        rpcauth_refreshcred(task);
1649}
1650
1651/*
1652 * 2a.  Process the results of a credential refresh
1653 */
1654static void
1655call_refreshresult(struct rpc_task *task)
1656{
1657        int status = task->tk_status;
1658
1659        dprint_status(task);
1660
1661        task->tk_status = 0;
1662        task->tk_action = call_refresh;
1663        switch (status) {
1664        case 0:
1665                if (rpcauth_uptodatecred(task)) {
1666                        task->tk_action = call_allocate;
1667                        return;
1668                }
1669                /* Use rate-limiting and a max number of retries if refresh
1670                 * had status 0 but failed to update the cred.
1671                 */
1672        case -ETIMEDOUT:
1673                rpc_delay(task, 3*HZ);          /* fall through */
1674        case -EAGAIN:
1675                status = -EACCES;               /* fall through */
1676        case -EKEYEXPIRED:
1677                if (!task->tk_cred_retry)
1678                        break;
1679                task->tk_cred_retry--;
1680                dprintk("RPC: %5u %s: retry refresh creds\n",
1681                                task->tk_pid, __func__);
1682                return;
1683        }
1684        dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1685                                task->tk_pid, __func__, status);
1686        rpc_exit(task, status);
1687}
1688
1689/*
1690 * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1691 *      (Note: buffer memory is freed in xprt_release).
1692 */
1693static void
1694call_allocate(struct rpc_task *task)
1695{
1696        unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1697        struct rpc_rqst *req = task->tk_rqstp;
1698        struct rpc_xprt *xprt = req->rq_xprt;
1699        struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1700        int status;
1701
1702        dprint_status(task);
1703
1704        task->tk_status = 0;
1705        task->tk_action = call_bind;
1706
1707        if (req->rq_buffer)
1708                return;
1709
1710        if (proc->p_proc != 0) {
1711                BUG_ON(proc->p_arglen == 0);
1712                if (proc->p_decode != NULL)
1713                        BUG_ON(proc->p_replen == 0);
1714        }
1715
1716        /*
1717         * Calculate the size (in quads) of the RPC call
1718         * and reply headers, and convert both values
1719         * to byte sizes.
1720         */
1721        req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1722        req->rq_callsize <<= 2;
1723        req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1724        req->rq_rcvsize <<= 2;
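        /*
         * Worked example (added for illustration; the numbers are made up):
         * with an au_cslack of 2 quads and a p_arglen of 10 quads, the call
         * buffer is RPC_CALLHDRSIZE + 4 + 10 quads, and the shift by 2
         * converts that quad count into bytes.
         */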
1725
1726        status = xprt->ops->buf_alloc(task);
1727        xprt_inject_disconnect(xprt);
1728        if (status == 0)
1729                return;
1730        if (status != -ENOMEM) {
1731                rpc_exit(task, status);
1732                return;
1733        }
1734
1735        dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1736
1737        if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1738                task->tk_action = call_allocate;
1739                rpc_delay(task, HZ>>4);
1740                return;
1741        }
1742
1743        rpc_exit(task, -ERESTARTSYS);
1744}
1745
1746static inline int
1747rpc_task_need_encode(struct rpc_task *task)
1748{
1749        return task->tk_rqstp->rq_snd_buf.len == 0;
1750}
1751
1752static inline void
1753rpc_task_force_reencode(struct rpc_task *task)
1754{
1755        task->tk_rqstp->rq_snd_buf.len = 0;
1756        task->tk_rqstp->rq_bytes_sent = 0;
1757}
1758
1759/*
1760 * 3.   Encode arguments of an RPC call
1761 */
1762static void
1763rpc_xdr_encode(struct rpc_task *task)
1764{
1765        struct rpc_rqst *req = task->tk_rqstp;
1766        kxdreproc_t     encode;
1767        __be32          *p;
1768
1769        dprint_status(task);
1770
1771        xdr_buf_init(&req->rq_snd_buf,
1772                     req->rq_buffer,
1773                     req->rq_callsize);
1774        xdr_buf_init(&req->rq_rcv_buf,
1775                     req->rq_rbuffer,
1776                     req->rq_rcvsize);
1777
1778        p = rpc_encode_header(task);
1779        if (p == NULL) {
1780                printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1781                rpc_exit(task, -EIO);
1782                return;
1783        }
1784
1785        encode = task->tk_msg.rpc_proc->p_encode;
1786        if (encode == NULL)
1787                return;
1788
1789        task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1790                        task->tk_msg.rpc_argp);
1791}
1792
1793/*
1794 * 4.   Get the server port number if not yet set
1795 */
1796static void
1797call_bind(struct rpc_task *task)
1798{
1799        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1800
1801        dprint_status(task);
1802
1803        task->tk_action = call_connect;
1804        if (!xprt_bound(xprt)) {
1805                task->tk_action = call_bind_status;
1806                task->tk_timeout = xprt->bind_timeout;
1807                xprt->ops->rpcbind(task);
1808        }
1809}
1810
1811/*
1812 * 4a.  Sort out bind result
1813 */
1814static void
1815call_bind_status(struct rpc_task *task)
1816{
1817        int status = -EIO;
1818
1819        if (task->tk_status >= 0) {
1820                dprint_status(task);
1821                task->tk_status = 0;
1822                task->tk_action = call_connect;
1823                return;
1824        }
1825
1826        trace_rpc_bind_status(task);
1827        switch (task->tk_status) {
1828        case -ENOMEM:
1829                dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1830                rpc_delay(task, HZ >> 2);
1831                goto retry_timeout;
1832        case -EACCES:
1833                dprintk("RPC: %5u remote rpcbind: RPC program/version "
1834                                "unavailable\n", task->tk_pid);
1835                /* fail immediately if this is an RPC ping */
1836                if (task->tk_msg.rpc_proc->p_proc == 0) {
1837                        status = -EOPNOTSUPP;
1838                        break;
1839                }
1840                if (task->tk_rebind_retry == 0)
1841                        break;
1842                task->tk_rebind_retry--;
1843                rpc_delay(task, 3*HZ);
1844                goto retry_timeout;
1845        case -ETIMEDOUT:
1846                dprintk("RPC: %5u rpcbind request timed out\n",
1847                                task->tk_pid);
1848                goto retry_timeout;
1849        case -EPFNOSUPPORT:
1850                /* server doesn't support any rpcbind version we know of */
1851                dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1852                                task->tk_pid);
1853                break;
1854        case -EPROTONOSUPPORT:
1855                dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1856                                task->tk_pid);
1857                goto retry_timeout;
1858        case -ECONNREFUSED:             /* connection problems */
1859        case -ECONNRESET:
1860        case -ECONNABORTED:
1861        case -ENOTCONN:
1862        case -EHOSTDOWN:
1863        case -EHOSTUNREACH:
1864        case -ENETUNREACH:
1865        case -ENOBUFS:
1866        case -EPIPE:
1867                dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1868                                task->tk_pid, task->tk_status);
1869                if (!RPC_IS_SOFTCONN(task)) {
1870                        rpc_delay(task, 5*HZ);
1871                        goto retry_timeout;
1872                }
1873                status = task->tk_status;
1874                break;
1875        default:
1876                dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1877                                task->tk_pid, -task->tk_status);
1878        }
1879
1880        rpc_exit(task, status);
1881        return;
1882
1883retry_timeout:
1884        task->tk_status = 0;
1885        task->tk_action = call_timeout;
1886}
1887
1888/*
1889 * 4b.  Connect to the RPC server
1890 */
1891static void
1892call_connect(struct rpc_task *task)
1893{
1894        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1895
1896        dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1897                        task->tk_pid, xprt,
1898                        (xprt_connected(xprt) ? "is" : "is not"));
1899
1900        task->tk_action = call_transmit;
1901        if (!xprt_connected(xprt)) {
1902                task->tk_action = call_connect_status;
1903                if (task->tk_status < 0)
1904                        return;
1905                if (task->tk_flags & RPC_TASK_NOCONNECT) {
1906                        rpc_exit(task, -ENOTCONN);
1907                        return;
1908                }
1909                xprt_connect(task);
1910        }
1911}
1912
1913/*
1914 * 4c.  Sort out connect result
1915 */
1916static void
1917call_connect_status(struct rpc_task *task)
1918{
1919        struct rpc_clnt *clnt = task->tk_client;
1920        int status = task->tk_status;
1921
1922        dprint_status(task);
1923
1924        trace_rpc_connect_status(task, status);
1925        task->tk_status = 0;
1926        switch (status) {
1927        case -ECONNREFUSED:
1928        case -ECONNRESET:
1929        case -ECONNABORTED:
1930        case -ENETUNREACH:
1931        case -EHOSTUNREACH:
1932        case -EADDRINUSE:
1933        case -ENOBUFS:
1934        case -EPIPE:
1935                xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
1936                                            task->tk_rqstp->rq_connect_cookie);
1937                if (RPC_IS_SOFTCONN(task))
1938                        break;
1939                /* retry with existing socket, after a delay */
1940                rpc_delay(task, 3*HZ);          /* fall through */
1941        case -EAGAIN:
1942                /* Check for timeouts before looping back to call_bind */
1943        case -ETIMEDOUT:
1944                task->tk_action = call_timeout;
1945                return;
1946        case 0:
1947                clnt->cl_stats->netreconn++;
1948                task->tk_action = call_transmit;
1949                return;
1950        }
1951        rpc_exit(task, status);
1952}
1953
1954/*
1955 * 5.   Transmit the RPC request, and wait for reply
1956 */
1957static void
1958call_transmit(struct rpc_task *task)
1959{
1960        int is_retrans = RPC_WAS_SENT(task);
1961
1962        dprint_status(task);
1963
1964        task->tk_action = call_status;
1965        if (task->tk_status < 0)
1966                return;
1967        if (!xprt_prepare_transmit(task))
1968                return;
1969        task->tk_action = call_transmit_status;
1970        /* Encode here so that rpcsec_gss can use correct sequence number. */
1971        if (rpc_task_need_encode(task)) {
1972                rpc_xdr_encode(task);
1973                /* Did the encode result in an error condition? */
1974                if (task->tk_status != 0) {
1975                        /* Was the error nonfatal? */
1976                        if (task->tk_status == -EAGAIN)
1977                                rpc_delay(task, HZ >> 4);
1978                        else
1979                                rpc_exit(task, task->tk_status);
1980                        return;
1981                }
1982        }
1983        xprt_transmit(task);
1984        if (task->tk_status < 0)
1985                return;
1986        if (is_retrans)
1987                task->tk_client->cl_stats->rpcretrans++;
1988        /*
1989         * On success, ensure that we call xprt_end_transmit() before sleeping
1990         * in order to allow access to the socket to other RPC requests.
1991         */
1992        call_transmit_status(task);
1993        if (rpc_reply_expected(task))
1994                return;
1995        task->tk_action = rpc_exit_task;
1996        rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1997}
1998
1999/*
2000 * 5a.  Handle cleanup after a transmission
2001 */
2002static void
2003call_transmit_status(struct rpc_task *task)
2004{
2005        task->tk_action = call_status;
2006
2007        /*
2008         * Common case: success.  Force the compiler to put this
2009         * test first.
2010         */
2011        if (task->tk_status == 0) {
2012                xprt_end_transmit(task);
2013                rpc_task_force_reencode(task);
2014                return;
2015        }
2016
2017        switch (task->tk_status) {
2018        case -EAGAIN:
2019        case -ENOBUFS:
2020                break;
2021        default:
2022                dprint_status(task);
2023                xprt_end_transmit(task);
2024                rpc_task_force_reencode(task);
2025                break;
2026                /*
2027                 * Special cases: if we've been waiting on the
2028                 * socket's write_space() callback, or if the
2029                 * socket just returned a connection error,
2030                 * then hold onto the transport lock.
2031                 */
2032        case -ECONNREFUSED:
2033        case -EHOSTDOWN:
2034        case -EHOSTUNREACH:
2035        case -ENETUNREACH:
2036        case -EPERM:
2037                if (RPC_IS_SOFTCONN(task)) {
2038                        xprt_end_transmit(task);
2039                        rpc_exit(task, task->tk_status);
2040                        break;
2041                }                               /* fall through */
2042        case -ECONNRESET:
2043        case -ECONNABORTED:
2044        case -EADDRINUSE:
2045        case -ENOTCONN:
2046        case -EPIPE:
2047                rpc_task_force_reencode(task);
2048        }
2049}
2050
2051#if defined(CONFIG_SUNRPC_BACKCHANNEL)
2052/*
2053 * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
2054 * addition, disconnect on connectivity errors.
2055 */
2056static void
2057call_bc_transmit(struct rpc_task *task)
2058{
2059        struct rpc_rqst *req = task->tk_rqstp;
2060
2061        if (!xprt_prepare_transmit(task))
2062                goto out_retry;
2063
2064        if (task->tk_status < 0) {
2065                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
2066                        "error: %d\n", task->tk_status);
2067                goto out_done;
2068        }
2069        if (req->rq_connect_cookie != req->rq_xprt->connect_cookie)
2070                req->rq_bytes_sent = 0;
2071
2072        xprt_transmit(task);
2073
2074        if (task->tk_status == -EAGAIN)
2075                goto out_nospace;
2076
2077        xprt_end_transmit(task);
2078        dprint_status(task);
2079        switch (task->tk_status) {
2080        case 0:
2081                /* Success */
2082        case -EHOSTDOWN:
2083        case -EHOSTUNREACH:
2084        case -ENETUNREACH:
2085        case -ECONNRESET:
2086        case -ECONNREFUSED:
2087        case -EADDRINUSE:
2088        case -ENOTCONN:
2089        case -EPIPE:
2090                break;
2091        case -ETIMEDOUT:
2092                /*
2093                 * Problem reaching the server.  Disconnect and let the
2094                 * forechannel reestablish the connection.  The server will
2095                 * have to retransmit the backchannel request and we'll
2096                 * reprocess it.  Since these ops are idempotent, there's no
2097                 * need to cache our reply at this time.
2098                 */
2099                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
2100                        "error: %d\n", task->tk_status);
2101                xprt_conditional_disconnect(req->rq_xprt,
2102                        req->rq_connect_cookie);
2103                break;
2104        default:
2105                /*
2106                 * We were unable to reply and will have to drop the
2107                 * request.  The server should reconnect and retransmit.
2108                 */
2109                WARN_ON_ONCE(task->tk_status == -EAGAIN);
2110                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
2111                        "error: %d\n", task->tk_status);
2112                break;
2113        }
2114        rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
2115out_done:
2116        task->tk_action = rpc_exit_task;
2117        return;
2118out_nospace:
2119        req->rq_connect_cookie = req->rq_xprt->connect_cookie;
2120out_retry:
2121        task->tk_status = 0;
2122}
2123#endif /* CONFIG_SUNRPC_BACKCHANNEL */
2124
2125/*
2126 * 6.   Sort out the RPC call status
2127 */
2128static void
2129call_status(struct rpc_task *task)
2130{
2131        struct rpc_clnt *clnt = task->tk_client;
2132        struct rpc_rqst *req = task->tk_rqstp;
2133        int             status;
2134
2135        if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
2136                task->tk_status = req->rq_reply_bytes_recvd;
2137
2138        dprint_status(task);
2139
2140        status = task->tk_status;
2141        if (status >= 0) {
2142                task->tk_action = call_decode;
2143                return;
2144        }
2145
2146        trace_rpc_call_status(task);
2147        task->tk_status = 0;
2148        switch(status) {
2149        case -EHOSTDOWN:
2150        case -EHOSTUNREACH:
2151        case -ENETUNREACH:
2152        case -EPERM:
2153                if (RPC_IS_SOFTCONN(task)) {
2154                        rpc_exit(task, status);
2155                        break;
2156                }
2157                /*
2158                 * Delay any retries for 3 seconds, then handle as if it
2159                 * were a timeout.
2160                 */
2161                rpc_delay(task, 3*HZ);          /* fall through */
2162        case -ETIMEDOUT:
2163                task->tk_action = call_timeout;
2164                if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
2165                    && task->tk_client->cl_discrtry)
2166                        xprt_conditional_disconnect(req->rq_xprt,
2167                                        req->rq_connect_cookie);
2168                break;
2169        case -ECONNREFUSED:
2170        case -ECONNRESET:
2171        case -ECONNABORTED:
2172                rpc_force_rebind(clnt);         /* fall through */
2173        case -EADDRINUSE:
2174                rpc_delay(task, 3*HZ);          /* fall through */
2175        case -EPIPE:
2176        case -ENOTCONN:
2177                task->tk_action = call_bind;
2178                break;
2179        case -ENOBUFS:
2180                rpc_delay(task, HZ>>2);         /* fall through */
2181        case -EAGAIN:
2182                task->tk_action = call_transmit;
2183                break;
2184        case -EIO:
2185                /* shutdown or soft timeout */
2186                rpc_exit(task, status);
2187                break;
2188        default:
2189                if (clnt->cl_chatty)
2190                        printk("%s: RPC call returned error %d\n",
2191                               clnt->cl_program->name, -status);
2192                rpc_exit(task, status);
2193        }
2194}
2195
2196/*
2197 * 6a.  Handle RPC timeout
2198 *      We do not release the request slot, so we keep using the
2199 *      same XID for all retransmits.
2200 */
2201static void
2202call_timeout(struct rpc_task *task)
2203{
2204        struct rpc_clnt *clnt = task->tk_client;
2205
2206        if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
2207                dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
2208                goto retry;
2209        }
2210
2211        dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
2212        task->tk_timeouts++;
2213
2214        if (RPC_IS_SOFTCONN(task)) {
2215                rpc_exit(task, -ETIMEDOUT);
2216                return;
2217        }
2218        if (RPC_IS_SOFT(task)) {
2219                if (clnt->cl_chatty) {
2220                        printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
2221                                clnt->cl_program->name,
2222                                task->tk_xprt->servername);
2223                }
2224                if (task->tk_flags & RPC_TASK_TIMEOUT)
2225                        rpc_exit(task, -ETIMEDOUT);
2226                else
2227                        rpc_exit(task, -EIO);
2228                return;
2229        }
2230
2231        if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
2232                task->tk_flags |= RPC_CALL_MAJORSEEN;
2233                if (clnt->cl_chatty) {
2234                        printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
2235                                clnt->cl_program->name,
2236                                task->tk_xprt->servername);
2237                }
2238        }
2239        rpc_force_rebind(clnt);
2240        /*
2241         * Did our request time out due to an RPCSEC_GSS out-of-sequence
2242         * event? RFC2203 requires the server to drop all such requests.
2243         */
2244        rpcauth_invalcred(task);
2245
2246retry:
2247        task->tk_action = call_bind;
2248        task->tk_status = 0;
2249}
2250
2251/*
2252 * 7.   Decode the RPC reply
2253 */
2254static void
2255call_decode(struct rpc_task *task)
2256{
2257        struct rpc_clnt *clnt = task->tk_client;
2258        struct rpc_rqst *req = task->tk_rqstp;
2259        kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
2260        __be32          *p;
2261
2262        dprint_status(task);
2263
2264        if (task->tk_flags & RPC_CALL_MAJORSEEN) {
2265                if (clnt->cl_chatty) {
2266                        printk(KERN_NOTICE "%s: server %s OK\n",
2267                                clnt->cl_program->name,
2268                                task->tk_xprt->servername);
2269                }
2270                task->tk_flags &= ~RPC_CALL_MAJORSEEN;
2271        }
2272
2273        /*
2274         * Ensure that we see all writes made by xprt_complete_rqst()
2275         * before it changed req->rq_reply_bytes_recvd.
2276         */
2277        smp_rmb();
2278        req->rq_rcv_buf.len = req->rq_private_buf.len;
2279
2280        /* Check that the softirq receive buffer is valid */
2281        WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
2282                                sizeof(req->rq_rcv_buf)) != 0);
2283
2284        if (req->rq_rcv_buf.len < 12) {
2285                if (!RPC_IS_SOFT(task)) {
2286                        task->tk_action = call_bind;
2287                        goto out_retry;
2288                }
2289                dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2290                                clnt->cl_program->name, task->tk_status);
2291                task->tk_action = call_timeout;
2292                goto out_retry;
2293        }
2294
2295        p = rpc_verify_header(task);
2296        if (IS_ERR(p)) {
2297                if (p == ERR_PTR(-EAGAIN))
2298                        goto out_retry;
2299                return;
2300        }
2301
2302        task->tk_action = rpc_exit_task;
2303
2304        if (decode) {
2305                task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2306                                                      task->tk_msg.rpc_resp);
2307        }
2308        dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2309                        task->tk_status);
2310        return;
2311out_retry:
2312        task->tk_status = 0;
2313        /* Note: rpc_verify_header() may have freed the RPC slot */
2314        if (task->tk_rqstp == req) {
2315                req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2316                if (task->tk_client->cl_discrtry)
2317                        xprt_conditional_disconnect(req->rq_xprt,
2318                                        req->rq_connect_cookie);
2319        }
2320}
2321
2322static __be32 *
2323rpc_encode_header(struct rpc_task *task)
2324{
2325        struct rpc_clnt *clnt = task->tk_client;
2326        struct rpc_rqst *req = task->tk_rqstp;
2327        __be32          *p = req->rq_svec[0].iov_base;
2328
2329        /* FIXME: check buffer size? */
2330
2331        p = xprt_skip_transport_header(req->rq_xprt, p);
2332        *p++ = req->rq_xid;             /* XID */
2333        *p++ = htonl(RPC_CALL);         /* CALL */
2334        *p++ = htonl(RPC_VERSION);      /* RPC version */
2335        *p++ = htonl(clnt->cl_prog);    /* program number */
2336        *p++ = htonl(clnt->cl_vers);    /* program version */
2337        *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
2338        p = rpcauth_marshcred(task, p);
2339        req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2340        return p;
2341}
2342
2343static __be32 *
2344rpc_verify_header(struct rpc_task *task)
2345{
2346        struct rpc_clnt *clnt = task->tk_client;
2347        struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2348        int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2349        __be32  *p = iov->iov_base;
2350        u32 n;
2351        int error = -EACCES;
2352
2353        if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2354                /* RFC-1014 says that the representation of XDR data must be a
2355                 * multiple of four bytes
2356                 * - if it isn't pointer subtraction in the NFS client may give
2357                 *   undefined results
2358                 */
2359                dprintk("RPC: %5u %s: XDR representation not a multiple of"
2360                       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2361                       task->tk_rqstp->rq_rcv_buf.len);
2362                error = -EIO;
2363                goto out_err;
2364        }
2365        if ((len -= 3) < 0)
2366                goto out_overflow;
2367
2368        p += 1; /* skip XID */
2369        if ((n = ntohl(*p++)) != RPC_REPLY) {
2370                dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2371                        task->tk_pid, __func__, n);
2372                error = -EIO;
2373                goto out_garbage;
2374        }
2375
2376        if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2377                if (--len < 0)
2378                        goto out_overflow;
2379                switch ((n = ntohl(*p++))) {
2380                case RPC_AUTH_ERROR:
2381                        break;
2382                case RPC_MISMATCH:
2383                        dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2384                                task->tk_pid, __func__);
2385                        error = -EPROTONOSUPPORT;
2386                        goto out_err;
2387                default:
2388                        dprintk("RPC: %5u %s: RPC call rejected, "
2389                                "unknown error: %x\n",
2390                                task->tk_pid, __func__, n);
2391                        error = -EIO;
2392                        goto out_err;
2393                }
2394                if (--len < 0)
2395                        goto out_overflow;
2396                switch ((n = ntohl(*p++))) {
2397                case RPC_AUTH_REJECTEDCRED:
2398                case RPC_AUTH_REJECTEDVERF:
2399                case RPCSEC_GSS_CREDPROBLEM:
2400                case RPCSEC_GSS_CTXPROBLEM:
2401                        if (!task->tk_cred_retry)
2402                                break;
2403                        task->tk_cred_retry--;
2404                        dprintk("RPC: %5u %s: retry stale creds\n",
2405                                        task->tk_pid, __func__);
2406                        rpcauth_invalcred(task);
2407                        /* Ensure we obtain a new XID! */
2408                        xprt_release(task);
2409                        task->tk_action = call_reserve;
2410                        goto out_retry;
2411                case RPC_AUTH_BADCRED:
2412                case RPC_AUTH_BADVERF:
2413                        /* possibly garbled cred/verf? */
2414                        if (!task->tk_garb_retry)
2415                                break;
2416                        task->tk_garb_retry--;
2417                        dprintk("RPC: %5u %s: retry garbled creds\n",
2418                                        task->tk_pid, __func__);
2419                        task->tk_action = call_bind;
2420                        goto out_retry;
2421                case RPC_AUTH_TOOWEAK:
2422                        printk(KERN_NOTICE "RPC: server %s requires stronger "
2423                               "authentication.\n",
2424                               task->tk_xprt->servername);
2425                        break;
2426                default:
2427                        dprintk("RPC: %5u %s: unknown auth error: %x\n",
2428                                        task->tk_pid, __func__, n);
2429                        error = -EIO;
2430                }
2431                dprintk("RPC: %5u %s: call rejected %d\n",
2432                                task->tk_pid, __func__, n);
2433                goto out_err;
2434        }
2435        p = rpcauth_checkverf(task, p);
2436        if (IS_ERR(p)) {
2437                error = PTR_ERR(p);
2438                dprintk("RPC: %5u %s: auth check failed with %d\n",
2439                                task->tk_pid, __func__, error);
2440                goto out_garbage;               /* bad verifier, retry */
2441        }
2442        len = p - (__be32 *)iov->iov_base - 1;
2443        if (len < 0)
2444                goto out_overflow;
2445        switch ((n = ntohl(*p++))) {
2446        case RPC_SUCCESS:
2447                return p;
2448        case RPC_PROG_UNAVAIL:
2449                dprintk("RPC: %5u %s: program %u is unsupported "
2450                                "by server %s\n", task->tk_pid, __func__,
2451                                (unsigned int)clnt->cl_prog,
2452                                task->tk_xprt->servername);
2453                error = -EPFNOSUPPORT;
2454                goto out_err;
2455        case RPC_PROG_MISMATCH:
2456                dprintk("RPC: %5u %s: program %u, version %u unsupported "
2457                                "by server %s\n", task->tk_pid, __func__,
2458                                (unsigned int)clnt->cl_prog,
2459                                (unsigned int)clnt->cl_vers,
2460                                task->tk_xprt->servername);
2461                error = -EPROTONOSUPPORT;
2462                goto out_err;
2463        case RPC_PROC_UNAVAIL:
2464                dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
2465                                "version %u on server %s\n",
2466                                task->tk_pid, __func__,
2467                                rpc_proc_name(task),
2468                                clnt->cl_prog, clnt->cl_vers,
2469                                task->tk_xprt->servername);
2470                error = -EOPNOTSUPP;
2471                goto out_err;
2472        case RPC_GARBAGE_ARGS:
2473                dprintk("RPC: %5u %s: server saw garbage\n",
2474                                task->tk_pid, __func__);
2475                break;                  /* retry */
2476        default:
2477                dprintk("RPC: %5u %s: server accept status: %x\n",
2478                                task->tk_pid, __func__, n);
2479                /* Also retry */
2480        }
2481
2482out_garbage:
2483        clnt->cl_stats->rpcgarbage++;
2484        if (task->tk_garb_retry) {
2485                task->tk_garb_retry--;
2486                dprintk("RPC: %5u %s: retrying\n",
2487                                task->tk_pid, __func__);
2488                task->tk_action = call_bind;
2489out_retry:
2490                return ERR_PTR(-EAGAIN);
2491        }
2492out_err:
2493        rpc_exit(task, error);
2494        dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2495                        __func__, error);
2496        return ERR_PTR(error);
2497out_overflow:
2498        dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2499                        __func__);
2500        goto out_garbage;
2501}
2502
2503static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2504{
2505}
2506
2507static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2508{
2509        return 0;
2510}
2511
2512static struct rpc_procinfo rpcproc_null = {
2513        .p_encode = rpcproc_encode_null,
2514        .p_decode = rpcproc_decode_null,
2515};
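/*
 * Illustrative sketch (added; not part of the original file): a non-trivial
 * procedure entry pairs an encode and a decode callback with the same
 * signatures as rpcproc_encode_null()/rpcproc_decode_null() above, plus the
 * argument/reply sizes (in quads) that call_allocate() uses to size the
 * buffers.  The names and the 32-bit payload are made up for the example;
 * the generic xdr_stream helpers come from the XDR layer.
 */
static void example_encode_u32(void *rqstp, struct xdr_stream *xdr, void *obj)
{
        __be32 *p = xdr_reserve_space(xdr, 4);  /* room for one quad */

        if (p)
                *p = cpu_to_be32(*(u32 *)obj);
}

static int example_decode_u32(void *rqstp, struct xdr_stream *xdr, void *obj)
{
        __be32 *p = xdr_inline_decode(xdr, 4);  /* read one quad back */

        if (!p)
                return -EIO;
        *(u32 *)obj = be32_to_cpu(*p);
        return 0;
}

static struct rpc_procinfo example_proc = {
        .p_proc   = 1,                  /* hypothetical procedure number */
        .p_encode = example_encode_u32,
        .p_decode = example_decode_u32,
        .p_arglen = 1,                  /* one quad of arguments */
        .p_replen = 1,                  /* one quad of reply */
};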
2516
2517static int rpc_ping(struct rpc_clnt *clnt)
2518{
2519        struct rpc_message msg = {
2520                .rpc_proc = &rpcproc_null,
2521        };
2522        int err;
2523        msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2524        err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2525        put_rpccred(msg.rpc_cred);
2526        return err;
2527}
2528
2529static
2530struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
2531                struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
2532                const struct rpc_call_ops *ops, void *data)
2533{
2534        struct rpc_message msg = {
2535                .rpc_proc = &rpcproc_null,
2536                .rpc_cred = cred,
2537        };
2538        struct rpc_task_setup task_setup_data = {
2539                .rpc_client = clnt,
2540                .rpc_xprt = xprt,
2541                .rpc_message = &msg,
2542                .callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
2543                .callback_data = data,
2544                .flags = flags,
2545        };
2546
2547        return rpc_run_task(&task_setup_data);
2548}
2549
2550struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2551{
2552        return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
2553}
2554EXPORT_SYMBOL_GPL(rpc_call_null);
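/*
 * Illustrative sketch (added; not part of the original file): a caller can
 * use rpc_call_null() to ping a server with the NULL procedure and collect
 * the result, much as rpc_ping() does with rpc_call_sync() above.  The
 * helper name below is made up for the example.
 */
static int example_null_ping(struct rpc_clnt *clnt)
{
        struct rpc_cred *cred;
        struct rpc_task *task;
        int status;

        cred = authnull_ops.lookup_cred(NULL, NULL, 0);
        task = rpc_call_null(clnt, cred, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
        put_rpccred(cred);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = task->tk_status;       /* result of the NULL call */
        rpc_put_task(task);             /* drop our reference to the task */
        return status;
}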
2555
2556struct rpc_cb_add_xprt_calldata {
2557        struct rpc_xprt_switch *xps;
2558        struct rpc_xprt *xprt;
2559};
2560
2561static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
2562{
2563        struct rpc_cb_add_xprt_calldata *data = calldata;
2564
2565        if (task->tk_status == 0)
2566                rpc_xprt_switch_add_xprt(data->xps, data->xprt);
2567}
2568
2569static void rpc_cb_add_xprt_release(void *calldata)
2570{
2571        struct rpc_cb_add_xprt_calldata *data = calldata;
2572
2573        xprt_put(data->xprt);
2574        xprt_switch_put(data->xps);
2575        kfree(data);
2576}
2577
2578static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
2579        .rpc_call_done = rpc_cb_add_xprt_done,
2580        .rpc_release = rpc_cb_add_xprt_release,
2581};
2582
2583/**
2584 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
2585 * @clnt: pointer to struct rpc_clnt
2586 * @xps: pointer to struct rpc_xprt_switch
2587 * @xprt: pointer to struct rpc_xprt
2588 * @dummy: unused
2589 */
2590int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
2591                struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
2592                void *dummy)
2593{
2594        struct rpc_cb_add_xprt_calldata *data;
2595        struct rpc_cred *cred;
2596        struct rpc_task *task;
2597
2598        data = kmalloc(sizeof(*data), GFP_NOFS);
2599        if (!data)
2600                return -ENOMEM;
2601        data->xps = xprt_switch_get(xps);
2602        data->xprt = xprt_get(xprt);
2603
2604        cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2605        task = rpc_call_null_helper(clnt, xprt, cred,
2606                        RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC,
2607                        &rpc_cb_add_xprt_call_ops, data);
2608        put_rpccred(cred);
2609        if (IS_ERR(task))
2610                return PTR_ERR(task);
2611        rpc_put_task(task);
2612        return 1;
2613}
2614EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
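/*
 * Illustrative sketch (added; not part of the original file): the usual way
 * to use rpc_clnt_test_and_add_xprt() is as the setup callback of
 * rpc_clnt_add_xprt(), so that the new transport is pinged asynchronously
 * and only added to the client's xprt switch if the NULL call succeeds.
 * "xprtargs" is assumed to have been filled in by the caller; the wrapper
 * name is made up for the example.
 */
static int example_add_tested_xprt(struct rpc_clnt *clnt,
                                   struct xprt_create *xprtargs)
{
        /* rpc_clnt_test_and_add_xprt() ignores its last ("dummy") argument */
        return rpc_clnt_add_xprt(clnt, xprtargs,
                                 rpc_clnt_test_and_add_xprt, NULL);
}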
2615
2616/**
2617 * rpc_clnt_setup_test_and_add_xprt() - test a new transport before adding it
2618 *
2619 * This is an rpc_clnt_add_xprt setup() function which returns 1, so that:
2620 *   1) the caller of the test function must dereference the rpc_xprt_switch
2621 *      and the rpc_xprt.
2622 *   2) the test function must call rpc_xprt_switch_add_xprt, usually in
2623 *      its rpc_call_done routine.
2624 *
2625 * Upon success (return of 1), the test function adds the new transport to
2626 * the rpc_clnt xprt switch; a usage sketch follows the function below.
2627 *
2628 * @clnt: struct rpc_clnt to get the new transport
2629 * @xps:  the rpc_xprt_switch to hold the new transport
2630 * @xprt: the rpc_xprt to test
2631 * @data: a struct rpc_add_xprt_test pointer that holds the test function
2632 *        and test function call data
2633 */
2634int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
2635                                     struct rpc_xprt_switch *xps,
2636                                     struct rpc_xprt *xprt,
2637                                     void *data)
2638{
2639        struct rpc_cred *cred;
2640        struct rpc_task *task;
2641        struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
2642        int status = -EADDRINUSE;
2643
2644        xprt = xprt_get(xprt);
2645        xprt_switch_get(xps);
2646
2647        if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
2648                goto out_err;
2649
2650        /* Test the connection */
2651        cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2652        task = rpc_call_null_helper(clnt, xprt, cred,
2653                                    RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
2654                                    NULL, NULL);
2655        put_rpccred(cred);
2656        if (IS_ERR(task)) {
2657                status = PTR_ERR(task);
2658                goto out_err;
2659        }
2660        status = task->tk_status;
2661        rpc_put_task(task);
2662
2663        if (status < 0)
2664                goto out_err;
2665
2666        /* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
2667        xtest->add_xprt_test(clnt, xprt, xtest->data);
2668
2669        /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
2670        return 1;
2671out_err:
2672        xprt_put(xprt);
2673        xprt_switch_put(xps);
2674        pr_info("RPC:   %s failed: %d addr %s not added\n",
2675                __func__, status, xprt->address_strings[RPC_DISPLAY_ADDR]);
2676        return status;
2677}
2678EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
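/*
 * Illustrative sketch (added; not part of the original file): wiring a
 * caller-supplied trunking test into rpc_clnt_add_xprt() by way of
 * rpc_clnt_setup_test_and_add_xprt().  The add_xprt_test callback signature
 * is inferred from the call above (its return value is not used there);
 * reference handling is omitted here, see point 1) of the kernel-doc.
 * All example_* names are made up.
 */
static void example_add_xprt_test(struct rpc_clnt *clnt,
                                  struct rpc_xprt *xprt,
                                  void *data)
{
        /* the NULL ping already succeeded; record the transport */
        rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
}

static int example_setup_and_add(struct rpc_clnt *clnt,
                                 struct xprt_create *xprtargs)
{
        struct rpc_add_xprt_test xtest = {
                .add_xprt_test  = example_add_xprt_test,
                .data           = NULL,
        };

        return rpc_clnt_add_xprt(clnt, xprtargs,
                                 rpc_clnt_setup_test_and_add_xprt, &xtest);
}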
2679
2680/**
2681 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
2682 * @clnt: pointer to struct rpc_clnt
2683 * @xprtargs: pointer to struct xprt_create
2684 * @setup: callback to test and/or set up the connection
2685 * @data: pointer to setup function data
2686 *
2687 * Creates a new transport using the parameters set in @xprtargs and
2688 * adds it to @clnt.
2689 * If @setup is non-NULL, it is called to test connectivity and/or set up
2690 * the connection before the new transport is added.
2691 *
2692 */
2693int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2694                struct xprt_create *xprtargs,
2695                int (*setup)(struct rpc_clnt *,
2696                        struct rpc_xprt_switch *,
2697                        struct rpc_xprt *,
2698                        void *),
2699                void *data)
2700{
2701        struct rpc_xprt_switch *xps;
2702        struct rpc_xprt *xprt;
2703        unsigned long reconnect_timeout;
2704        unsigned char resvport;
2705        int ret = 0;
2706
2707        rcu_read_lock();
2708        xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
2709        xprt = xprt_iter_xprt(&clnt->cl_xpi);
2710        if (xps == NULL || xprt == NULL) {
2711                rcu_read_unlock();
2712                return -EAGAIN;
2713        }
2714        resvport = xprt->resvport;
2715        reconnect_timeout = xprt->max_reconnect_timeout;
2716        rcu_read_unlock();
2717
2718        xprt = xprt_create_transport(xprtargs);
2719        if (IS_ERR(xprt)) {
2720                ret = PTR_ERR(xprt);
2721                goto out_put_switch;
2722        }
2723        xprt->resvport = resvport;
2724        xprt->max_reconnect_timeout = reconnect_timeout;
2725
2726        rpc_xprt_switch_set_roundrobin(xps);
2727        if (setup) {
2728                ret = setup(clnt, xps, xprt, data);
2729                if (ret != 0)
2730                        goto out_put_xprt;
2731        }
2732        rpc_xprt_switch_add_xprt(xps, xprt);
2733out_put_xprt:
2734        xprt_put(xprt);
2735out_put_switch:
2736        xprt_switch_put(xps);
2737        return ret;
2738}
2739EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
2740
2741static int
2742rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
2743                struct rpc_xprt *xprt,
2744                void *data)
2745{
2746        unsigned long timeout = *((unsigned long *)data);
2747
2748        if (timeout < xprt->max_reconnect_timeout)
2749                xprt->max_reconnect_timeout = timeout;
2750        return 0;
2751}
2752
2753void
2754rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
2755{
2756        rpc_clnt_iterate_for_each_xprt(clnt,
2757                        rpc_xprt_cap_max_reconnect_timeout,
2758                        &timeo);
2759}
2760EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
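/*
 * Illustrative note (added; not part of the original file): a caller that
 * wants reconnect back-off on every transport of a client capped at, say,
 * 30 seconds (an arbitrary value, assumed to be in jiffies as elsewhere in
 * the RPC timeout handling) would simply do
 *
 *      rpc_cap_max_reconnect_timeout(clnt, 30 * HZ);
 *
 * Transports whose max_reconnect_timeout is already below the cap are left
 * untouched by the callback above.
 */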
2761
2762void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
2763{
2764        rcu_read_lock();
2765        xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
2766        rcu_read_unlock();
2767}
2768EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
2769
2770void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
2771{
2772        rcu_read_lock();
2773        rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
2774                                 xprt);
2775        rcu_read_unlock();
2776}
2777EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
2778
2779bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
2780                                   const struct sockaddr *sap)
2781{
2782        struct rpc_xprt_switch *xps;
2783        bool ret;
2784
2785        rcu_read_lock();
2786        xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
2787        ret = rpc_xprt_switch_has_addr(xps, sap);
2788        rcu_read_unlock();
2789        return ret;
2790}
2791EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
2792
2793#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
2794static void rpc_show_header(void)
2795{
2796        printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2797                "-timeout ---ops--\n");
2798}
2799
2800static void rpc_show_task(const struct rpc_clnt *clnt,
2801                          const struct rpc_task *task)
2802{
2803        const char *rpc_waitq = "none";
2804
2805        if (RPC_IS_QUEUED(task))
2806                rpc_waitq = rpc_qname(task->tk_waitqueue);
2807
2808        printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2809                task->tk_pid, task->tk_flags, task->tk_status,
2810                clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2811                clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
2812                task->tk_action, rpc_waitq);
2813}
2814
2815void rpc_show_tasks(struct net *net)
2816{
2817        struct rpc_clnt *clnt;
2818        struct rpc_task *task;
2819        int header = 0;
2820        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2821
2822        spin_lock(&sn->rpc_client_lock);
2823        list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2824                spin_lock(&clnt->cl_lock);
2825                list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2826                        if (!header) {
2827                                rpc_show_header();
2828                                header++;
2829                        }
2830                        rpc_show_task(clnt, task);
2831                }
2832                spin_unlock(&clnt->cl_lock);
2833        }
2834        spin_unlock(&sn->rpc_client_lock);
2835}
2836#endif
2837
2838#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2839static int
2840rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
2841                struct rpc_xprt *xprt,
2842                void *dummy)
2843{
2844        return xprt_enable_swap(xprt);
2845}
2846
2847int
2848rpc_clnt_swap_activate(struct rpc_clnt *clnt)
2849{
2850        if (atomic_inc_return(&clnt->cl_swapper) == 1)
2851                return rpc_clnt_iterate_for_each_xprt(clnt,
2852                                rpc_clnt_swap_activate_callback, NULL);
2853        return 0;
2854}
2855EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);
2856
2857static int
2858rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
2859                struct rpc_xprt *xprt,
2860                void *dummy)
2861{
2862        xprt_disable_swap(xprt);
2863        return 0;
2864}
2865
2866void
2867rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
2868{
2869        if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
2870                rpc_clnt_iterate_for_each_xprt(clnt,
2871                                rpc_clnt_swap_deactivate_callback, NULL);
2872}
2873EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
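/*
 * Illustrative sketch (added; not part of the original file): the activate
 * and deactivate calls above are reference counted via cl_swapper, so users
 * pair them around the period during which the client must remain usable
 * for swap traffic.  The function name is made up for the example.
 */
static int example_swap_window(struct rpc_clnt *clnt)
{
        int err;

        err = rpc_clnt_swap_activate(clnt);     /* first user enables swap on all xprts */
        if (err)
                return err;

        /* ... memory reclaim may now write through this client ... */

        rpc_clnt_swap_deactivate(clnt);         /* last user disables it again */
        return 0;
}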
2874#endif /* CONFIG_SUNRPC_SWAP */
2875