/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#define SVC_MAX_WAKING 5

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	When both need to be taken (rare), svc_serv->sv_lock is first.
 *	The BKL protects svc_serv->sv_nrthreads.
 *	svc_xprt->xpt_lock protects the svc_xprt->xpt_deferred list
 *	       and the ->xpt_auth_cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued more than once. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	provided that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
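
/* Example (illustrative sketch, not from the kernel tree): a hypothetical
 * provider's data-ready callback following the XPT_DATA rules above.
 * my_xprt_from_sk() is an assumed helper mapping a socket back to its
 * svc_xprt.
 */
#if 0
static void my_xprt_data_ready(struct sock *sk, int bytes)
{
	struct svc_xprt *xprt = my_xprt_from_sk(sk);	/* hypothetical */

	if (xprt) {
		set_bit(XPT_DATA, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);	/* required after every set of XPT_DATA */
	}
}
#endif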

int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
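
/* Example (illustrative sketch, not from the kernel tree): registering a
 * hypothetical transport class from a module's init path.  The class name
 * "foo" and svc_foo_ops are assumptions for illustration.
 */
#if 0
static struct svc_xprt_class svc_foo_class = {
	.xcl_name	= "foo",
	.xcl_owner	= THIS_MODULE,
	.xcl_ops	= &svc_foo_ops,		/* provider's ops vector */
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

static int __init svc_foo_init(void)
{
	/* Returns -EEXIST if a class with the same name is registered. */
	return svc_reg_xprt_class(&svc_foo_class);
}
#endif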

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct list_head *le;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each(le, &svc_xprt_class_list) {
		int slen;
		struct svc_xprt_class *xcl =
			list_entry(le, struct svc_xprt_class, xcl_list);

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
	    && xprt->xpt_auth_cache != NULL)
		svcauth_unix_info_release(xprt->xpt_auth_cache);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
		   struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
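
/* Example (illustrative sketch, not from the kernel tree): a hypothetical
 * xpo_create method initializing the generic portion of its transport.
 * struct my_xprt and my_xprt_class are assumptions for illustration.
 */
#if 0
static struct svc_xprt *my_xpo_create(struct svc_serv *serv,
				      struct sockaddr *sa, int salen,
				      int flags)
{
	struct my_xprt *mx;

	mx = kzalloc(sizeof(*mx), GFP_KERNEL);
	if (!mx)
		return ERR_PTR(-ENOMEM);
	/* Leaves XPT_BUSY set; the transport is not usable until
	 * svc_xprt_received() is called on it. */
	svc_xprt_init(&my_xprt_class, &mx->mx_xprt, serv);
	return &mx->mx_xprt;
}
#endif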

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    const int family, const unsigned short port,
		    int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return svc_xprt_local_port(newxprt);
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
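
/* Example (illustrative sketch, not from the kernel tree): how a service
 * might create its listeners.  The port number 2049 and the error handling
 * policy are assumptions for illustration; on success svc_create_xprt
 * returns the bound local port.
 */
#if 0
static int my_create_listeners(struct svc_serv *serv)
{
	int err;

	err = svc_create_xprt(serv, "udp", PF_INET, 2049, SVC_SOCK_DEFAULTS);
	if (err < 0)
		return err;
	err = svc_create_xprt(serv, "tcp", PF_INET, 2049, SVC_SOCK_DEFAULTS);
	if (err < 0)
		return err;
	return 0;
}
#endif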

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct sockaddr *sin;

	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	sin = (struct sockaddr *)&xprt->xpt_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
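
/* Example (illustrative sketch, not from the kernel tree): formatting the
 * peer address for a log message.
 */
#if 0
static void my_log_peer(struct svc_rqst *rqstp)
{
	char buf[RPC_MAX_ADDRBUFLEN];

	printk(KERN_INFO "request from %s\n",
	       svc_print_addr(rqstp, buf, sizeof(buf)));
}
#endif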

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;
	int thread_avail;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		/* Don't enqueue dead transports */
		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
		goto out_unlock;
	}

	pool->sp_stats.packets++;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	/* Work out whether threads are available */
	thread_avail = !list_empty(&pool->sp_threads);	/* threads are asleep */
	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
		/* too many threads are runnable and trying to wake up */
		thread_avail = 0;
		pool->sp_stats.overloads_avoided++;
	}

	if (thread_avail) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		rqstp->rq_waking = 1;
		pool->sp_nwaking++;
		pool->sp_stats.threads_woken++;
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport.  Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
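
/* Example (illustrative sketch, not from the kernel tree): the typical
 * shape of a provider's xpo_recvfrom, which clears XPT_DATA before
 * reading and drops XPT_BUSY via svc_xprt_received() when done.
 * my_read_request() is a hypothetical helper.
 */
#if 0
static int my_xpo_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	int len;

	clear_bit(XPT_DATA, &xprt->xpt_flags);
	len = my_read_request(rqstp);			/* hypothetical */
	if (len > 0)
		set_bit(XPT_DATA, &xprt->xpt_flags);	/* more may be queued */
	svc_xprt_received(xprt);
	return len;
}
#endif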

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);
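
/* Example (illustrative sketch, not from the kernel tree): a service that
 * knows its reply will be small can return most of its reservation so the
 * transport's write-space check stops penalizing it.  The 512-byte figure
 * is an assumption.
 */
#if 0
static void my_trim_reservation(struct svc_rqst *rqstp)
{
	/* Keep only what the head already uses, plus 512 bytes. */
	svc_reserve(rqstp, 512);
}
#endif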

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if (rqstp->rq_res.len > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open "
				       "connections, consider increasing %s\n",
				       serv->sv_name, serv->sv_maxconn ?
				       "the max number of connections." :
				       "the number of threads.");
			}
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life.
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}
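
/* Example (illustrative sketch, not from the kernel tree): a service that
 * expects many clients per thread raising the limit at setup time.  The
 * figure 1024 is an assumption; the default is (sv_nrthreads + 3) * 20.
 */
#if 0
static void my_service_setup(struct svc_serv *serv)
{
	serv->sv_maxconn = 1024;
}
#endif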

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);
	long			time_left;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if (rqstp->rq_waking) {
		rqstp->rq_waking = 0;
		pool->sp_nwaking--;
		BUG_ON(pool->sp_nwaking < 0);
	}
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Checking kthread_should_stop() here allows us to avoid
		 * locking and signalling when stopping kthreads that call
		 * svc_recv. If the thread has already been woken up, then
		 * we can exit here without sleeping. If not, then it
		 * will be woken up quickly during the schedule_timeout.
		 */
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			spin_unlock_bh(&pool->sp_lock);
			return -EINTR;
		}

		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		time_left = schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);
		if (!time_left)
			pool->sp_stats.threads_timedout++;

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			if (signalled() || kthread_should_stop())
				return -EINTR;
			else
				return -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(xprt->xpt_server);
			spin_lock_bh(&serv->sv_lock);
			set_bit(XPT_TEMP, &newxpt->xpt_flags);
			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
			serv->sv_tmpcnt++;
			if (serv->sv_temptimer.function == NULL) {
				/* setup timer to age temp transports */
				setup_timer(&serv->sv_temptimer,
					    svc_age_temp_xprts,
					    (unsigned long)serv);
				mod_timer(&serv->sv_temptimer,
					  jiffies + svc_conn_age_period * HZ);
			}
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(newxpt);
		}
		svc_xprt_received(xprt);
	} else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred) {
			svc_xprt_received(xprt);
			len = svc_deferred_recv(rqstp);
		} else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_xprt_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Remove a dead transport
 */
void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its xpt_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;

	for (dr = svc_deferred_dequeue(xprt); dr;
	     dr = svc_deferred_dequeue(xprt)) {
		svc_xprt_put(xprt);
		kfree(dr);
	}

	svc_xprt_put(xprt);
	spin_unlock_bh(&serv->sv_lock);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(xprt);
	svc_delete_xprt(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * so just remove it from the waiting list
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len     = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	}
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       const sa_family_t af, const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
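
/* Example (illustrative sketch, not from the kernel tree): looking up a
 * service's TCP listener (any family, any port) and dropping the
 * reference that svc_find_xprt takes on a match.
 */
#if 0
static unsigned short my_tcp_port(struct svc_serv *serv)
{
	struct svc_xprt *xprt;
	unsigned short port = 0;

	xprt = svc_find_xprt(serv, "tcp", AF_UNSPEC, 0);
	if (xprt) {
		port = svc_xprt_local_port(xprt);
		svc_xprt_put(xprt);
	}
	return port;
}
#endif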

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
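
/* Example (illustrative sketch, not from the kernel tree): filling a
 * one-page buffer with the listener names, as a "portlist"-style proc
 * handler might do.
 */
#if 0
static int my_list_ports(struct svc_serv *serv, char *page)
{
	int len = svc_xprt_names(serv, page, PAGE_SIZE);

	/* len is the number of bytes written, or a negative errno
	 * such as -ENAMETOOLONG if the buffer was too small. */
	return len;
}
#endif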


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
		pool->sp_id,
		pool->sp_stats.packets,
		pool->sp_stats.sockets_queued,
		pool->sp_stats.threads_woken,
		pool->sp_stats.overloads_avoided,
		pool->sp_stats.threads_timedout);

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);
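
/* Example (illustrative sketch, not from the kernel tree): exposing the
 * pool statistics through a procfs file.  my_serv and the fops name are
 * assumptions for illustration.
 */
#if 0
static int my_pool_stats_open(struct inode *inode, struct file *file)
{
	return svc_pool_stats_open(my_serv, file);	/* my_serv: hypothetical */
}

static const struct file_operations my_pool_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= my_pool_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif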

/*----------------------------------------------------------------------------*/