linux/fs/nfsd/nfscache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE      64

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct kmem_cache        *drc_slab;

static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
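 *
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): 1GB of low
 * memory is 262144 pages, int_sqrt(262144) == 512, and
 * (16 * 512) << (12 - 10) == 32768, matching the 1GB row above.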
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
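/*
 * For example, using the table above: a 4GB machine allows 65536
 * entries, and 65536 / TARGET_BUCKET_SIZE (64) gives 1024 buckets,
 * which is already a power of two.
 */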
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

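/*
 * Map an RPC XID to a hash bucket index. hash_32() expects host byte
 * order, hence the be32_to_cpu() conversion; the result is folded into
 * nn->maskbits bits.
 */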
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}

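/*
 * Allocate a fresh cache entry and initialize its lookup key from the
 * request: XID, procedure, client address and port, transport protocol,
 * NFS version, argument length, and payload checksum.
 */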
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                        struct nfsd_net *nn)
{
        struct svc_cacherep     *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

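/*
 * Release a cache entry. Callers must hold the bucket's cache_lock,
 * except for entries that were never inserted (c_state == RC_UNUSED),
 * for which @b may be NULL.
 */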
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                                struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
        }
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                        struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                sizeof(struct svc_cacherep), 0, 0, NULL);
        return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}

static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
        return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
        nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

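/*
 * Set up the per-net reply cache: compute the entry limit, initialize
 * the stats counters, register the shrinker, and allocate the hash
 * table. All failure paths return -ENOMEM.
 */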
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        status = nfsd_reply_cache_stats_init(nn);
        if (status)
                goto out_nomem;

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                goto out_stats_destroy;

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                goto out_shrinker;

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
        nfsd_reply_cache_stats_destroy(nn);
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

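/*
 * Tear down the per-net reply cache: destroy the stats counters,
 * unregister the shrinker, free any remaining entries, and release
 * the hash table.
 */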
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep     *rp;
        unsigned int i;

        nfsd_reply_cache_stats_destroy(nn);
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;
                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                                        rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of the LRU list and refresh its
 * timestamp. Expired entries are reaped lazily, by prune_bucket() and
 * the shrinker, rather than by a separately scheduled cleaner.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

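/*
 * Walk one bucket's LRU list, oldest entries first, freeing those that
 * have passed RC_EXPIRE and, while the cache is over max_drc_entries,
 * any completed entry. Caller must hold the bucket's cache_lock.
 */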
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}


/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
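 * of the Call's arguments. Capping the length bounds the cost of
 * checksumming large (e.g. WRITE) payloads while, presumably, still
 * being enough to catch XID reuse with different arguments.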
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

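/*
 * Compare two cache keys. A matching XID with a mismatched payload
 * checksum means the client reused an XID for a different request;
 * that case is counted and traced as a payload miss before falling
 * through to the full memcmp().
 */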
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                        const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                nfsd_stats_payload_misses_inc(nn);
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}


/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if
 * one is found; otherwise links @key into the rb-tree and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                        struct nfsd_net *nn)
{
        struct svc_cacherep     *rp, *ret = key;
        struct rb_node          **p = &b->rb_head.rb_node,
                                *parent = NULL;
        unsigned int            entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}


/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, a new entry is
 * preallocated before the bucket lock is taken; if another thread
 * inserted a matching entry in the meantime, the preallocated entry
 * is freed and the existing one is used instead.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep     *rp, *found;
        __be32                  xid = rqstp->rq_xid;
        __wsum                  csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsd_stats_rc_nocache_inc();
                goto out;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsd_stats_rc_misses_inc();
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));

        /* go ahead and prune the cache */
        prune_bucket(b, nn);

out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsd_stats_rc_hits_inc();
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
        goto out_unlock;
}


/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
        u32             hash;
        struct nfsd_drc_bucket *b;
        int             len;
        size_t          bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nfsd_stats_drc_mem_usage_add(nn, bufsize);
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}


/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec     *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}


/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = m->private;

        seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage:             %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
        seq_printf(m, "cache hits:            %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
        seq_printf(m, "cache misses:          %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
        seq_printf(m, "not cached:            %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
        seq_printf(m, "payload misses:        %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
        seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                                                nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}