linux/net/sunrpc/cache.c
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define  RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
        time_t now = seconds_since_boot();
        INIT_HLIST_NODE(&h->cache_list);
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        if (now <= detail->flush_time)
                /* ensure it isn't already expired */
                now = detail->flush_time + 1;
        h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
        struct hlist_head *head;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        hlist_for_each_entry(tmp, head, cache_list) {
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp))
                                /* This entry is expired, we will discard it. */
                                break;
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might get into trouble if we need to
         * cache_put it soon.
         */
        cache_init(new, detail);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        hlist_for_each_entry(tmp, head, cache_list) {
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp)) {
                                hlist_del_init(&tmp->cache_list);
                                detail->entries--;
                                freeme = tmp;
                                break;
                        }
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }

        hlist_add_head(&new->cache_list, head);
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        if (freeme)
                cache_put(freeme, detail);
        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
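
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * how a cache implementation typically wraps sunrpc_cache_lookup().  The
 * entry type, hash function and cache_detail named here (my_entry,
 * my_hash, my_cache) are hypothetical; real users such as the auth.unix
 * caches follow the same container_of pattern.
 *
 *	struct my_entry {
 *		struct cache_head h;	// must be embedded in the entry
 *		// ... key and content fields ...
 *	};
 *
 *	static struct my_entry *my_lookup(struct my_entry *key)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(&my_cache, &key->h, my_hash(key));
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct my_entry, h);
 *	}
 */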

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
                               struct cache_detail *detail)
{
        time_t now = seconds_since_boot();
        if (now <= detail->flush_time)
                /* ensure it isn't immediately treated as expired */
                now = detail->flush_time + 1;
        head->expiry_time = expiry;
        head->last_refresh = now;
        smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
        set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        cache_fresh_locked(old, new->expiry_time, detail);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp, detail);
        detail->init(tmp, old);

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
        detail->entries++;
        cache_get(tmp);
        cache_fresh_locked(tmp, new->expiry_time, detail);
        cache_fresh_locked(old, 0, detail);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
        if (cd->cache_upcall)
                return cd->cache_upcall(cd, h);
        return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else {
                        /*
                         * In combination with write barrier in
                         * sunrpc_cache_update, ensures that anyone
                         * using the cache entry after this sees the
                         * updated contents:
                         */
                        smp_rmb();
                        return 0;
                }
        }
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
        int rv;

        write_lock(&detail->hash_lock);
        rv = cache_is_valid(h);
        if (rv == -EAGAIN) {
                set_bit(CACHE_NEGATIVE, &h->flags);
                cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
                                   detail);
                rv = -ENOENT;
        }
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(h, detail);
        return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if an upcall is pending and the request has been queued
 * -ETIMEDOUT if the upcall failed or could not be queued, or if the
 *           upcall completed but the item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if the cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                    struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN ||
                   (h->expiry_time != 0 && age > refresh_age/2)) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                rv = try_to_negate_entry(detail, h);
                                break;
                        case -EAGAIN:
                                cache_fresh_unlocked(h, detail);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
                        rv = cache_is_valid(h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
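
/*
 * Editorial example (not part of the original file): a hedged sketch of
 * the calling convention.  A hypothetical server-side user holds a
 * reference on a cache_head and asks cache_check() whether it may be
 * used; note that on any non-zero return, cache_check() has already
 * dropped that reference.  my_cache and ep are assumptions; the cache_req
 * here is assumed to come from a svc_rqst's rq_chandle.
 *
 *	switch (cache_check(&my_cache, &ep->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		// entry is valid and fresh enough; use ep, then cache_put()
 *		break;
 *	case -EAGAIN:
 *		// upcall pending, request deferred; retry when revisited
 *		break;
 *	case -ENOENT:		// negative entry
 *	case -ETIMEDOUT:	// upcall failed or could not be queued
 *		// reference already released by cache_check()
 *		break;
 *	}
 */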

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
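
/*
 * Editorial example (not part of the original file): a small worked
 * timeline for the nextcheck logic above, with illustrative numbers.
 * Suppose a scan starts at t=100; nextcheck is first pushed well ahead
 * (here, 100 + 30*60 = 1900).  If the scan then meets entries expiring
 * at t=500 and t=250, nextcheck drops to 501 and then to 251, so the
 * table is not rescanned before t=251 unless flush_time is set earlier
 * than that, in which case nextcheck is pulled back to it.
 */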

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               hlist_empty(&current_detail->hash_table[current_index]))
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch = NULL;
                struct cache_detail *d;
                struct hlist_head *head;
                struct hlist_node *tmp;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                head = &current_detail->hash_table[current_index];
                hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        hlist_del_init(&ch->cache_list);
                        current_detail->entries--;
                        rv = 1;
                        break;
                }

                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        set_bit(CACHE_CLEANED, &ch->flags);
                        cache_fresh_unlocked(ch, d);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                queue_delayed_work(system_power_efficient_wq,
                                   &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.  The scan is run twice so that entries missed
 * while a scan was already in progress are caught on the second pass.
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
        struct cache_head *ch = NULL;
        struct hlist_head *head = NULL;
        struct hlist_node *tmp = NULL;
        int i = 0;

        write_lock(&detail->hash_lock);
        if (!detail->entries) {
                write_unlock(&detail->hash_lock);
                return;
        }

        dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
        for (i = 0; i < detail->hash_size; i++) {
                head = &detail->hash_table[i];
                hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
                        hlist_del_init(&ch->cache_list);
                        detail->entries--;

                        set_bit(CACHE_CLEANED, &ch->flags);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(ch, detail);
                        cache_put(ch, detail);
                        write_lock(&detail->hash_lock);
                }
        }
        write_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
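
/*
 * Editorial example (not part of the original file): a hedged sketch of
 * the deferral round trip.  A transport's request embeds the deferred
 * form; the names my_req and my_revisit are hypothetical.
 *
 *	struct my_req {
 *		struct cache_deferred_req handle;
 *		// ... enough state to replay the request later ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_req *r = container_of(dreq, struct my_req, handle);
 *		// requeue r for processing, or drop it if too_many
 *	}
 *
 * cache_defer_req() calls req->defer(req) to obtain the handle and files
 * it in cache_defer_hash; ->revisit() is invoked from
 * cache_revisit_request() once the cache entry is resolved.
 */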

#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
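
/* Editorial note (not part of the original file): DFR_HASH xors two
 * shifted copies of the cache_head pointer so that the low,
 * alignment-induced zero bits of the address do not make every item
 * land in the same bucket.
 */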

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        hlist_del_init(&dreq->hash);
        if (!list_empty(&dreq->recent)) {
                list_del_init(&dreq->recent);
                cache_defer_cnt--;
        }
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&dreq->recent);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
                           struct cache_head *item,
                           int count_me)
{
        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        if (count_me) {
                cache_defer_cnt++;
                list_add(&dreq->recent, &cache_defer_list);
        }

        spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        setup_deferral(dreq, item, 0);

        if (!test_bit(CACHE_PENDING, &item->flags) ||
            wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
}

static void cache_limit_defers(void)
{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
        struct cache_deferred_req *discard = NULL;

        if (cache_defer_cnt <= DFR_MAX)
                return;

        spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
                if (prandom_u32() & 1)
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);
        if (discard)
                discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;

        if (req->thread_wait) {
                cache_wait_req(req, item);
                if (!test_bit(CACHE_PENDING, &item->flags))
                        return false;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return false;
        setup_deferral(dreq, item, 1);
        if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
                cache_revisit_request(item);

        cache_limit_defers();
        return true;
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;
        struct hlist_node *tmp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_PENDING clear, we free it from the list.
 *
 */
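
/*
 * Editorial example (not part of the original file): a hedged sketch of
 * the userspace side of this channel, in the style of daemons such as
 * mountd.  The daemon repeatedly reads one whole request and writes back
 * one whole reply; the field layout of both lines is defined by the
 * individual cache.
 *
 *	fd = open("/proc/net/rpc/<cachename>/channel", O_RDWR);
 *	for (;;) {
 *		char buf[8192];
 *		int n = read(fd, buf, sizeof(buf));	// one full request
 *		if (n <= 0)
 *			continue;
 *		// ... look up the answer for the request in buf ...
 *		write(fd, reply, replylen);		// one full reply line
 *	}
 */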

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
                               struct cache_request *crq)
{
        char *bp = crq->buf;
        int len = PAGE_SIZE;

        detail->cache_request(detail, crq->item, &bp, &len);
        if (len < 0)
                return -EAGAIN;
        return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = file_inode(filp);
        int err;

        if (count == 0)
                return 0;

        inode_lock(inode); /* protect against multiple concurrent
                              * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                inode_unlock(inode);
                WARN_ON_ONCE(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        WARN_ON_ONCE(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rq->len == 0) {
                err = cache_request(cd, rq);
                if (err < 0)
                        goto out;
                rq->len = err;
        }

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        inode_unlock(inode);
        return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (count == 0)
                return -EINVAL;
        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
                                   size_t count, struct cache_detail *cd)
{
        static char write_buf[8192]; /* protected by queue_io_mutex */
        ssize_t ret = -EINVAL;

        if (count >= sizeof(write_buf))
                goto out;
        mutex_lock(&queue_io_mutex);
        ret = cache_do_downcall(write_buf, buf, count, cd);
        mutex_unlock(&queue_io_mutex);
out:
        return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        struct page *page;
        char *kaddr;
        ssize_t ret = -ENOMEM;

        if (count >= PAGE_SIZE)
                goto out_slow;

        page = find_or_create_page(mapping, 0, GFP_KERNEL);
        if (!page)
                goto out_slow;

        kaddr = kmap(page);
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
        put_page(page);
        return ret;
out_slow:
        return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = file_inode(filp);
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        inode_lock(inode);
        ret = cache_downcall(mapping, buf, count, cd);
        inode_unlock(inode);
out:
        return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp) {
                        module_put(cd->owner);
                        return -ENOMEM;
                }
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = seconds_since_boot();
                atomic_dec(&cd->readers);
        }
        module_put(cd->owner);
        return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq, *tmp;
        struct cache_request *cr;
        struct list_head dequeued;

        INIT_LIST_HEAD(&dequeued);
        spin_lock(&queue_lock);
        list_for_each_entry_safe(cq, tmp, &detail->queue, list)
                if (!cq->reader) {
                        cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (test_bit(CACHE_PENDING, &ch->flags))
                                /* Lost a race and it is pending again */
                                break;
                        if (cr->readers != 0)
                                continue;
                        list_move(&cr->q.list, &dequeued);
                }
        spin_unlock(&queue_lock);
        while (!list_empty(&dequeued)) {
                cr = list_entry(dequeued.next, struct cache_request, q.list);
                list_del(&cr->q.list);
                cache_put(cr->item, detail);
                kfree(cr->buf);
                kfree(cr);
        }
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh with slosh,
 * or hexified with a leading \x.
 * Record is terminated with newline.
 *
 */
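
/*
 * Editorial example (not part of the original file): with the escaping
 * rules above, qword_add() emits the field "foo bar" as "foo\040bar "
 * (the space quoted as octal, a separating space appended), and
 * qword_addhex() emits the two bytes 0x01 0x02 as "\x0102 ".
 */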

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        int ret;

        if (len < 0) return;

        ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
        if (ret >= len) {
                bp += len;
                len = -1;
        } else {
                bp += ret;
                len -= ret;
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        bp = hex_byte_pack(bp, *buf++);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->readers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
        char *buf;
        struct cache_request *crq;
        int ret = 0;

        if (!detail->cache_request)
                return -EINVAL;

        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                return -EINVAL;
        }
        if (test_bit(CACHE_CLEANED, &h->flags))
                /* Too late to make an upcall */
                return -EAGAIN;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        crq->q.reader = 0;
        crq->buf = buf;
        crq->len = 0;
        crq->readers = 0;
        spin_lock(&queue_lock);
        if (test_bit(CACHE_PENDING, &h->flags)) {
                crq->item = cache_get(h);
                list_add_tail(&crq->q.list, &detail->queue);
        } else
                /* Lost a race, no longer PENDING, so don't enqueue */
                ret = -EAGAIN;
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        if (ret == -EAGAIN) {
                kfree(buf);
                kfree(crq);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
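
/*
 * Editorial note (not part of the original file): the request queued here
 * lives until two conditions meet in cache_read() or cache_dequeue():
 * CACHE_PENDING has been cleared on the item (a reply arrived, the entry
 * was negated, or it was cleaned) and no reader still holds a
 * partial-read reference (cr->readers == 0).  Only then are the buffer
 * and request freed and the cache_head reference dropped.
 */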

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (len < bufsize - 1) {
                        int h, l;

                        h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;

                        l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;

                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp -'0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);
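
/*
 * Editorial example (not part of the original file): a hedged sketch of
 * how a cache's ->cache_parse method typically consumes a downcall line
 * with qword_get() and feeds the result to sunrpc_cache_update().  The
 * names my_entry, my_key_from and my_update_entry are hypothetical
 * helpers standing in for per-cache code.
 *
 *	static int my_cache_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char word[50];
 *		struct my_entry key, *ep;
 *
 *		if (mesg[mlen - 1] != '\n')
 *			return -EINVAL;
 *		if (qword_get(&mesg, word, sizeof(word)) <= 0)
 *			return -EINVAL;
 *		my_key_from(&key, word);		// fill in the lookup key
 *		key.h.expiry_time = get_expiry(&mesg);	// wallclock expiry field
 *		// ... parse content fields, or set CACHE_NEGATIVE ...
 *		ep = my_update_entry(cd, &key);		// sunrpc_cache_update()
 *		return ep ? 0 : -ENOMEM;
 *	}
 */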

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
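
/*
 * Editorial note (not part of the original file): the seq_file position
 * is packed as a 64-bit cookie, with the hash-chain index in the upper
 * 32 bits and the (entry index + 1) within that chain in the lower 32
 * bits; position 0 is reserved for the SEQ_START_TOKEN header line.
 */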

void *cache_seq_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned int hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = m->private;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while (hash < cd->hash_size &&
                hlist_empty(&cd->hash_table[hash]));
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return hlist_entry_safe(cd->hash_table[hash].first,
                                struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);

void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->cache_list.next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return hlist_entry_safe(ch->cache_list.next,
                                        struct cache_head, cache_list);
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               hlist_empty(&cd->hash_table[hash])) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return hlist_entry_safe(cd->hash_table[hash].first,
                                struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);

void cache_seq_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = m->private;
        read_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_seq_stop);

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           kref_read(&cp->ref), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else {
                if (cache_is_expired(cd, cp))
                        seq_printf(m, "# ");
                cache_put(cp, cd);
        }

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = cache_seq_start,
        .next   = cache_seq_next,
        .stop   = cache_seq_stop,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct seq_file *seq;
        int err;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;

        err = seq_open(file, &cache_content_op);
        if (err) {
                module_put(cd->owner);
                return err;
        }

        seq = file->private_data;
        seq->private = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                struct cache_detail *cd)
{
        int ret = seq_release(inode, file);
        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[22];
        size_t len;

        len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
                        convert_to_wallclock(cd->flush_time));
        return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *bp, *ep;
        time_t then, now;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        bp = tbuf;
        then = get_expiry(&bp);
        now = seconds_since_boot();
        cd->nextcheck = now;
        /* Can only set flush_time to 1 second beyond "now", or
         * possibly 1 second beyond flush_time.  This is because
         * flush_time never goes backwards so it mustn't get too far
         * ahead of time.
         */
        if (then >= now) {
                /* Want to flush everything, so behave like cache_purge() */
                if (cd->flush_time >= now)
                        now = cd->flush_time + 1;
                then = now;
        }

        cd->flush_time = then;
        cache_flush();

        *ppos += count;
        return count;
}
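
/*
 * Editorial example (not part of the original file): from userspace, a
 * whole cache is typically flushed by writing a wallclock time at or
 * after "now" to the per-cache flush file, e.g.
 *
 *	echo $(date +%s) > /proc/net/rpc/<cachename>/flush
 *
 * (tools in the nfs-utils suite do the equivalent when the export table
 * changes); reading the file back returns the current flush_time.
 */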

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_procfs,
        .write          = cache_write_procfs,
        .poll           = cache_poll_procfs,
        .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
        .open           = cache_open_procfs,
        .release        = cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
        .open           = content_open_procfs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
        .open           = open_flush_procfs,
        .read           = read_flush_procfs,
        .write          = write_flush_procfs,
        .release        = release_flush_procfs,
        .llseek         = no_llseek,
};
1604
1605static void remove_cache_proc_entries(struct cache_detail *cd)
1606{
1607        if (cd->procfs) {
1608                proc_remove(cd->procfs);
1609                cd->procfs = NULL;
1610        }
1611}
1612
1613#ifdef CONFIG_PROC_FS
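/*
 * Create /proc/net/rpc/<name>/ with up to three files: "flush" (always),
 * "channel" (only if the cache does upcalls/downcalls) and "content"
 * (only if the cache can display itself).
 */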
1614static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1615{
1616        struct proc_dir_entry *p;
1617        struct sunrpc_net *sn;
1618
1619        sn = net_generic(net, sunrpc_net_id);
1620        cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1621        if (cd->procfs == NULL)
1622                goto out_nomem;
1623
1624        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1625                             cd->procfs, &cache_flush_operations_procfs, cd);
1626        if (p == NULL)
1627                goto out_nomem;
1628
1629        if (cd->cache_request || cd->cache_parse) {
1630                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1631                                cd->procfs, &cache_file_operations_procfs, cd);
1632                if (p == NULL)
1633                        goto out_nomem;
1634        }
1635        if (cd->cache_show) {
1636                p = proc_create_data("content", S_IFREG|S_IRUSR,
1637                                cd->procfs, &content_file_operations_procfs, cd);
1638                if (p == NULL)
1639                        goto out_nomem;
1640        }
1641        return 0;
1642out_nomem:
1643        remove_cache_proc_entries(cd);
1644        return -ENOMEM;
1645}
1646#else /* CONFIG_PROC_FS */
1647static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1648{
1649        return 0;
1650}
1651#endif
1652
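/* One-time setup for the delayed work that prunes expired entries. */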
1653void __init cache_initialize(void)
1654{
1655        INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1656}
1657
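/*
 * Make @cd usable in namespace @net: put it on the cache cleaner's list
 * and create its procfs interface.  Undone by cache_unregister_net().
 */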
1658int cache_register_net(struct cache_detail *cd, struct net *net)
1659{
1660        int ret;
1661
1662        sunrpc_init_cache_detail(cd);
1663        ret = create_cache_proc_entries(cd, net);
1664        if (ret)
1665                sunrpc_destroy_cache_detail(cd);
1666        return ret;
1667}
1668EXPORT_SYMBOL_GPL(cache_register_net);
1669
1670void cache_unregister_net(struct cache_detail *cd, struct net *net)
1671{
1672        remove_cache_proc_entries(cd);
1673        sunrpc_destroy_cache_detail(cd);
1674}
1675EXPORT_SYMBOL_GPL(cache_unregister_net);
1676
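/*
 * Clone the template @tmpl into a per-namespace cache_detail and
 * allocate its hash table.  Returns an ERR_PTR on failure.
 */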
1677struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
1678{
1679        struct cache_detail *cd;
1680        int i;
1681
1682        cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1683        if (cd == NULL)
1684                return ERR_PTR(-ENOMEM);
1685
1686        cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1687                                 GFP_KERNEL);
1688        if (cd->hash_table == NULL) {
1689                kfree(cd);
1690                return ERR_PTR(-ENOMEM);
1691        }
1692
1693        for (i = 0; i < cd->hash_size; i++)
1694                INIT_HLIST_HEAD(&cd->hash_table[i]);
1695        cd->net = net;
1696        return cd;
1697}
1698EXPORT_SYMBOL_GPL(cache_create_net);
1699
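/*
 * Free a cache_detail obtained from cache_create_net(); the cache must
 * already have been torn down with cache_unregister_net().
 */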
1700void cache_destroy_net(struct cache_detail *cd, struct net *net)
1701{
1702        kfree(cd->hash_table);
1703        kfree(cd);
1704}
1705EXPORT_SYMBOL_GPL(cache_destroy_net);
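
/*
 * Editor's sketch of the per-net lifecycle these four functions imply;
 * my_cache_tmpl, my_net_init() and my_net_exit() are illustrative names,
 * not part of this file:
 *
 *	static __net_init int my_net_init(struct net *net)
 *	{
 *		struct cache_detail *cd;
 *		int err;
 *
 *		cd = cache_create_net(&my_cache_tmpl, net);
 *		if (IS_ERR(cd))
 *			return PTR_ERR(cd);
 *		err = cache_register_net(cd, net);
 *		if (err) {
 *			cache_destroy_net(cd, net);
 *			return err;
 *		}
 *		return 0;	(remember cd in per-net data for exit)
 *	}
 *
 *	static __net_exit void my_net_exit(struct net *net, struct cache_detail *cd)
 *	{
 *		cache_unregister_net(cd, net);
 *		cache_destroy_net(cd, net);
 *	}
 *
 * The ip_map and unix_gid caches in svcauth_unix.c follow this pattern.
 */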
1706
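/*
 * The same channel/content/flush interface can be exposed through
 * rpc_pipefs instead of procfs; there the cache_detail is carried in
 * the RPC inode's private pointer.
 */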
1707static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1708                                 size_t count, loff_t *ppos)
1709{
1710        struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1711
1712        return cache_read(filp, buf, count, ppos, cd);
1713}
1714
1715static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1716                                  size_t count, loff_t *ppos)
1717{
1718        struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1719
1720        return cache_write(filp, buf, count, ppos, cd);
1721}
1722
1723static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1724{
1725        struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1726
1727        return cache_poll(filp, wait, cd);
1728}
1729
1730static long cache_ioctl_pipefs(struct file *filp,
1731                               unsigned int cmd, unsigned long arg)
1732{
1733        struct inode *inode = file_inode(filp);
1734        struct cache_detail *cd = RPC_I(inode)->private;
1735
1736        return cache_ioctl(inode, filp, cmd, arg, cd);
1737}
1738
1739static int cache_open_pipefs(struct inode *inode, struct file *filp)
1740{
1741        struct cache_detail *cd = RPC_I(inode)->private;
1742
1743        return cache_open(inode, filp, cd);
1744}
1745
1746static int cache_release_pipefs(struct inode *inode, struct file *filp)
1747{
1748        struct cache_detail *cd = RPC_I(inode)->private;
1749
1750        return cache_release(inode, filp, cd);
1751}
1752
1753const struct file_operations cache_file_operations_pipefs = {
1754        .owner          = THIS_MODULE,
1755        .llseek         = no_llseek,
1756        .read           = cache_read_pipefs,
1757        .write          = cache_write_pipefs,
1758        .poll           = cache_poll_pipefs,
1759        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1760        .open           = cache_open_pipefs,
1761        .release        = cache_release_pipefs,
1762};
1763
1764static int content_open_pipefs(struct inode *inode, struct file *filp)
1765{
1766        struct cache_detail *cd = RPC_I(inode)->private;
1767
1768        return content_open(inode, filp, cd);
1769}
1770
1771static int content_release_pipefs(struct inode *inode, struct file *filp)
1772{
1773        struct cache_detail *cd = RPC_I(inode)->private;
1774
1775        return content_release(inode, filp, cd);
1776}
1777
1778const struct file_operations content_file_operations_pipefs = {
1779        .open           = content_open_pipefs,
1780        .read           = seq_read,
1781        .llseek         = seq_lseek,
1782        .release        = content_release_pipefs,
1783};
1784
1785static int open_flush_pipefs(struct inode *inode, struct file *filp)
1786{
1787        struct cache_detail *cd = RPC_I(inode)->private;
1788
1789        return open_flush(inode, filp, cd);
1790}
1791
1792static int release_flush_pipefs(struct inode *inode, struct file *filp)
1793{
1794        struct cache_detail *cd = RPC_I(inode)->private;
1795
1796        return release_flush(inode, filp, cd);
1797}
1798
1799static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1800                            size_t count, loff_t *ppos)
1801{
1802        struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1803
1804        return read_flush(filp, buf, count, ppos, cd);
1805}
1806
1807static ssize_t write_flush_pipefs(struct file *filp,
1808                                  const char __user *buf,
1809                                  size_t count, loff_t *ppos)
1810{
1811        struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1812
1813        return write_flush(filp, buf, count, ppos, cd);
1814}
1815
1816const struct file_operations cache_flush_operations_pipefs = {
1817        .open           = open_flush_pipefs,
1818        .read           = read_flush_pipefs,
1819        .write          = write_flush_pipefs,
1820        .release        = release_flush_pipefs,
1821        .llseek         = no_llseek,
1822};
1823
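/*
 * Create a directory for this cache under @parent in rpc_pipefs and
 * remember it so that sunrpc_cache_unregister_pipefs() can remove it.
 */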
1824int sunrpc_cache_register_pipefs(struct dentry *parent,
1825                                 const char *name, umode_t umode,
1826                                 struct cache_detail *cd)
1827{
1828        struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

1829        if (IS_ERR(dir))
1830                return PTR_ERR(dir);
1831        cd->pipefs = dir;
1832        return 0;
1833}
1834EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1835
1836void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1837{
1838        if (cd->pipefs) {
1839                rpc_remove_cache_dir(cd->pipefs);
1840                cd->pipefs = NULL;
1841        }
1842}
1843EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1844
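/*
 * Remove @h from its hash chain without waiting for it to expire.  If
 * it was hashed, the hash table's reference is dropped; the caller's
 * own reference keeps @h alive until the caller does cache_put().
 */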
1845void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1846{
1847        write_lock(&cd->hash_lock);
1848        if (!hlist_unhashed(&h->cache_list)) {
1849                hlist_del_init(&h->cache_list);
1850                cd->entries--;
1851                write_unlock(&cd->hash_lock);
1852                cache_put(h, cd);
1853        } else {
1854                write_unlock(&cd->hash_lock);
        }
1855}
1856EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1857