// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static bool cache_listeners_exist(struct cache_detail *detail);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

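/*
 * Look up @key in the hash chain for @hash under rcu_read_lock().
 * Expired entries are skipped; on a match a reference is taken with
 * cache_get_rcu() before leaving the RCU read-side section.
 */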
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				continue;
			tmp = cache_get_rcu(tmp);
			rcu_read_unlock();
			return tmp;
		}
	}
	rcu_read_unlock();
	return NULL;
}

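/*
 * Slow path of sunrpc_cache_lookup_rcu(): allocate and initialise a new
 * entry, then re-check the chain under hash_lock.  A live duplicate that
 * appeared in the meantime is returned instead; an expired duplicate is
 * unhashed here and released once the lock has been dropped.
 */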
static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init_rcu(&tmp->cache_list);
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			spin_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme) {
		cache_fresh_unlocked(freeme, detail);
		cache_put(freeme, detail);
	}
	return new;
}

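/*
 * Find an entry matching @key, or insert a freshly initialised copy of it.
 * Callers normally embed struct cache_head in a larger structure and wrap
 * this helper, along the lines of (illustrative only):
 *
 *	h = sunrpc_cache_lookup_rcu(cd, &key->h, hash);
 *	if (h)
 *		return container_of(h, struct my_cache_entry, h);
 */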
struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;

	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
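	/* paired with smp_rmb() in cache_is_valid(): publish the updated
	 * contents before CACHE_VALID becomes observable
	 */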
	smp_wmb();
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

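/*
 * Classify the state of a cache entry: -EAGAIN if it has not been filled
 * in yet, -ENOENT if it is a negative entry, 0 if it carries valid data.
 */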
static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked(), ensures that anyone
			 * using the memory object after 'cache_head'
			 * sees the updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_put it and return
 *  -EAGAIN if upcall is pending and request has been queued
 *  -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 *  -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		} else if (!cache_listeners_exist(detail))
			rv = try_to_negate_entry(detail, h);
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as
			 * best we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it is safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init_rcu(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			hlist_del_init_rcu(&ch->cache_list);
			detail->entries--;

			set_bit(CACHE_CLEANED, &ch->flags);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(ch, detail);
			cache_put(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *", so that matching
 * requests can be found and revisited when the entry
 * is updated.
 * They are also linked into "cache_defer_list" so the
 * total number of deferred requests can be capped at
 * DFR_MAX by discarding either the oldest or the newest.
 */
#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

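/*
 * Used by cache_wait_req() so that the calling thread can itself sleep
 * on the deferred request: ->revisit just completes the on-stack
 * completion, waking the waiter.
 */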
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up and precede other readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

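/*
 * Hand one queued upcall request to a reader of the 'channel' file.
 * The reader's cache_queue entry is advanced past other readers until a
 * request is found; ->offset is kept so a single request can be consumed
 * by several read() calls.
 */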
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

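/*
 * Downcall path: a complete update message written to the 'channel' file
 * is NUL-terminated in a kernel buffer and handed to ->cache_parse().
 * Small writes borrow a page from the file's page cache; anything larger
 * falls back to a shared static buffer serialised by queue_io_mutex.
 */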
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}

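/*
 * Throw away any queued-but-unread upcall requests for @ch, typically
 * once the entry has been resolved or discarded, so that stale requests
 * are not handed to user space.
 */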
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or a hexified with a leading \x
 * Record is terminated with newline.
 *
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

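/*
 * Upcalls only make sense while a user-space daemon has the channel open
 * for writing.  warn_no_listener() complains at most once per daemon
 * exit; cache_listeners_exist() reports whether an upcall has any chance
 * of being answered.
 */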
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* never closed (only opened?) */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by
 * the upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{

	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1; /* doesn't fit in buffer */
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	*ppos += count;
	return count;
}
1526
1527static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1528 size_t count, loff_t *ppos)
1529{
1530 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1531
1532 return cache_read(filp, buf, count, ppos, cd);
1533}
1534
1535static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1536 size_t count, loff_t *ppos)
1537{
1538 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1539
1540 return cache_write(filp, buf, count, ppos, cd);
1541}
1542
1543static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1544{
1545 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1546
1547 return cache_poll(filp, wait, cd);
1548}
1549
1550static long cache_ioctl_procfs(struct file *filp,
1551 unsigned int cmd, unsigned long arg)
1552{
1553 struct inode *inode = file_inode(filp);
1554 struct cache_detail *cd = PDE_DATA(inode);
1555
1556 return cache_ioctl(inode, filp, cmd, arg, cd);
1557}
1558
1559static int cache_open_procfs(struct inode *inode, struct file *filp)
1560{
1561 struct cache_detail *cd = PDE_DATA(inode);
1562
1563 return cache_open(inode, filp, cd);
1564}
1565
1566static int cache_release_procfs(struct inode *inode, struct file *filp)
1567{
1568 struct cache_detail *cd = PDE_DATA(inode);
1569
1570 return cache_release(inode, filp, cd);
1571}
1572
1573static const struct file_operations cache_file_operations_procfs = {
1574 .owner = THIS_MODULE,
1575 .llseek = no_llseek,
1576 .read = cache_read_procfs,
1577 .write = cache_write_procfs,
1578 .poll = cache_poll_procfs,
1579 .unlocked_ioctl = cache_ioctl_procfs,
1580 .open = cache_open_procfs,
1581 .release = cache_release_procfs,
1582};
1583
1584static int content_open_procfs(struct inode *inode, struct file *filp)
1585{
1586 struct cache_detail *cd = PDE_DATA(inode);
1587
1588 return content_open(inode, filp, cd);
1589}
1590
1591static int content_release_procfs(struct inode *inode, struct file *filp)
1592{
1593 struct cache_detail *cd = PDE_DATA(inode);
1594
1595 return content_release(inode, filp, cd);
1596}
1597
1598static const struct file_operations content_file_operations_procfs = {
1599 .open = content_open_procfs,
1600 .read = seq_read,
1601 .llseek = seq_lseek,
1602 .release = content_release_procfs,
1603};
1604
1605static int open_flush_procfs(struct inode *inode, struct file *filp)
1606{
1607 struct cache_detail *cd = PDE_DATA(inode);
1608
1609 return open_flush(inode, filp, cd);
1610}
1611
1612static int release_flush_procfs(struct inode *inode, struct file *filp)
1613{
1614 struct cache_detail *cd = PDE_DATA(inode);
1615
1616 return release_flush(inode, filp, cd);
1617}
1618
1619static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1620 size_t count, loff_t *ppos)
1621{
1622 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1623
1624 return read_flush(filp, buf, count, ppos, cd);
1625}
1626
1627static ssize_t write_flush_procfs(struct file *filp,
1628 const char __user *buf,
1629 size_t count, loff_t *ppos)
1630{
1631 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1632
1633 return write_flush(filp, buf, count, ppos, cd);
1634}
1635
1636static const struct file_operations cache_flush_operations_procfs = {
1637 .open = open_flush_procfs,
1638 .read = read_flush_procfs,
1639 .write = write_flush_procfs,
1640 .release = release_flush_procfs,
1641 .llseek = no_llseek,
1642};
1643
1644static void remove_cache_proc_entries(struct cache_detail *cd)
1645{
1646 if (cd->procfs) {
1647 proc_remove(cd->procfs);
1648 cd->procfs = NULL;
1649 }
1650}
1651
1652#ifdef CONFIG_PROC_FS
1653static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1654{
1655 struct proc_dir_entry *p;
1656 struct sunrpc_net *sn;
1657
1658 sn = net_generic(net, sunrpc_net_id);
1659 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1660 if (cd->procfs == NULL)
1661 goto out_nomem;
1662
1663 p = proc_create_data("flush", S_IFREG | 0600,
1664 cd->procfs, &cache_flush_operations_procfs, cd);
1665 if (p == NULL)
1666 goto out_nomem;
1667
1668 if (cd->cache_request || cd->cache_parse) {
1669 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1670 &cache_file_operations_procfs, cd);
1671 if (p == NULL)
1672 goto out_nomem;
1673 }
1674 if (cd->cache_show) {
1675 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1676 &content_file_operations_procfs, cd);
1677 if (p == NULL)
1678 goto out_nomem;
1679 }
1680 return 0;
1681out_nomem:
1682 remove_cache_proc_entries(cd);
1683 return -ENOMEM;
1684}
1685#else
1686static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1687{
1688 return 0;
1689}
1690#endif
1691
1692void __init cache_initialize(void)
1693{
1694 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1695}
1696
1697int cache_register_net(struct cache_detail *cd, struct net *net)
1698{
1699 int ret;
1700
1701 sunrpc_init_cache_detail(cd);
1702 ret = create_cache_proc_entries(cd, net);
1703 if (ret)
1704 sunrpc_destroy_cache_detail(cd);
1705 return ret;
1706}
1707EXPORT_SYMBOL_GPL(cache_register_net);
1708
1709void cache_unregister_net(struct cache_detail *cd, struct net *net)
1710{
1711 remove_cache_proc_entries(cd);
1712 sunrpc_destroy_cache_detail(cd);
1713}
1714EXPORT_SYMBOL_GPL(cache_unregister_net);
1715
1716struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1717{
1718 struct cache_detail *cd;
1719 int i;
1720
1721 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1722 if (cd == NULL)
1723 return ERR_PTR(-ENOMEM);
1724
1725 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1726 GFP_KERNEL);
1727 if (cd->hash_table == NULL) {
1728 kfree(cd);
1729 return ERR_PTR(-ENOMEM);
1730 }
1731
1732 for (i = 0; i < cd->hash_size; i++)
1733 INIT_HLIST_HEAD(&cd->hash_table[i]);
1734 cd->net = net;
1735 return cd;
1736}
1737EXPORT_SYMBOL_GPL(cache_create_net);
1738
1739void cache_destroy_net(struct cache_detail *cd, struct net *net)
1740{
1741 kfree(cd->hash_table);
1742 kfree(cd);
1743}
1744EXPORT_SYMBOL_GPL(cache_destroy_net);
1745
1746static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1747 size_t count, loff_t *ppos)
1748{
1749 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1750
1751 return cache_read(filp, buf, count, ppos, cd);
1752}
1753
1754static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1755 size_t count, loff_t *ppos)
1756{
1757 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1758
1759 return cache_write(filp, buf, count, ppos, cd);
1760}
1761
1762static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1763{
1764 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1765
1766 return cache_poll(filp, wait, cd);
1767}
1768
1769static long cache_ioctl_pipefs(struct file *filp,
1770 unsigned int cmd, unsigned long arg)
1771{
1772 struct inode *inode = file_inode(filp);
1773 struct cache_detail *cd = RPC_I(inode)->private;
1774
1775 return cache_ioctl(inode, filp, cmd, arg, cd);
1776}
1777
1778static int cache_open_pipefs(struct inode *inode, struct file *filp)
1779{
1780 struct cache_detail *cd = RPC_I(inode)->private;
1781
1782 return cache_open(inode, filp, cd);
1783}
1784
1785static int cache_release_pipefs(struct inode *inode, struct file *filp)
1786{
1787 struct cache_detail *cd = RPC_I(inode)->private;
1788
1789 return cache_release(inode, filp, cd);
1790}
1791
1792const struct file_operations cache_file_operations_pipefs = {
1793 .owner = THIS_MODULE,
1794 .llseek = no_llseek,
1795 .read = cache_read_pipefs,
1796 .write = cache_write_pipefs,
1797 .poll = cache_poll_pipefs,
1798 .unlocked_ioctl = cache_ioctl_pipefs,
1799 .open = cache_open_pipefs,
1800 .release = cache_release_pipefs,
1801};
1802
1803static int content_open_pipefs(struct inode *inode, struct file *filp)
1804{
1805 struct cache_detail *cd = RPC_I(inode)->private;
1806
1807 return content_open(inode, filp, cd);
1808}
1809
1810static int content_release_pipefs(struct inode *inode, struct file *filp)
1811{
1812 struct cache_detail *cd = RPC_I(inode)->private;
1813
1814 return content_release(inode, filp, cd);
1815}
1816
1817const struct file_operations content_file_operations_pipefs = {
1818 .open = content_open_pipefs,
1819 .read = seq_read,
1820 .llseek = seq_lseek,
1821 .release = content_release_pipefs,
1822};
1823
1824static int open_flush_pipefs(struct inode *inode, struct file *filp)
1825{
1826 struct cache_detail *cd = RPC_I(inode)->private;
1827
1828 return open_flush(inode, filp, cd);
1829}
1830
1831static int release_flush_pipefs(struct inode *inode, struct file *filp)
1832{
1833 struct cache_detail *cd = RPC_I(inode)->private;
1834
1835 return release_flush(inode, filp, cd);
1836}
1837
1838static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1839 size_t count, loff_t *ppos)
1840{
1841 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1842
1843 return read_flush(filp, buf, count, ppos, cd);
1844}
1845
1846static ssize_t write_flush_pipefs(struct file *filp,
1847 const char __user *buf,
1848 size_t count, loff_t *ppos)
1849{
1850 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1851
1852 return write_flush(filp, buf, count, ppos, cd);
1853}
1854
1855const struct file_operations cache_flush_operations_pipefs = {
1856 .open = open_flush_pipefs,
1857 .read = read_flush_pipefs,
1858 .write = write_flush_pipefs,
1859 .release = release_flush_pipefs,
1860 .llseek = no_llseek,
1861};
1862
1863int sunrpc_cache_register_pipefs(struct dentry *parent,
1864 const char *name, umode_t umode,
1865 struct cache_detail *cd)
1866{
1867 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1868 if (IS_ERR(dir))
1869 return PTR_ERR(dir);
1870 cd->pipefs = dir;
1871 return 0;
1872}
1873EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1874
1875void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1876{
1877 if (cd->pipefs) {
1878 rpc_remove_cache_dir(cd->pipefs);
1879 cd->pipefs = NULL;
1880 }
1881}
1882EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1883
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		hlist_del_init_rcu(&h->cache_list);
		cd->entries--;
		spin_unlock(&cd->hash_lock);
		cache_put(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1896