/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
	struct hlist_head *head;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init(&tmp->cache_list);
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
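
/*
 * Illustrative sketch (hypothetical, not part of this file): a cache
 * user embeds a struct cache_head in its own entry type and supplies
 * alloc/match/init/update through its cache_detail, e.g.:
 *
 *	struct my_ent {				// hypothetical entry type
 *		struct cache_head	h;
 *		struct in6_addr		addr;	// the lookup key
 *		char			*domain; // the cached content
 *	};
 *
 *	static int my_match(struct cache_head *a, struct cache_head *b)
 *	{
 *		struct my_ent *ea = container_of(a, struct my_ent, h);
 *		struct my_ent *eb = container_of(b, struct my_ent, h);
 *		return ipv6_addr_equal(&ea->addr, &eb->addr);
 *	}
 *
 * A lookup fills only the key fields of a temporary entry and calls
 * sunrpc_cache_lookup(cd, &ent.h, hash).  See the ip_map cache in
 * net/sunrpc/svcauth_unix.c for a real user of this pattern.
 */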

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
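
/*
 * Note on the replace-not-update design above: once an entry is
 * CACHE_VALID, readers may be using its contents without holding
 * hash_lock, so sunrpc_cache_update() never rewrites a valid entry in
 * place.  Instead it hashes a fresh entry ('tmp') and retires the old
 * one via cache_fresh_locked(old, 0, detail), i.e. an expiry_time of
 * 0, so the cleaner discards it once its last reference is dropped.
 */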

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked(), this ensures that
			 * anyone using the cache entry after this
			 * sees the updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *		   upcall completed but item is still invalid (implying that
 *		   the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
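
/*
 * Caller pattern (illustrative sketch; simplified from real users
 * such as the svcauth caches).  The caller obtains a reference via a
 * lookup, and cache_check() consumes that reference on failure:
 *
 *	struct my_ent *ent = my_lookup(cd, &key);	// hypothetical
 *	if (!ent)
 *		return -ENOMEM;
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:		// valid: use ent, then cache_put(&ent->h, cd)
 *		break;
 *	case -EAGAIN:	// upcall pending, request deferred; retry later
 *	case -ETIMEDOUT: // upcall failed; reference already dropped
 *	case -ENOENT:	// negative entry; reference already dropped
 *		return error;
 *	}
 */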

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (detail->flush_time >= now)
		now = detail->flush_time + 1;
	/* 'now' is the maximum value any 'last_refresh' can have */
	detail->flush_time = now;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *", so that they can be
 * found and revisited efficiently when the upcall completes
 * (see cache_revisit_request()).
 * Deferred requests that may be discarded if there are too
 * many of them are additionally linked onto "cache_defer_list",
 * which cache_limit_defers() trims to at most DFR_MAX entries.
 */

#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* upper bound on deferred requests kept on cache_defer_list */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}
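
/*
 * Summary of the deferral flow above: if the request owner can sleep
 * (req->thread_wait != 0), cache_wait_req() parks the thread on an
 * on-stack completion that cache_revisit_request() completes when the
 * upcall finishes.  Otherwise, or if the wait times out while the
 * item is still pending, req->defer() packages the request for later
 * replay, and cache_limit_defers() caps the number of parked requests
 * at DFR_MAX by revisiting a randomly chosen victim early.
 */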

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next pending request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or a hexified with a leading \x
 * Record is terminated with newline.
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
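
/*
 * Example: with a sufficiently large buffer,
 *
 *	qword_add(&bp, &len, "a b");
 *
 * emits "a\040b " -- the embedded space is octal-escaped (ESCAPE_OCTAL
 * over the "\\ \n\t" set) and a single unescaped space terminates the
 * field.  On overflow *lp goes negative, which turns later qword_add()
 * calls into no-ops and lets the caller detect a truncated record.
 */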

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
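
/*
 * Example: for the two bytes { 0xde, 0xad },
 *
 *	qword_addhex(&bp, &len, buf, 2);
 *
 * emits "\xdead " -- a "\x" prefix, two lowercase hex digits per byte
 * (hex_byte_pack), and a trailing space.  If the field cannot fit,
 * *lp is set to -1 to poison the record.
 */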

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
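
/*
 * The queued request is what a userspace daemon (e.g. rpc.mountd for
 * nfsd's export caches) sees when it read()s the channel file: one
 * formatted line per request, built by ->cache_request into the
 * page-sized buffer above.  The daemon replies by write()ing a line
 * that ->cache_parse consumes; see cache_write() and qword_get().
 */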

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
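
/*
 * Examples: given the input "\x6465 a\040b\n",
 *
 *	qword_get(&bp, dest, size);	// dest = "de"  (hex 0x64 0x65)
 *	qword_get(&bp, dest, size);	// dest = "a b" (octal 040 = ' ')
 *
 * Each call returns the number of bytes copied, advances *bpp to the
 * next field, and returns -1 on a malformed or overlong field.
 */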

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
void *cache_seq_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);
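
/*
 * Iterator position encoding used above and in cache_seq_next():
 * *pos is 1 + ((hash << 32) | entry), with position 0 reserved for
 * the SEQ_START_TOKEN header line.  The upper 32 bits select the
 * hash bucket and the lower 32 bits count entries within it, which
 * is why stepping to the next bucket adds 1LL<<32 and clears the
 * low half.
 */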

void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(ch->cache_list.next,
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);

void cache_seq_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = m->private;
	read_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_seq_stop);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start,
	.next	= cache_seq_next,
	.stop	= cache_seq_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	unsigned long p = *ppos;
	size_t len;

	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf + p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;
	time_t then, now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	then = get_expiry(&bp);
	now = seconds_since_boot();
	cd->nextcheck = now;
	/* Can only set flush_time to 1 second beyond "now", or
	 * possibly 1 second beyond flushtime.  This is because
	 * flush_time never goes backwards so it mustn't get too far
	 * forwards either.
	 */
	if (then >= now) {
		/* Want to flush everything, so behave like cache_purge() */
		if (cd->flush_time >= now)
			now = cd->flush_time + 1;
		then = now;
	}

	cd->flush_time = then;
	cache_flush();

	*ppos += count;
	return count;
}
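
/*
 * Userspace usage (illustrative; the exact cache name depends on
 * which caches are registered): writing a time, or any small integer,
 * to a cache's "flush" file discards entries last refreshed before
 * that time, e.g.
 *
 *	echo 1 > /proc/net/rpc/auth.unix.ip/flush
 *
 * Because of the clamping above, any value at or beyond "now" simply
 * flushes the whole cache, cache_purge()-style.
 */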

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,	/* read and discard */
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs,	/* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
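
/*
 * Typical per-netns instantiation (illustrative sketch of the pattern
 * used by e.g. the svcauth and nfsd caches; 'my_tmpl' is hypothetical):
 *
 *	static struct cache_detail my_tmpl = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 64,
 *		.name		= "my_cache",
 *		// plus alloc/match/init/update/cache_request/cache_parse
 *	};
 *
 *	struct cache_detail *cd = cache_create_net(&my_tmpl, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);	// procfs files + cleaner
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */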

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->u.pipefs.dir = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1857