// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static bool cache_listeners_exist(struct cache_detail *detail);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
        time_t now = seconds_since_boot();

        INIT_HLIST_NODE(&h->cache_list);
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        if (now <= detail->flush_time)
                /* ensure it isn't already expired */
                now = detail->flush_time + 1;
        h->last_refresh = now;
}
55
56static inline int cache_is_valid(struct cache_head *h);
57static void cache_fresh_locked(struct cache_head *head, time_t expiry,
58 struct cache_detail *detail);
59static void cache_fresh_unlocked(struct cache_head *head,
60 struct cache_detail *detail);
61
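/*
 * Find an unexpired entry matching @key on the @hash chain.  The lookup
 * runs under rcu_read_lock() and returns the entry with an extra
 * reference held, or NULL if no usable entry exists.
 */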
62static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
63 struct cache_head *key,
64 int hash)
65{
66 struct hlist_head *head = &detail->hash_table[hash];
67 struct cache_head *tmp;
68
69 rcu_read_lock();
70 hlist_for_each_entry_rcu(tmp, head, cache_list) {
71 if (detail->match(tmp, key)) {
72 if (cache_is_expired(detail, tmp))
73 continue;
74 tmp = cache_get_rcu(tmp);
75 rcu_read_unlock();
76 return tmp;
77 }
78 }
79 rcu_read_unlock();
80 return NULL;
81}
82
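/*
 * Slow path for lookups: allocate and initialise a new entry, then insert
 * it under detail->hash_lock unless a matching entry appeared in the
 * meantime.  An expired entry found on the chain is unhashed and released
 * once the lock has been dropped.
 */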
83static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
84 struct cache_head *key,
85 int hash)
86{
87 struct cache_head *new, *tmp, *freeme = NULL;
88 struct hlist_head *head = &detail->hash_table[hash];
89
90 new = detail->alloc();
91 if (!new)
92 return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
97 cache_init(new, detail);
98 detail->init(new, key);
99
100 spin_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
103 hlist_for_each_entry_rcu(tmp, head, cache_list) {
104 if (detail->match(tmp, key)) {
105 if (cache_is_expired(detail, tmp)) {
106 hlist_del_init_rcu(&tmp->cache_list);
107 detail->entries --;
108 if (cache_is_valid(tmp) == -EAGAIN)
109 set_bit(CACHE_NEGATIVE, &tmp->flags);
110 cache_fresh_locked(tmp, 0, detail);
111 freeme = tmp;
112 break;
113 }
114 cache_get(tmp);
115 spin_unlock(&detail->hash_lock);
116 cache_put(new, detail);
117 return tmp;
118 }
119 }
120
121 hlist_add_head_rcu(&new->cache_list, head);
122 detail->entries++;
123 cache_get(new);
124 spin_unlock(&detail->hash_lock);
125
126 if (freeme) {
127 cache_fresh_unlocked(freeme, detail);
128 cache_put(freeme, detail);
129 }
130 return new;
131}
132
133struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
134 struct cache_head *key, int hash)
135{
136 struct cache_head *ret;
137
138 ret = sunrpc_cache_find_rcu(detail, key, hash);
139 if (ret)
140 return ret;
141
142 return sunrpc_cache_add_entry(detail, key, hash);
143}
144EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
145
146static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
147
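/*
 * cache_fresh_locked() stamps an entry with a new expiry time and marks it
 * VALID (with a write barrier so readers see the updated contents first);
 * cache_fresh_unlocked() then wakes any deferred requests and dequeues a
 * queued upcall once CACHE_PENDING has been cleared.
 */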
148static void cache_fresh_locked(struct cache_head *head, time_t expiry,
149 struct cache_detail *detail)
150{
151 time_t now = seconds_since_boot();
        if (now <= detail->flush_time)
                /* ensure it isn't already expired */
                now = detail->flush_time + 1;
155 head->expiry_time = expiry;
156 head->last_refresh = now;
157 smp_wmb();
158 set_bit(CACHE_VALID, &head->flags);
159}
160
161static void cache_fresh_unlocked(struct cache_head *head,
162 struct cache_detail *detail)
163{
164 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
165 cache_revisit_request(head);
166 cache_dequeue(detail, head);
167 }
168}
169
170struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
171 struct cache_head *new, struct cache_head *old, int hash)
172{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it.
         */
177 struct cache_head *tmp;
178
179 if (!test_bit(CACHE_VALID, &old->flags)) {
180 spin_lock(&detail->hash_lock);
181 if (!test_bit(CACHE_VALID, &old->flags)) {
182 if (test_bit(CACHE_NEGATIVE, &new->flags))
183 set_bit(CACHE_NEGATIVE, &old->flags);
184 else
185 detail->update(old, new);
186 cache_fresh_locked(old, new->expiry_time, detail);
187 spin_unlock(&detail->hash_lock);
188 cache_fresh_unlocked(old, detail);
189 return old;
190 }
191 spin_unlock(&detail->hash_lock);
192 }
193
194 tmp = detail->alloc();
195 if (!tmp) {
196 cache_put(old, detail);
197 return NULL;
198 }
199 cache_init(tmp, detail);
200 detail->init(tmp, old);
201
202 spin_lock(&detail->hash_lock);
203 if (test_bit(CACHE_NEGATIVE, &new->flags))
204 set_bit(CACHE_NEGATIVE, &tmp->flags);
205 else
206 detail->update(tmp, new);
207 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
208 detail->entries++;
209 cache_get(tmp);
210 cache_fresh_locked(tmp, new->expiry_time, detail);
211 cache_fresh_locked(old, 0, detail);
212 spin_unlock(&detail->hash_lock);
213 cache_fresh_unlocked(tmp, detail);
214 cache_fresh_unlocked(old, detail);
215 cache_put(old, detail);
216 return tmp;
217}
218EXPORT_SYMBOL_GPL(sunrpc_cache_update);
219
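/*
 * Ask user space to refresh this entry: use the cache's private
 * ->cache_upcall method if it has one, otherwise fall back to the
 * generic rpc_pipefs channel upcall.
 */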
220static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
221{
222 if (cd->cache_upcall)
223 return cd->cache_upcall(cd, h);
224 return sunrpc_cache_pipe_upcall(cd, h);
225}
226
227static inline int cache_is_valid(struct cache_head *h)
228{
229 if (!test_bit(CACHE_VALID, &h->flags))
230 return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else {
                        /*
                         * In combination with write barrier in
                         * sunrpc_cache_update, ensures that anyone
                         * using the cache entry after this sees the
                         * updated contents:
                         */
                        smp_rmb();
243 return 0;
244 }
245 }
246}
247
248static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
249{
250 int rv;
251
252 spin_lock(&detail->hash_lock);
253 rv = cache_is_valid(h);
254 if (rv == -EAGAIN) {
255 set_bit(CACHE_NEGATIVE, &h->flags);
256 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
257 detail);
258 rv = -ENOENT;
259 }
260 spin_unlock(&detail->hash_lock);
261 cache_fresh_unlocked(h, detail);
262 return rv;
263}
264
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *            upcall completed but item is still invalid (implying that
 *            the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
279int cache_check(struct cache_detail *detail,
280 struct cache_head *h, struct cache_req *rqstp)
281{
282 int rv;
283 long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(h);

        /* now see if we want to start an upcall */
289 refresh_age = (h->expiry_time - h->last_refresh);
290 age = seconds_since_boot() - h->last_refresh;
291
292 if (rqstp == NULL) {
293 if (rv == -EAGAIN)
294 rv = -ENOENT;
295 } else if (rv == -EAGAIN ||
296 (h->expiry_time != 0 && age > refresh_age/2)) {
297 dprintk("RPC: Want update, refage=%ld, age=%ld\n",
298 refresh_age, age);
299 if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
300 switch (cache_make_upcall(detail, h)) {
301 case -EINVAL:
302 rv = try_to_negate_entry(detail, h);
303 break;
304 case -EAGAIN:
305 cache_fresh_unlocked(h, detail);
306 break;
307 }
308 } else if (!cache_listeners_exist(detail))
309 rv = try_to_negate_entry(detail, h);
310 }
311
312 if (rv == -EAGAIN) {
313 if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
318 rv = cache_is_valid(h);
319 if (rv == -EAGAIN)
320 rv = -ETIMEDOUT;
321 }
322 }
323 if (rv)
324 cache_put(h, detail);
325 return rv;
326}
327EXPORT_SYMBOL_GPL(cache_check);
328
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is answered in part by
 * the use of the "nextcheck" field in the cache_detail.
 * When a scan of a table begins, nextcheck is set well into the future,
 * and while scanning it is pulled back to the earliest expiry time seen.
 * A table is then only rescanned once the current time reaches
 * the nextcheck time.
 *
 */
361static LIST_HEAD(cache_list);
362static DEFINE_SPINLOCK(cache_list_lock);
363static struct cache_detail *current_detail;
364static int current_index;
365
366static void do_cache_clean(struct work_struct *work);
367static struct delayed_work cache_cleaner;
368
369void sunrpc_init_cache_detail(struct cache_detail *cd)
370{
371 spin_lock_init(&cd->hash_lock);
372 INIT_LIST_HEAD(&cd->queue);
373 spin_lock(&cache_list_lock);
374 cd->nextcheck = 0;
375 cd->entries = 0;
376 atomic_set(&cd->writers, 0);
377 cd->last_close = 0;
378 cd->last_warn = -1;
379 list_add(&cd->others, &cache_list);
380 spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
384}
385EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
386
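/*
 * Tear-down counterpart of sunrpc_init_cache_detail(): purge all entries,
 * unlink the cache from the global list, and stop the cleaner work item
 * once no caches remain.
 */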
387void sunrpc_destroy_cache_detail(struct cache_detail *cd)
388{
389 cache_purge(cd);
390 spin_lock(&cache_list_lock);
391 spin_lock(&cd->hash_lock);
392 if (current_detail == cd)
393 current_detail = NULL;
394 list_del_init(&cd->others);
395 spin_unlock(&cd->hash_lock);
396 spin_unlock(&cache_list_lock);
397 if (list_empty(&cache_list)) {
                /* module must be being unloaded so its safe to kill the worker */
399 cancel_delayed_work_sync(&cache_cleaner);
400 }
401}
402EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
403
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
410static int cache_clean(void)
411{
412 int rv = 0;
413 struct list_head *next;
414
415 spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
418 while (current_detail == NULL ||
419 current_index >= current_detail->hash_size) {
420 if (current_detail)
421 next = current_detail->others.next;
422 else
423 next = cache_list.next;
424 if (next == &cache_list) {
425 current_detail = NULL;
426 spin_unlock(&cache_list_lock);
427 return -1;
428 }
429 current_detail = list_entry(next, struct cache_detail, others);
430 if (current_detail->nextcheck > seconds_since_boot())
431 current_index = current_detail->hash_size;
432 else {
433 current_index = 0;
434 current_detail->nextcheck = seconds_since_boot()+30*60;
435 }
436 }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               hlist_empty(&current_detail->hash_table[current_index]))
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

446 if (current_detail && current_index < current_detail->hash_size) {
447 struct cache_head *ch = NULL;
448 struct cache_detail *d;
449 struct hlist_head *head;
450 struct hlist_node *tmp;
451
                spin_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                head = &current_detail->hash_table[current_index];
457 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
458 if (current_detail->nextcheck > ch->expiry_time)
459 current_detail->nextcheck = ch->expiry_time+1;
460 if (!cache_is_expired(current_detail, ch))
461 continue;
462
463 hlist_del_init_rcu(&ch->cache_list);
464 current_detail->entries--;
465 rv = 1;
466 break;
467 }
468
                spin_unlock(&current_detail->hash_lock);
470 d = current_detail;
471 if (!ch)
472 current_index ++;
473 spin_unlock(&cache_list_lock);
474 if (ch) {
475 set_bit(CACHE_CLEANED, &ch->flags);
476 cache_fresh_unlocked(ch, d);
477 cache_put(ch, d);
478 }
479 } else
480 spin_unlock(&cache_list_lock);
481
482 return rv;
483}
484
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
488static void do_cache_clean(struct work_struct *work)
489{
490 int delay = 5;
491 if (cache_clean() == -1)
492 delay = round_jiffies_relative(30*HZ);
493
494 if (list_empty(&cache_list))
495 delay = 0;
496
497 if (delay)
498 queue_delayed_work(system_power_efficient_wq,
499 &cache_cleaner, delay);
500}
501

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
508void cache_flush(void)
509{
510 while (cache_clean() != -1)
511 cond_resched();
512 while (cache_clean() != -1)
513 cond_resched();
514}
515EXPORT_SYMBOL_GPL(cache_flush);
516
517void cache_purge(struct cache_detail *detail)
518{
519 struct cache_head *ch = NULL;
520 struct hlist_head *head = NULL;
521 struct hlist_node *tmp = NULL;
522 int i = 0;
523
524 spin_lock(&detail->hash_lock);
525 if (!detail->entries) {
526 spin_unlock(&detail->hash_lock);
527 return;
528 }
529
530 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
531 for (i = 0; i < detail->hash_size; i++) {
532 head = &detail->hash_table[i];
533 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
534 hlist_del_init_rcu(&ch->cache_list);
535 detail->entries--;
536
537 set_bit(CACHE_CLEANED, &ch->flags);
538 spin_unlock(&detail->hash_lock);
539 cache_fresh_unlocked(ch, detail);
540 cache_put(ch, detail);
541 spin_lock(&detail->hash_lock);
542 }
543 }
544 spin_unlock(&detail->hash_lock);
545}
546EXPORT_SYMBOL_GPL(cache_purge);
547

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * They are also linked into a list, "cache_defer_list",
 * which is limited to DFR_MAX entries: when the limit is
 * exceeded, either the newest or the oldest deferred request
 * is dropped and revisited immediately.
 * Deferrals with a thread sleeping on them are hashed but not
 * counted against the limit.
 */

564#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
565#define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
566
567#define DFR_MAX 300
568
569static DEFINE_SPINLOCK(cache_defer_lock);
570static LIST_HEAD(cache_defer_list);
571static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
572static int cache_defer_cnt;
573
574static void __unhash_deferred_req(struct cache_deferred_req *dreq)
575{
576 hlist_del_init(&dreq->hash);
577 if (!list_empty(&dreq->recent)) {
578 list_del_init(&dreq->recent);
579 cache_defer_cnt--;
580 }
581}
582
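/* Hash a deferred request on the address of the cache item it is waiting for. */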
583static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
584{
585 int hash = DFR_HASH(item);
586
587 INIT_LIST_HEAD(&dreq->recent);
588 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
589}
590
591static void setup_deferral(struct cache_deferred_req *dreq,
592 struct cache_head *item,
593 int count_me)
594{
595
596 dreq->item = item;
597
598 spin_lock(&cache_defer_lock);
599
600 __hash_deferred_req(dreq, item);
601
602 if (count_me) {
603 cache_defer_cnt++;
604 list_add(&dreq->recent, &cache_defer_list);
605 }
606
607 spin_unlock(&cache_defer_lock);
608
609}
610
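/*
 * Synchronous deferral: a server thread that can afford to wait sleeps on a
 * completion embedded in a thread_deferred_req on its stack, and is woken by
 * cache_restart_thread() when the cache item is revisited.
 */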
611struct thread_deferred_req {
612 struct cache_deferred_req handle;
613 struct completion completion;
614};
615
616static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
617{
618 struct thread_deferred_req *dr =
619 container_of(dreq, struct thread_deferred_req, handle);
620 complete(&dr->completion);
621}
622
623static void cache_wait_req(struct cache_req *req, struct cache_head *item)
624{
625 struct thread_deferred_req sleeper;
626 struct cache_deferred_req *dreq = &sleeper.handle;
627
628 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
629 dreq->revisit = cache_restart_thread;
630
631 setup_deferral(dreq, item, 0);
632
633 if (!test_bit(CACHE_PENDING, &item->flags) ||
634 wait_for_completion_interruptible_timeout(
635 &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
639 spin_lock(&cache_defer_lock);
640 if (!hlist_unhashed(&sleeper.handle.hash)) {
641 __unhash_deferred_req(&sleeper.handle);
642 spin_unlock(&cache_defer_lock);
643 } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
649 spin_unlock(&cache_defer_lock);
650 wait_for_completion(&sleeper.completion);
651 }
652 }
653}
654
655static void cache_limit_defers(void)
656{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
660 struct cache_deferred_req *discard = NULL;
661
662 if (cache_defer_cnt <= DFR_MAX)
663 return;
664
665 spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
668 if (cache_defer_cnt > DFR_MAX) {
669 if (prandom_u32() & 1)
670 discard = list_entry(cache_defer_list.next,
671 struct cache_deferred_req, recent);
672 else
673 discard = list_entry(cache_defer_list.prev,
674 struct cache_deferred_req, recent);
675 __unhash_deferred_req(discard);
676 }
677 spin_unlock(&cache_defer_lock);
678 if (discard)
679 discard->revisit(discard, 1);
680}
681
/* Return true if and only if a deferred request is queued. */
683static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
684{
685 struct cache_deferred_req *dreq;
686
687 if (req->thread_wait) {
688 cache_wait_req(req, item);
689 if (!test_bit(CACHE_PENDING, &item->flags))
690 return false;
691 }
692 dreq = req->defer(req);
693 if (dreq == NULL)
694 return false;
695 setup_deferral(dreq, item, 1);
696 if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
700 cache_revisit_request(item);
701
702 cache_limit_defers();
703 return true;
704}
705
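/*
 * A pending item has been updated (or cleaned up): move every deferred
 * request that is waiting on it onto a private list and call its
 * ->revisit() callback outside the lock.
 */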
706static void cache_revisit_request(struct cache_head *item)
707{
708 struct cache_deferred_req *dreq;
709 struct list_head pending;
710 struct hlist_node *tmp;
711 int hash = DFR_HASH(item);
712
713 INIT_LIST_HEAD(&pending);
714 spin_lock(&cache_defer_lock);
715
716 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
717 if (dreq->item == item) {
718 __unhash_deferred_req(dreq);
719 list_add(&dreq->recent, &pending);
720 }
721
722 spin_unlock(&cache_defer_lock);
723
724 while (!list_empty(&pending)) {
725 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
726 list_del_init(&dreq->recent);
727 dreq->revisit(dreq, 0);
728 }
729}
730
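/*
 * Called when @owner (typically a service being shut down) goes away:
 * revisit all of its still-deferred requests with 'too_many' set so the
 * callbacks can drop them rather than retry.
 */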
731void cache_clean_deferred(void *owner)
732{
733 struct cache_deferred_req *dreq, *tmp;
734 struct list_head pending;
735
736
737 INIT_LIST_HEAD(&pending);
738 spin_lock(&cache_defer_lock);
739
740 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
741 if (dreq->owner == owner) {
742 __unhash_deferred_req(dreq);
743 list_add(&dreq->recent, &pending);
744 }
745 }
746 spin_unlock(&cache_defer_lock);
747
748 while (!list_empty(&pending)) {
749 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
750 list_del_init(&dreq->recent);
751 dreq->revisit(dreq, 1);
752 }
753}
754
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found
 * that is no longer pending, it is freed from the list.
 *
 */

771static DEFINE_SPINLOCK(queue_lock);
772static DEFINE_MUTEX(queue_io_mutex);
773
774struct cache_queue {
775 struct list_head list;
776 int reader;
777};
778struct cache_request {
779 struct cache_queue q;
780 struct cache_head *item;
781 char * buf;
782 int len;
783 int readers;
784};
785struct cache_reader {
786 struct cache_queue q;
787 int offset;
788};
789
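/*
 * Format the upcall for crq->item into crq->buf (one page) using the
 * cache's ->cache_request method; returns the number of bytes used, or
 * -EAGAIN if the formatted request did not fit in the page.
 */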
790static int cache_request(struct cache_detail *detail,
791 struct cache_request *crq)
792{
793 char *bp = crq->buf;
794 int len = PAGE_SIZE;
795
796 detail->cache_request(detail, crq->item, &bp, &len);
797 if (len < 0)
798 return -EAGAIN;
799 return PAGE_SIZE - len;
800}
801
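/*
 * Read one upcall request.  The reader's queue entry is advanced past other
 * readers to the next request; partial reads keep rp->offset so the rest of
 * the request is returned on the next read, and a fully-read request that is
 * no longer pending is dequeued and released.
 */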
802static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
803 loff_t *ppos, struct cache_detail *cd)
804{
805 struct cache_reader *rp = filp->private_data;
806 struct cache_request *rq;
807 struct inode *inode = file_inode(filp);
808 int err;
809
810 if (count == 0)
811 return 0;
812
813 inode_lock(inode);
814
815 again:
816 spin_lock(&queue_lock);
817
818 while (rp->q.list.next != &cd->queue &&
819 list_entry(rp->q.list.next, struct cache_queue, list)
820 ->reader) {
821 struct list_head *next = rp->q.list.next;
822 list_move(&rp->q.list, next);
823 }
824 if (rp->q.list.next == &cd->queue) {
825 spin_unlock(&queue_lock);
826 inode_unlock(inode);
827 WARN_ON_ONCE(rp->offset);
828 return 0;
829 }
830 rq = container_of(rp->q.list.next, struct cache_request, q.list);
831 WARN_ON_ONCE(rq->q.reader);
832 if (rp->offset == 0)
833 rq->readers++;
834 spin_unlock(&queue_lock);
835
836 if (rq->len == 0) {
837 err = cache_request(cd, rq);
838 if (err < 0)
839 goto out;
840 rq->len = err;
841 }
842
843 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
844 err = -EAGAIN;
845 spin_lock(&queue_lock);
846 list_move(&rp->q.list, &rq->q.list);
847 spin_unlock(&queue_lock);
848 } else {
849 if (rp->offset + count > rq->len)
850 count = rq->len - rp->offset;
851 err = -EFAULT;
852 if (copy_to_user(buf, rq->buf + rp->offset, count))
853 goto out;
854 rp->offset += count;
855 if (rp->offset >= rq->len) {
856 rp->offset = 0;
857 spin_lock(&queue_lock);
858 list_move(&rp->q.list, &rq->q.list);
859 spin_unlock(&queue_lock);
860 }
861 err = 0;
862 }
863 out:
864 if (rp->offset == 0) {
                /* need to release rq */
866 spin_lock(&queue_lock);
867 rq->readers--;
868 if (rq->readers == 0 &&
869 !test_bit(CACHE_PENDING, &rq->item->flags)) {
870 list_del(&rq->q.list);
871 spin_unlock(&queue_lock);
872 cache_put(rq->item, cd);
873 kfree(rq->buf);
874 kfree(rq);
875 } else
876 spin_unlock(&queue_lock);
877 }
878 if (err == -EAGAIN)
879 goto again;
880 inode_unlock(inode);
881 return err ? err : count;
882}
883
884static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
885 size_t count, struct cache_detail *cd)
886{
887 ssize_t ret;
888
889 if (count == 0)
890 return -EINVAL;
891 if (copy_from_user(kaddr, buf, count))
892 return -EFAULT;
893 kaddr[count] = '\0';
894 ret = cd->cache_parse(cd, kaddr, count);
895 if (!ret)
896 ret = count;
897 return ret;
898}
899
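/*
 * Fallback for downcalls when no pagecache page is available: copy the
 * data through a single static buffer, serialised by queue_io_mutex.
 */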
900static ssize_t cache_slow_downcall(const char __user *buf,
901 size_t count, struct cache_detail *cd)
902{
903 static char write_buf[8192];
904 ssize_t ret = -EINVAL;
905
906 if (count >= sizeof(write_buf))
907 goto out;
908 mutex_lock(&queue_io_mutex);
909 ret = cache_do_downcall(write_buf, buf, count, cd);
910 mutex_unlock(&queue_io_mutex);
911out:
912 return ret;
913}
914
915static ssize_t cache_downcall(struct address_space *mapping,
916 const char __user *buf,
917 size_t count, struct cache_detail *cd)
918{
919 struct page *page;
920 char *kaddr;
921 ssize_t ret = -ENOMEM;
922
923 if (count >= PAGE_SIZE)
924 goto out_slow;
925
926 page = find_or_create_page(mapping, 0, GFP_KERNEL);
927 if (!page)
928 goto out_slow;
929
930 kaddr = kmap(page);
931 ret = cache_do_downcall(kaddr, buf, count, cd);
932 kunmap(page);
933 unlock_page(page);
934 put_page(page);
935 return ret;
936out_slow:
937 return cache_slow_downcall(buf, count, cd);
938}
939
940static ssize_t cache_write(struct file *filp, const char __user *buf,
941 size_t count, loff_t *ppos,
942 struct cache_detail *cd)
943{
944 struct address_space *mapping = filp->f_mapping;
945 struct inode *inode = file_inode(filp);
946 ssize_t ret = -EINVAL;
947
948 if (!cd->cache_parse)
949 goto out;
950
951 inode_lock(inode);
952 ret = cache_downcall(mapping, buf, count, cd);
953 inode_unlock(inode);
954out:
955 return ret;
956}
957
958static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
959
960static __poll_t cache_poll(struct file *filp, poll_table *wait,
961 struct cache_detail *cd)
962{
963 __poll_t mask;
964 struct cache_reader *rp = filp->private_data;
965 struct cache_queue *cq;
966
967 poll_wait(filp, &queue_wait, wait);
968
969
970 mask = EPOLLOUT | EPOLLWRNORM;
971
972 if (!rp)
973 return mask;
974
975 spin_lock(&queue_lock);
976
977 for (cq= &rp->q; &cq->list != &cd->queue;
978 cq = list_entry(cq->list.next, struct cache_queue, list))
979 if (!cq->reader) {
980 mask |= EPOLLIN | EPOLLRDNORM;
981 break;
982 }
983 spin_unlock(&queue_lock);
984 return mask;
985}
986
987static int cache_ioctl(struct inode *ino, struct file *filp,
988 unsigned int cmd, unsigned long arg,
989 struct cache_detail *cd)
990{
991 int len = 0;
992 struct cache_reader *rp = filp->private_data;
993 struct cache_queue *cq;
994
995 if (cmd != FIONREAD || !rp)
996 return -EINVAL;
997
998 spin_lock(&queue_lock);
999
        /* only find the length remaining in current request,
         * or the length of the next request
         */
1003 for (cq= &rp->q; &cq->list != &cd->queue;
1004 cq = list_entry(cq->list.next, struct cache_queue, list))
1005 if (!cq->reader) {
1006 struct cache_request *cr =
1007 container_of(cq, struct cache_request, q);
1008 len = cr->len - rp->offset;
1009 break;
1010 }
1011 spin_unlock(&queue_lock);
1012
1013 return put_user(len, (int __user *)arg);
1014}
1015
1016static int cache_open(struct inode *inode, struct file *filp,
1017 struct cache_detail *cd)
1018{
1019 struct cache_reader *rp = NULL;
1020
1021 if (!cd || !try_module_get(cd->owner))
1022 return -EACCES;
1023 nonseekable_open(inode, filp);
1024 if (filp->f_mode & FMODE_READ) {
1025 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1026 if (!rp) {
1027 module_put(cd->owner);
1028 return -ENOMEM;
1029 }
1030 rp->offset = 0;
1031 rp->q.reader = 1;
1032
1033 spin_lock(&queue_lock);
1034 list_add(&rp->q.list, &cd->queue);
1035 spin_unlock(&queue_lock);
1036 }
1037 if (filp->f_mode & FMODE_WRITE)
1038 atomic_inc(&cd->writers);
1039 filp->private_data = rp;
1040 return 0;
1041}
1042
1043static int cache_release(struct inode *inode, struct file *filp,
1044 struct cache_detail *cd)
1045{
1046 struct cache_reader *rp = filp->private_data;
1047
1048 if (rp) {
1049 spin_lock(&queue_lock);
1050 if (rp->offset) {
1051 struct cache_queue *cq;
1052 for (cq= &rp->q; &cq->list != &cd->queue;
1053 cq = list_entry(cq->list.next, struct cache_queue, list))
1054 if (!cq->reader) {
1055 container_of(cq, struct cache_request, q)
1056 ->readers--;
1057 break;
1058 }
1059 rp->offset = 0;
1060 }
1061 list_del(&rp->q.list);
1062 spin_unlock(&queue_lock);
1063
1064 filp->private_data = NULL;
1065 kfree(rp);
1066
1067 }
1068 if (filp->f_mode & FMODE_WRITE) {
1069 atomic_dec(&cd->writers);
1070 cd->last_close = seconds_since_boot();
1071 }
1072 module_put(cd->owner);
1073 return 0;
1074}
1075
1076
1077
1078static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1079{
1080 struct cache_queue *cq, *tmp;
1081 struct cache_request *cr;
1082 struct list_head dequeued;
1083
1084 INIT_LIST_HEAD(&dequeued);
1085 spin_lock(&queue_lock);
1086 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1087 if (!cq->reader) {
1088 cr = container_of(cq, struct cache_request, q);
1089 if (cr->item != ch)
1090 continue;
1091 if (test_bit(CACHE_PENDING, &ch->flags))
                                /* Lost a race and it is pending again */
1093 break;
1094 if (cr->readers != 0)
1095 continue;
1096 list_move(&cr->q.list, &dequeued);
1097 }
1098 spin_unlock(&queue_lock);
1099 while (!list_empty(&dequeued)) {
1100 cr = list_entry(dequeued.next, struct cache_request, q.list);
1101 list_del(&cr->q.list);
1102 cache_put(cr->item, detail);
1103 kfree(cr->buf);
1104 kfree(cr);
1105 }
1106}
1107

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */
1117void qword_add(char **bpp, int *lp, char *str)
1118{
1119 char *bp = *bpp;
1120 int len = *lp;
1121 int ret;
1122
1123 if (len < 0) return;
1124
1125 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1126 if (ret >= len) {
1127 bp += len;
1128 len = -1;
1129 } else {
1130 bp += ret;
1131 len -= ret;
1132 *bp++ = ' ';
1133 len--;
1134 }
1135 *bpp = bp;
1136 *lp = len;
1137}
1138EXPORT_SYMBOL_GPL(qword_add);
1139
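/*
 * Append @blen bytes from @buf as a single "\x..." hex-encoded field,
 * followed by a space.  *lp is set to -1 if the output buffer is too small.
 */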
1140void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1141{
1142 char *bp = *bpp;
1143 int len = *lp;
1144
1145 if (len < 0) return;
1146
1147 if (len > 2) {
1148 *bp++ = '\\';
1149 *bp++ = 'x';
1150 len -= 2;
1151 while (blen && len >= 2) {
1152 bp = hex_byte_pack(bp, *buf++);
1153 len -= 2;
1154 blen--;
1155 }
1156 }
1157 if (blen || len<1) len = -1;
1158 else {
1159 *bp++ = ' ';
1160 len--;
1161 }
1162 *bpp = bp;
1163 *lp = len;
1164}
1165EXPORT_SYMBOL_GPL(qword_addhex);
1166
1167static void warn_no_listener(struct cache_detail *detail)
1168{
1169 if (detail->last_warn != detail->last_close) {
1170 detail->last_warn = detail->last_close;
1171 if (detail->warn_no_listener)
1172 detail->warn_no_listener(detail, detail->last_close != 0);
1173 }
1174}
1175
1176static bool cache_listeners_exist(struct cache_detail *detail)
1177{
1178 if (atomic_read(&detail->writers))
1179 return true;
1180 if (detail->last_close == 0)
                /* never opened, or no more users */
1182 return false;
1183 if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
1189 return false;
1190 return true;
1191}
1192
/*
 * register an upcall request to user-space and queue it up for read() by
 * the upcall daemon.
 *
 * Each request is at most one page long.
 */
1199int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1200{
1201
1202 char *buf;
1203 struct cache_request *crq;
1204 int ret = 0;
1205
1206 if (!detail->cache_request)
1207 return -EINVAL;
1208
1209 if (!cache_listeners_exist(detail)) {
1210 warn_no_listener(detail);
1211 return -EINVAL;
1212 }
1213 if (test_bit(CACHE_CLEANED, &h->flags))
                /* Too late to make an upcall */
1215 return -EAGAIN;
1216
1217 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1218 if (!buf)
1219 return -EAGAIN;
1220
1221 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1222 if (!crq) {
1223 kfree(buf);
1224 return -EAGAIN;
1225 }
1226
1227 crq->q.reader = 0;
1228 crq->buf = buf;
1229 crq->len = 0;
1230 crq->readers = 0;
1231 spin_lock(&queue_lock);
1232 if (test_bit(CACHE_PENDING, &h->flags)) {
1233 crq->item = cache_get(h);
1234 list_add_tail(&crq->q.list, &detail->queue);
1235 } else
                /* Lost a race, no longer PENDING, so don't enqueue */
1237 ret = -EAGAIN;
1238 spin_unlock(&queue_lock);
1239 wake_up(&queue_wait);
1240 if (ret == -EAGAIN) {
1241 kfree(buf);
1242 kfree(crq);
1243 }
1244 return ret;
1245}
1246EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1247

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
1260int qword_get(char **bpp, char *dest, int bufsize)
1261{
        /* return bytes copied, or -1 on error */
1263 char *bp = *bpp;
1264 int len = 0;
1265
1266 while (*bp == ' ') bp++;
1267
1268 if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
1270 bp += 2;
1271 while (len < bufsize - 1) {
1272 int h, l;
1273
1274 h = hex_to_bin(bp[0]);
1275 if (h < 0)
1276 break;
1277
1278 l = hex_to_bin(bp[1]);
1279 if (l < 0)
1280 break;
1281
1282 *dest++ = (h << 4) | l;
1283 bp += 2;
1284 len++;
1285 }
1286 } else {
                /* text with \nnn octal quoting */
1288 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1289 if (*bp == '\\' &&
1290 isodigit(bp[1]) && (bp[1] <= '3') &&
1291 isodigit(bp[2]) &&
1292 isodigit(bp[3])) {
1293 int byte = (*++bp -'0');
1294 bp++;
1295 byte = (byte << 3) | (*bp++ - '0');
1296 byte = (byte << 3) | (*bp++ - '0');
1297 *dest++ = byte;
1298 len++;
1299 } else {
1300 *dest++ = *bp++;
1301 len++;
1302 }
1303 }
1304 }
1305
1306 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1307 return -1;
1308 while (*bp == ' ') bp++;
1309 *bpp = bp;
1310 *dest = '\0';
1311 return len;
1312}
1313EXPORT_SYMBOL_GPL(qword_get);
1314

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 *
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
1323static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1324{
1325 loff_t n = *pos;
1326 unsigned int hash, entry;
1327 struct cache_head *ch;
1328 struct cache_detail *cd = m->private;
1329
1330 if (!n--)
1331 return SEQ_START_TOKEN;
1332 hash = n >> 32;
1333 entry = n & ((1LL<<32) - 1);
1334
1335 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1336 if (!entry--)
1337 return ch;
1338 n &= ~((1LL<<32) - 1);
1339 do {
1340 hash++;
1341 n += 1LL<<32;
1342 } while(hash < cd->hash_size &&
1343 hlist_empty(&cd->hash_table[hash]));
1344 if (hash >= cd->hash_size)
1345 return NULL;
1346 *pos = n+1;
1347 return hlist_entry_safe(rcu_dereference_raw(
1348 hlist_first_rcu(&cd->hash_table[hash])),
1349 struct cache_head, cache_list);
1350}
1351
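/*
 * The seq_file position packs the hash bucket into the upper 32 bits and
 * the index within that bucket into the lower 32 bits; position 0 is the
 * header (SEQ_START_TOKEN).
 */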
1352static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1353{
1354 struct cache_head *ch = p;
1355 int hash = (*pos >> 32);
1356 struct cache_detail *cd = m->private;
1357
1358 if (p == SEQ_START_TOKEN)
1359 hash = 0;
1360 else if (ch->cache_list.next == NULL) {
1361 hash++;
1362 *pos += 1LL<<32;
1363 } else {
1364 ++*pos;
1365 return hlist_entry_safe(rcu_dereference_raw(
1366 hlist_next_rcu(&ch->cache_list)),
1367 struct cache_head, cache_list);
1368 }
1369 *pos &= ~((1LL<<32) - 1);
1370 while (hash < cd->hash_size &&
1371 hlist_empty(&cd->hash_table[hash])) {
1372 hash++;
1373 *pos += 1LL<<32;
1374 }
1375 if (hash >= cd->hash_size)
1376 return NULL;
1377 ++*pos;
1378 return hlist_entry_safe(rcu_dereference_raw(
1379 hlist_first_rcu(&cd->hash_table[hash])),
1380 struct cache_head, cache_list);
1381}
1382
1383void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1384 __acquires(RCU)
1385{
1386 rcu_read_lock();
1387 return __cache_seq_start(m, pos);
1388}
1389EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1390
1391void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1392{
1393 return cache_seq_next(file, p, pos);
1394}
1395EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1396
1397void cache_seq_stop_rcu(struct seq_file *m, void *p)
1398 __releases(RCU)
1399{
1400 rcu_read_unlock();
1401}
1402EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1403
1404static int c_show(struct seq_file *m, void *p)
1405{
1406 struct cache_head *cp = p;
1407 struct cache_detail *cd = m->private;
1408
1409 if (p == SEQ_START_TOKEN)
1410 return cd->cache_show(m, cd, NULL);
1411
1412 ifdebug(CACHE)
1413 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1414 convert_to_wallclock(cp->expiry_time),
1415 kref_read(&cp->ref), cp->flags);
1416 cache_get(cp);
1417 if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
1419 seq_printf(m, "# ");
1420 else {
1421 if (cache_is_expired(cd, cp))
1422 seq_printf(m, "# ");
1423 cache_put(cp, cd);
1424 }
1425
1426 return cd->cache_show(m, cd, cp);
1427}
1428
1429static const struct seq_operations cache_content_op = {
1430 .start = cache_seq_start_rcu,
1431 .next = cache_seq_next_rcu,
1432 .stop = cache_seq_stop_rcu,
1433 .show = c_show,
1434};
1435
1436static int content_open(struct inode *inode, struct file *file,
1437 struct cache_detail *cd)
1438{
1439 struct seq_file *seq;
1440 int err;
1441
1442 if (!cd || !try_module_get(cd->owner))
1443 return -EACCES;
1444
1445 err = seq_open(file, &cache_content_op);
1446 if (err) {
1447 module_put(cd->owner);
1448 return err;
1449 }
1450
1451 seq = file->private_data;
1452 seq->private = cd;
1453 return 0;
1454}
1455
1456static int content_release(struct inode *inode, struct file *file,
1457 struct cache_detail *cd)
1458{
1459 int ret = seq_release(inode, file);
1460 module_put(cd->owner);
1461 return ret;
1462}
1463
1464static int open_flush(struct inode *inode, struct file *file,
1465 struct cache_detail *cd)
1466{
1467 if (!cd || !try_module_get(cd->owner))
1468 return -EACCES;
1469 return nonseekable_open(inode, file);
1470}
1471
1472static int release_flush(struct inode *inode, struct file *file,
1473 struct cache_detail *cd)
1474{
1475 module_put(cd->owner);
1476 return 0;
1477}
1478
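/*
 * The "flush" file: reading it reports the current flush_time as a
 * wall-clock value; writing any number to it (the value itself is ignored)
 * flushes the whole cache by advancing flush_time to "now".
 */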
1479static ssize_t read_flush(struct file *file, char __user *buf,
1480 size_t count, loff_t *ppos,
1481 struct cache_detail *cd)
1482{
1483 char tbuf[22];
1484 size_t len;
1485
1486 len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
1487 convert_to_wallclock(cd->flush_time));
1488 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1489}
1490
1491static ssize_t write_flush(struct file *file, const char __user *buf,
1492 size_t count, loff_t *ppos,
1493 struct cache_detail *cd)
1494{
1495 char tbuf[20];
1496 char *ep;
1497 time_t now;
1498
1499 if (*ppos || count > sizeof(tbuf)-1)
1500 return -EINVAL;
1501 if (copy_from_user(tbuf, buf, count))
1502 return -EFAULT;
1503 tbuf[count] = 0;
1504 simple_strtoul(tbuf, &ep, 0);
1505 if (*ep && *ep != '\n')
1506 return -EINVAL;
        /* Note that while we check that 'buf' holds a valid number,
         * we always ignore the value and just flush everything.
         * Making use of the number leads to races.
         */

1512 now = seconds_since_boot();
        /* Always flush everything, so behave like cache_purge()
         * Do this by advancing flush_time to the current time,
         * or by one second if it has already reached the current time.
         * Newly added cache entries will always have ->last_refresh greater
         * than ->flush_time, so they don't get flushed prematurely.
         */

1520 if (cd->flush_time >= now)
1521 now = cd->flush_time + 1;
1522
1523 cd->flush_time = now;
1524 cd->nextcheck = now;
1525 cache_flush();
1526
1527 if (cd->flush)
1528 cd->flush();
1529
1530 *ppos += count;
1531 return count;
1532}
1533
1534static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1535 size_t count, loff_t *ppos)
1536{
1537 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1538
1539 return cache_read(filp, buf, count, ppos, cd);
1540}
1541
1542static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1543 size_t count, loff_t *ppos)
1544{
1545 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1546
1547 return cache_write(filp, buf, count, ppos, cd);
1548}
1549
1550static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1551{
1552 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1553
1554 return cache_poll(filp, wait, cd);
1555}
1556
1557static long cache_ioctl_procfs(struct file *filp,
1558 unsigned int cmd, unsigned long arg)
1559{
1560 struct inode *inode = file_inode(filp);
1561 struct cache_detail *cd = PDE_DATA(inode);
1562
1563 return cache_ioctl(inode, filp, cmd, arg, cd);
1564}
1565
1566static int cache_open_procfs(struct inode *inode, struct file *filp)
1567{
1568 struct cache_detail *cd = PDE_DATA(inode);
1569
1570 return cache_open(inode, filp, cd);
1571}
1572
1573static int cache_release_procfs(struct inode *inode, struct file *filp)
1574{
1575 struct cache_detail *cd = PDE_DATA(inode);
1576
1577 return cache_release(inode, filp, cd);
1578}
1579
1580static const struct file_operations cache_file_operations_procfs = {
1581 .owner = THIS_MODULE,
1582 .llseek = no_llseek,
1583 .read = cache_read_procfs,
1584 .write = cache_write_procfs,
1585 .poll = cache_poll_procfs,
1586 .unlocked_ioctl = cache_ioctl_procfs,
1587 .open = cache_open_procfs,
1588 .release = cache_release_procfs,
1589};
1590
1591static int content_open_procfs(struct inode *inode, struct file *filp)
1592{
1593 struct cache_detail *cd = PDE_DATA(inode);
1594
1595 return content_open(inode, filp, cd);
1596}
1597
1598static int content_release_procfs(struct inode *inode, struct file *filp)
1599{
1600 struct cache_detail *cd = PDE_DATA(inode);
1601
1602 return content_release(inode, filp, cd);
1603}
1604
1605static const struct file_operations content_file_operations_procfs = {
1606 .open = content_open_procfs,
1607 .read = seq_read,
1608 .llseek = seq_lseek,
1609 .release = content_release_procfs,
1610};
1611
1612static int open_flush_procfs(struct inode *inode, struct file *filp)
1613{
1614 struct cache_detail *cd = PDE_DATA(inode);
1615
1616 return open_flush(inode, filp, cd);
1617}
1618
1619static int release_flush_procfs(struct inode *inode, struct file *filp)
1620{
1621 struct cache_detail *cd = PDE_DATA(inode);
1622
1623 return release_flush(inode, filp, cd);
1624}
1625
1626static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1627 size_t count, loff_t *ppos)
1628{
1629 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1630
1631 return read_flush(filp, buf, count, ppos, cd);
1632}
1633
1634static ssize_t write_flush_procfs(struct file *filp,
1635 const char __user *buf,
1636 size_t count, loff_t *ppos)
1637{
1638 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1639
1640 return write_flush(filp, buf, count, ppos, cd);
1641}
1642
1643static const struct file_operations cache_flush_operations_procfs = {
1644 .open = open_flush_procfs,
1645 .read = read_flush_procfs,
1646 .write = write_flush_procfs,
1647 .release = release_flush_procfs,
1648 .llseek = no_llseek,
1649};
1650
1651static void remove_cache_proc_entries(struct cache_detail *cd)
1652{
1653 if (cd->procfs) {
1654 proc_remove(cd->procfs);
1655 cd->procfs = NULL;
1656 }
1657}
1658
1659#ifdef CONFIG_PROC_FS
1660static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1661{
1662 struct proc_dir_entry *p;
1663 struct sunrpc_net *sn;
1664
1665 sn = net_generic(net, sunrpc_net_id);
1666 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1667 if (cd->procfs == NULL)
1668 goto out_nomem;
1669
1670 p = proc_create_data("flush", S_IFREG | 0600,
1671 cd->procfs, &cache_flush_operations_procfs, cd);
1672 if (p == NULL)
1673 goto out_nomem;
1674
1675 if (cd->cache_request || cd->cache_parse) {
1676 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1677 &cache_file_operations_procfs, cd);
1678 if (p == NULL)
1679 goto out_nomem;
1680 }
1681 if (cd->cache_show) {
1682 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1683 &content_file_operations_procfs, cd);
1684 if (p == NULL)
1685 goto out_nomem;
1686 }
1687 return 0;
1688out_nomem:
1689 remove_cache_proc_entries(cd);
1690 return -ENOMEM;
1691}
1692#else
1693static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1694{
1695 return 0;
1696}
1697#endif
1698
1699void __init cache_initialize(void)
1700{
1701 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1702}
1703
1704int cache_register_net(struct cache_detail *cd, struct net *net)
1705{
1706 int ret;
1707
1708 sunrpc_init_cache_detail(cd);
1709 ret = create_cache_proc_entries(cd, net);
1710 if (ret)
1711 sunrpc_destroy_cache_detail(cd);
1712 return ret;
1713}
1714EXPORT_SYMBOL_GPL(cache_register_net);
1715
1716void cache_unregister_net(struct cache_detail *cd, struct net *net)
1717{
1718 remove_cache_proc_entries(cd);
1719 sunrpc_destroy_cache_detail(cd);
1720}
1721EXPORT_SYMBOL_GPL(cache_unregister_net);
1722
1723struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1724{
1725 struct cache_detail *cd;
1726 int i;
1727
1728 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1729 if (cd == NULL)
1730 return ERR_PTR(-ENOMEM);
1731
1732 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1733 GFP_KERNEL);
1734 if (cd->hash_table == NULL) {
1735 kfree(cd);
1736 return ERR_PTR(-ENOMEM);
1737 }
1738
1739 for (i = 0; i < cd->hash_size; i++)
1740 INIT_HLIST_HEAD(&cd->hash_table[i]);
1741 cd->net = net;
1742 return cd;
1743}
1744EXPORT_SYMBOL_GPL(cache_create_net);
1745
1746void cache_destroy_net(struct cache_detail *cd, struct net *net)
1747{
1748 kfree(cd->hash_table);
1749 kfree(cd);
1750}
1751EXPORT_SYMBOL_GPL(cache_destroy_net);
1752
1753static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1754 size_t count, loff_t *ppos)
1755{
1756 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1757
1758 return cache_read(filp, buf, count, ppos, cd);
1759}
1760
1761static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1762 size_t count, loff_t *ppos)
1763{
1764 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1765
1766 return cache_write(filp, buf, count, ppos, cd);
1767}
1768
1769static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1770{
1771 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1772
1773 return cache_poll(filp, wait, cd);
1774}
1775
1776static long cache_ioctl_pipefs(struct file *filp,
1777 unsigned int cmd, unsigned long arg)
1778{
1779 struct inode *inode = file_inode(filp);
1780 struct cache_detail *cd = RPC_I(inode)->private;
1781
1782 return cache_ioctl(inode, filp, cmd, arg, cd);
1783}
1784
1785static int cache_open_pipefs(struct inode *inode, struct file *filp)
1786{
1787 struct cache_detail *cd = RPC_I(inode)->private;
1788
1789 return cache_open(inode, filp, cd);
1790}
1791
1792static int cache_release_pipefs(struct inode *inode, struct file *filp)
1793{
1794 struct cache_detail *cd = RPC_I(inode)->private;
1795
1796 return cache_release(inode, filp, cd);
1797}
1798
1799const struct file_operations cache_file_operations_pipefs = {
1800 .owner = THIS_MODULE,
1801 .llseek = no_llseek,
1802 .read = cache_read_pipefs,
1803 .write = cache_write_pipefs,
1804 .poll = cache_poll_pipefs,
1805 .unlocked_ioctl = cache_ioctl_pipefs,
1806 .open = cache_open_pipefs,
1807 .release = cache_release_pipefs,
1808};
1809
1810static int content_open_pipefs(struct inode *inode, struct file *filp)
1811{
1812 struct cache_detail *cd = RPC_I(inode)->private;
1813
1814 return content_open(inode, filp, cd);
1815}
1816
1817static int content_release_pipefs(struct inode *inode, struct file *filp)
1818{
1819 struct cache_detail *cd = RPC_I(inode)->private;
1820
1821 return content_release(inode, filp, cd);
1822}
1823
1824const struct file_operations content_file_operations_pipefs = {
1825 .open = content_open_pipefs,
1826 .read = seq_read,
1827 .llseek = seq_lseek,
1828 .release = content_release_pipefs,
1829};
1830
1831static int open_flush_pipefs(struct inode *inode, struct file *filp)
1832{
1833 struct cache_detail *cd = RPC_I(inode)->private;
1834
1835 return open_flush(inode, filp, cd);
1836}
1837
1838static int release_flush_pipefs(struct inode *inode, struct file *filp)
1839{
1840 struct cache_detail *cd = RPC_I(inode)->private;
1841
1842 return release_flush(inode, filp, cd);
1843}
1844
1845static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1846 size_t count, loff_t *ppos)
1847{
1848 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1849
1850 return read_flush(filp, buf, count, ppos, cd);
1851}
1852
1853static ssize_t write_flush_pipefs(struct file *filp,
1854 const char __user *buf,
1855 size_t count, loff_t *ppos)
1856{
1857 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1858
1859 return write_flush(filp, buf, count, ppos, cd);
1860}
1861
1862const struct file_operations cache_flush_operations_pipefs = {
1863 .open = open_flush_pipefs,
1864 .read = read_flush_pipefs,
1865 .write = write_flush_pipefs,
1866 .release = release_flush_pipefs,
1867 .llseek = no_llseek,
1868};
1869
1870int sunrpc_cache_register_pipefs(struct dentry *parent,
1871 const char *name, umode_t umode,
1872 struct cache_detail *cd)
1873{
1874 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1875 if (IS_ERR(dir))
1876 return PTR_ERR(dir);
1877 cd->pipefs = dir;
1878 return 0;
1879}
1880EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1881
1882void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1883{
1884 if (cd->pipefs) {
1885 rpc_remove_cache_dir(cd->pipefs);
1886 cd->pipefs = NULL;
1887 }
1888}
1889EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1890
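/*
 * Remove an entry from its hash chain without waiting for it to expire.
 * The hash table's reference is dropped, but callers may still hold their
 * own references; the entry is freed once the last one goes away.
 */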
1891void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1892{
1893 spin_lock(&cd->hash_lock);
        if (!hlist_unhashed(&h->cache_list)) {
1895 hlist_del_init_rcu(&h->cache_list);
1896 cd->entries--;
1897 spin_unlock(&cd->hash_lock);
1898 cache_put(h, cd);
1899 } else
1900 spin_unlock(&cd->hash_lock);
1901}
1902EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1903