/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
				struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
				struct cache_detail *detail);

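/*
 * Look up @key in the hash chain selected by @hash, under rcu_read_lock().
 * Expired entries are skipped; on a match a reference is taken with
 * cache_get_rcu() before the entry is returned.
 */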
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				continue;
			tmp = cache_get_rcu(tmp);
			rcu_read_unlock();
			return tmp;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init_rcu(&tmp->cache_list);
				detail->entries--;
				if (cache_is_valid(tmp) == -EAGAIN)
					set_bit(CACHE_NEGATIVE, &tmp->flags);
				cache_fresh_locked(tmp, 0, detail);
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			spin_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme) {
		cache_fresh_unlocked(freeme, detail);
		cache_put(freeme, detail);
	}
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked, this ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);


/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init_rcu(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			hlist_del_init_rcu(&ch->cache_list);
			detail->entries--;

			set_bit(CACHE_CLEANED, &ch->flags);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(ch, detail);
			cache_put(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * They are removed when the corresponding upcall completes
 * (cache_revisit_request) or when the owner of the request
 * is torn down (cache_clean_deferred).
 *
 * If the request has a thread waiting on it (req->thread_wait),
 * we do not queue a deferral at all but instead wait briefly
 * in cache_wait_req for the upcall to complete.
 */

#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wakeup and preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_PENDING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

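/*
 * Hand one queued upcall request to a reader of the 'channel' file.
 * The reader's own cache_queue entry is walked along the queue as
 * requests are consumed; a request is only dropped and freed once it
 * has been fully read (or abandoned) and is no longer CACHE_PENDING.
 */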
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent read() calls */

 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}


static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or a hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		 return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by
 * the upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile with a header line, then each
 * entry from the cache
 */

static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while(hash < cd->hash_size &&
		hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than our current flush_time.
	 */
	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	*ppos += count;
	return count;
}

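/*
 * procfs wrappers: each /proc/net/rpc/<cache> file keeps its cache_detail
 * in the proc entry's data, so these helpers just recover it and call the
 * generic routines above.
 */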
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs,
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_operations_procfs, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

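/*
 * rpc_pipefs wrappers: the cache_detail is kept in the rpc_inode's private
 * field, so these helpers recover it and call the generic routines above.
 */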
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs,
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

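/*
 * Remove @h from its hash chain (if it is still hashed) and drop the hash
 * table's reference.  Callers holding their own reference may keep using
 * the entry; it simply can no longer be found by lookups.
 */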
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		hlist_del_init_rcu(&h->cache_list);
		cd->entries--;
		spin_unlock(&cd->hash_lock);
		cache_put(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);