// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
42
static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}
55
56static void cache_fresh_unlocked(struct cache_head *head,
57 struct cache_detail *detail);
58
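/*
 * Hash-bucket lookup under rcu_read_lock(): skip entries that are
 * VALID but already expired, and return the first match with an
 * extra reference taken (or NULL if nothing usable is found).
 */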
59static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
60 struct cache_head *key,
61 int hash)
62{
63 struct hlist_head *head = &detail->hash_table[hash];
64 struct cache_head *tmp;
65
66 rcu_read_lock();
67 hlist_for_each_entry_rcu(tmp, head, cache_list) {
68 if (!detail->match(tmp, key))
69 continue;
70 if (test_bit(CACHE_VALID, &tmp->flags) &&
71 cache_is_expired(detail, tmp))
72 continue;
73 tmp = cache_get_rcu(tmp);
74 rcu_read_unlock();
75 return tmp;
76 }
77 rcu_read_unlock();
78 return NULL;
79}
80
static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}
89
90static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
91 struct cache_detail *cd)
92{
93 cache_fresh_unlocked(ch, cd);
94 cache_put(ch, cd);
95}
96
97static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
98 struct cache_head *key,
99 int hash)
100{
101 struct cache_head *new, *tmp, *freeme = NULL;
102 struct hlist_head *head = &detail->hash_table[hash];
103
	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
117 hlist_for_each_entry_rcu(tmp, head, cache_list,
118 lockdep_is_held(&detail->hash_lock)) {
119 if (!detail->match(tmp, key))
120 continue;
121 if (test_bit(CACHE_VALID, &tmp->flags) &&
122 cache_is_expired(detail, tmp)) {
123 sunrpc_begin_cache_remove_entry(tmp, detail);
124 trace_cache_entry_expired(detail, tmp);
125 freeme = tmp;
126 break;
127 }
128 cache_get(tmp);
129 spin_unlock(&detail->hash_lock);
130 cache_put(new, detail);
131 return tmp;
132 }
133
134 hlist_add_head_rcu(&new->cache_list, head);
135 detail->entries++;
136 cache_get(new);
137 spin_unlock(&detail->hash_lock);
138
139 if (freeme)
140 sunrpc_end_cache_remove_entry(freeme, detail);
141 return new;
142}
143
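/*
 * Look up a cache entry, or create and hash a new one from 'key' if
 * no usable entry exists.  The returned entry holds a reference that
 * the caller must drop with cache_put().
 */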
144struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
145 struct cache_head *key, int hash)
146{
147 struct cache_head *ret;
148
149 ret = sunrpc_cache_find_rcu(detail, key, hash);
150 if (ret)
151 return ret;
152
153 return sunrpc_cache_add_entry(detail, key, hash);
154}
155EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
156
157static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
158
static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}
171
172static void cache_fresh_unlocked(struct cache_head *head,
173 struct cache_detail *detail)
174{
175 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
176 cache_revisit_request(head);
177 cache_dequeue(detail, head);
178 }
179}
180
181static void cache_make_negative(struct cache_detail *detail,
182 struct cache_head *h)
183{
184 set_bit(CACHE_NEGATIVE, &h->flags);
185 trace_cache_entry_make_negative(detail, h);
186}
187
188static void cache_entry_update(struct cache_detail *detail,
189 struct cache_head *h,
190 struct cache_head *new)
191{
192 if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
193 detail->update(h, new);
194 trace_cache_entry_update(detail, h);
195 } else {
196 cache_make_negative(detail, h);
197 }
198}
199
200struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
201 struct cache_head *new, struct cache_head *old, int hash)
202{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;
208
209 if (!test_bit(CACHE_VALID, &old->flags)) {
210 spin_lock(&detail->hash_lock);
211 if (!test_bit(CACHE_VALID, &old->flags)) {
212 cache_entry_update(detail, old, new);
213 cache_fresh_locked(old, new->expiry_time, detail);
214 spin_unlock(&detail->hash_lock);
215 cache_fresh_unlocked(old, detail);
216 return old;
217 }
218 spin_unlock(&detail->hash_lock);
219 }
220
221 tmp = detail->alloc();
222 if (!tmp) {
223 cache_put(old, detail);
224 return NULL;
225 }
226 cache_init(tmp, detail);
227 detail->init(tmp, old);
228
229 spin_lock(&detail->hash_lock);
230 cache_entry_update(detail, tmp, new);
231 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
232 detail->entries++;
233 cache_get(tmp);
234 cache_fresh_locked(tmp, new->expiry_time, detail);
235 cache_fresh_locked(old, 0, detail);
236 spin_unlock(&detail->hash_lock);
237 cache_fresh_unlocked(tmp, detail);
238 cache_fresh_unlocked(old, detail);
239 cache_put(old, detail);
240 return tmp;
241}
242EXPORT_SYMBOL_GPL(sunrpc_cache_update);
243
static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked(), ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}
264
265static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
266{
267 int rv;
268
269 spin_lock(&detail->hash_lock);
270 rv = cache_is_valid(h);
271 if (rv == -EAGAIN) {
272 cache_make_negative(detail, h);
273 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
274 detail);
275 rv = -ENOENT;
276 }
277 spin_unlock(&detail->hash_lock);
278 cache_fresh_unlocked(h, detail);
279 return rv;
280}
281
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
296int cache_check(struct cache_detail *detail,
297 struct cache_head *h, struct cache_req *rqstp)
298{
299 int rv;
300 time64_t refresh_age, age;
301
	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;
308
309 if (rqstp == NULL) {
310 if (rv == -EAGAIN)
311 rv = -ENOENT;
312 } else if (rv == -EAGAIN ||
313 (h->expiry_time != 0 && age > refresh_age/2)) {
314 dprintk("RPC: Want update, refage=%lld, age=%lld\n",
315 refresh_age, age);
316 switch (detail->cache_upcall(detail, h)) {
317 case -EINVAL:
318 rv = try_to_negate_entry(detail, h);
319 break;
320 case -EAGAIN:
321 cache_fresh_unlocked(h, detail);
322 break;
323 }
324 }
325
326 if (rv == -EAGAIN) {
327 if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
332 rv = cache_is_valid(h);
333 if (rv == -EAGAIN)
334 rv = -ETIMEDOUT;
335 }
336 }
337 if (rv)
338 cache_put(h, detail);
339 return rv;
340}
341EXPORT_SYMBOL_GPL(cache_check);
342
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_details and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck"
 * field of the cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to just after that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
375static LIST_HEAD(cache_list);
376static DEFINE_SPINLOCK(cache_list_lock);
377static struct cache_detail *current_detail;
378static int current_index;
379
380static void do_cache_clean(struct work_struct *work);
381static struct delayed_work cache_cleaner;
382
383void sunrpc_init_cache_detail(struct cache_detail *cd)
384{
385 spin_lock_init(&cd->hash_lock);
386 INIT_LIST_HEAD(&cd->queue);
387 spin_lock(&cache_list_lock);
388 cd->nextcheck = 0;
389 cd->entries = 0;
390 atomic_set(&cd->writers, 0);
391 cd->last_close = 0;
392 cd->last_warn = -1;
393 list_add(&cd->others, &cache_list);
394 spin_unlock(&cache_list_lock);
395
	/* start the cleaning routine */
397 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
398}
399EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
400
401void sunrpc_destroy_cache_detail(struct cache_detail *cd)
402{
403 cache_purge(cd);
404 spin_lock(&cache_list_lock);
405 spin_lock(&cd->hash_lock);
406 if (current_detail == cd)
407 current_detail = NULL;
408 list_del_init(&cd->others);
409 spin_unlock(&cd->hash_lock);
410 spin_unlock(&cache_list_lock);
411 if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
413 cancel_delayed_work_sync(&cache_cleaner);
414 }
415}
416EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
417
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
424static int cache_clean(void)
425{
426 int rv = 0;
427 struct list_head *next;
428
429 spin_lock(&cache_list_lock);
430
	/* find a suitable table if we don't already have one */
432 while (current_detail == NULL ||
433 current_index >= current_detail->hash_size) {
434 if (current_detail)
435 next = current_detail->others.next;
436 else
437 next = cache_list.next;
438 if (next == &cache_list) {
439 current_detail = NULL;
440 spin_unlock(&cache_list_lock);
441 return -1;
442 }
443 current_detail = list_entry(next, struct cache_detail, others);
444 if (current_detail->nextcheck > seconds_since_boot())
445 current_index = current_detail->hash_size;
446 else {
447 current_index = 0;
448 current_detail->nextcheck = seconds_since_boot()+30*60;
449 }
450 }

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;
457
	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
459
460 if (current_detail && current_index < current_detail->hash_size) {
461 struct cache_head *ch = NULL;
462 struct cache_detail *d;
463 struct hlist_head *head;
464 struct hlist_node *tmp;
465
		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
471 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
472 if (current_detail->nextcheck > ch->expiry_time)
473 current_detail->nextcheck = ch->expiry_time+1;
474 if (!cache_is_expired(current_detail, ch))
475 continue;
476
477 sunrpc_begin_cache_remove_entry(ch, current_detail);
478 trace_cache_entry_expired(current_detail, ch);
479 rv = 1;
480 break;
481 }
482
		spin_unlock(&current_detail->hash_lock);
484 d = current_detail;
485 if (!ch)
486 current_index ++;
487 spin_unlock(&cache_list_lock);
488 if (ch)
489 sunrpc_end_cache_remove_entry(ch, d);
490 } else
491 spin_unlock(&cache_list_lock);
492
493 return rv;
494}
495
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
499static void do_cache_clean(struct work_struct *work)
500{
501 int delay;
502
503 if (list_empty(&cache_list))
504 return;
505
506 if (cache_clean() == -1)
507 delay = round_jiffies_relative(30*HZ);
508 else
509 delay = 5;
510
511 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
512}
513
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
520void cache_flush(void)
521{
522 while (cache_clean() != -1)
523 cond_resched();
524 while (cache_clean() != -1)
525 cond_resched();
526}
527EXPORT_SYMBOL_GPL(cache_flush);
528
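/*
 * Remove every entry from one cache.  The hash_lock is dropped and
 * retaken around each removal so the final cache_put() runs unlocked.
 */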
529void cache_purge(struct cache_detail *detail)
530{
531 struct cache_head *ch = NULL;
532 struct hlist_head *head = NULL;
533 int i = 0;
534
535 spin_lock(&detail->hash_lock);
536 if (!detail->entries) {
537 spin_unlock(&detail->hash_lock);
538 return;
539 }
540
541 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
542 for (i = 0; i < detail->hash_size; i++) {
543 head = &detail->hash_table[i];
544 while (!hlist_empty(head)) {
545 ch = hlist_entry(head->first, struct cache_head,
546 cache_list);
547 sunrpc_begin_cache_remove_entry(ch, detail);
548 spin_unlock(&detail->hash_lock);
549 sunrpc_end_cache_remove_entry(ch, detail);
550 spin_lock(&detail->hash_lock);
551 }
552 }
553 spin_unlock(&detail->hash_lock);
554}
555EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * They are also linked into a "recent" list, newest first,
 * so that when too many requests are deferred (more than
 * DFR_MAX), either the oldest or the newest can be discarded
 * to keep the total bounded.
 *
 * Threads that can simply wait do so on a per-request
 * completion (see cache_wait_req) rather than being
 * counted against that limit.
 */
573#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
574#define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
575
576#define DFR_MAX 300
577
578static DEFINE_SPINLOCK(cache_defer_lock);
579static LIST_HEAD(cache_defer_list);
580static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
581static int cache_defer_cnt;
582
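/* Caller must hold cache_defer_lock */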
583static void __unhash_deferred_req(struct cache_deferred_req *dreq)
584{
585 hlist_del_init(&dreq->hash);
586 if (!list_empty(&dreq->recent)) {
587 list_del_init(&dreq->recent);
588 cache_defer_cnt--;
589 }
590}
591
592static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
593{
594 int hash = DFR_HASH(item);
595
596 INIT_LIST_HEAD(&dreq->recent);
597 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
598}
599
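/*
 * Hash a deferred request by its cache item.  If 'count_me' is set it
 * is also added to the global "recent" list and counted against
 * DFR_MAX; waiters that sleep in cache_wait_req() pass 0 so they are
 * never discarded by cache_limit_defers().
 */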
600static void setup_deferral(struct cache_deferred_req *dreq,
601 struct cache_head *item,
602 int count_me)
603{
604
605 dreq->item = item;
606
607 spin_lock(&cache_defer_lock);
608
609 __hash_deferred_req(dreq, item);
610
611 if (count_me) {
612 cache_defer_cnt++;
613 list_add(&dreq->recent, &cache_defer_list);
614 }
615
616 spin_unlock(&cache_defer_lock);
617
618}
619
620struct thread_deferred_req {
621 struct cache_deferred_req handle;
622 struct completion completion;
623};
624
625static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
626{
627 struct thread_deferred_req *dr =
628 container_of(dreq, struct thread_deferred_req, handle);
629 complete(&dr->completion);
630}
631
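/*
 * Wait (up to req->thread_wait) for the pending upcall to complete,
 * using an on-stack deferred request that completes a completion
 * instead of requeueing the RPC request.
 */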
632static void cache_wait_req(struct cache_req *req, struct cache_head *item)
633{
634 struct thread_deferred_req sleeper;
635 struct cache_deferred_req *dreq = &sleeper.handle;
636
637 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
638 dreq->revisit = cache_restart_thread;
639
640 setup_deferral(dreq, item, 0);
641
642 if (!test_bit(CACHE_PENDING, &item->flags) ||
643 wait_for_completion_interruptible_timeout(
644 &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
648 spin_lock(&cache_defer_lock);
649 if (!hlist_unhashed(&sleeper.handle.hash)) {
650 __unhash_deferred_req(&sleeper.handle);
651 spin_unlock(&cache_defer_lock);
652 } else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
658 spin_unlock(&cache_defer_lock);
659 wait_for_completion(&sleeper.completion);
660 }
661 }
662}
663
664static void cache_limit_defers(void)
665{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
669 struct cache_deferred_req *discard = NULL;
670
671 if (cache_defer_cnt <= DFR_MAX)
672 return;
673
674 spin_lock(&cache_defer_lock);
675
	/* Consider removing either the first or the last */
677 if (cache_defer_cnt > DFR_MAX) {
678 if (prandom_u32() & 1)
679 discard = list_entry(cache_defer_list.next,
680 struct cache_deferred_req, recent);
681 else
682 discard = list_entry(cache_defer_list.prev,
683 struct cache_deferred_req, recent);
684 __unhash_deferred_req(discard);
685 }
686 spin_unlock(&cache_defer_lock);
687 if (discard)
688 discard->revisit(discard, 1);
689}
690
/* Return true if and only if a deferred request is queued. */
692static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
693{
694 struct cache_deferred_req *dreq;
695
696 if (req->thread_wait) {
697 cache_wait_req(req, item);
698 if (!test_bit(CACHE_PENDING, &item->flags))
699 return false;
700 }
701 dreq = req->defer(req);
702 if (dreq == NULL)
703 return false;
704 setup_deferral(dreq, item, 1);
705 if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
709 cache_revisit_request(item);
710
711 cache_limit_defers();
712 return true;
713}
714
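/*
 * An update for 'item' has arrived (or the upcall was abandoned):
 * unhash every request deferred on it and call ->revisit so the
 * owners can retry.
 */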
715static void cache_revisit_request(struct cache_head *item)
716{
717 struct cache_deferred_req *dreq;
718 struct list_head pending;
719 struct hlist_node *tmp;
720 int hash = DFR_HASH(item);
721
722 INIT_LIST_HEAD(&pending);
723 spin_lock(&cache_defer_lock);
724
725 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
726 if (dreq->item == item) {
727 __unhash_deferred_req(dreq);
728 list_add(&dreq->recent, &pending);
729 }
730
731 spin_unlock(&cache_defer_lock);
732
733 while (!list_empty(&pending)) {
734 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
735 list_del_init(&dreq->recent);
736 dreq->revisit(dreq, 0);
737 }
738}
739
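/*
 * Discard (revisit with 'too_many' set) every deferred request that
 * belongs to 'owner', typically because that owner is being torn down.
 */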
740void cache_clean_deferred(void *owner)
741{
742 struct cache_deferred_req *dreq, *tmp;
743 struct list_head pending;
744
745
746 INIT_LIST_HEAD(&pending);
747 spin_lock(&cache_defer_lock);
748
749 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
750 if (dreq->owner == owner) {
751 __unhash_deferred_req(dreq);
752 list_add(&dreq->recent, &pending);
753 }
754 }
755 spin_unlock(&cache_defer_lock);
756
757 while (!list_empty(&pending)) {
758 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
759 list_del_init(&dreq->recent);
760 dreq->revisit(dreq, 1);
761 }
762}
763

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found
 * that is no longer PENDING, it is freed from the list.
 */
780static DEFINE_SPINLOCK(queue_lock);
781
782struct cache_queue {
783 struct list_head list;
784 int reader;
785};
786struct cache_request {
787 struct cache_queue q;
788 struct cache_head *item;
789 char * buf;
790 int len;
791 int readers;
792};
793struct cache_reader {
794 struct cache_queue q;
795 int offset;
796};
797
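/*
 * Format one upcall for 'crq' into its page-sized buffer.  Returns the
 * number of bytes used, or -E2BIG if ->cache_request() overran the page.
 */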
798static int cache_request(struct cache_detail *detail,
799 struct cache_request *crq)
800{
801 char *bp = crq->buf;
802 int len = PAGE_SIZE;
803
804 detail->cache_request(detail, crq->item, &bp, &len);
805 if (len < 0)
806 return -E2BIG;
807 return PAGE_SIZE - len;
808}
809
810static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
811 loff_t *ppos, struct cache_detail *cd)
812{
813 struct cache_reader *rp = filp->private_data;
814 struct cache_request *rq;
815 struct inode *inode = file_inode(filp);
816 int err;
817
818 if (count == 0)
819 return 0;
820
821 inode_lock(inode);
822
823 again:
824 spin_lock(&queue_lock);
825
826 while (rp->q.list.next != &cd->queue &&
827 list_entry(rp->q.list.next, struct cache_queue, list)
828 ->reader) {
829 struct list_head *next = rp->q.list.next;
830 list_move(&rp->q.list, next);
831 }
832 if (rp->q.list.next == &cd->queue) {
833 spin_unlock(&queue_lock);
834 inode_unlock(inode);
835 WARN_ON_ONCE(rp->offset);
836 return 0;
837 }
838 rq = container_of(rp->q.list.next, struct cache_request, q.list);
839 WARN_ON_ONCE(rq->q.reader);
840 if (rp->offset == 0)
841 rq->readers++;
842 spin_unlock(&queue_lock);
843
844 if (rq->len == 0) {
845 err = cache_request(cd, rq);
846 if (err < 0)
847 goto out;
848 rq->len = err;
849 }
850
851 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
852 err = -EAGAIN;
853 spin_lock(&queue_lock);
854 list_move(&rp->q.list, &rq->q.list);
855 spin_unlock(&queue_lock);
856 } else {
857 if (rp->offset + count > rq->len)
858 count = rq->len - rp->offset;
859 err = -EFAULT;
860 if (copy_to_user(buf, rq->buf + rp->offset, count))
861 goto out;
862 rp->offset += count;
863 if (rp->offset >= rq->len) {
864 rp->offset = 0;
865 spin_lock(&queue_lock);
866 list_move(&rp->q.list, &rq->q.list);
867 spin_unlock(&queue_lock);
868 }
869 err = 0;
870 }
871 out:
872 if (rp->offset == 0) {
		/* need to release rq */
874 spin_lock(&queue_lock);
875 rq->readers--;
876 if (rq->readers == 0 &&
877 !test_bit(CACHE_PENDING, &rq->item->flags)) {
878 list_del(&rq->q.list);
879 spin_unlock(&queue_lock);
880 cache_put(rq->item, cd);
881 kfree(rq->buf);
882 kfree(rq);
883 } else
884 spin_unlock(&queue_lock);
885 }
886 if (err == -EAGAIN)
887 goto again;
888 inode_unlock(inode);
889 return err ? err : count;
890}
891
892static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
893 size_t count, struct cache_detail *cd)
894{
895 ssize_t ret;
896
897 if (count == 0)
898 return -EINVAL;
899 if (copy_from_user(kaddr, buf, count))
900 return -EFAULT;
901 kaddr[count] = '\0';
902 ret = cd->cache_parse(cd, kaddr, count);
903 if (!ret)
904 ret = count;
905 return ret;
906}
907
908static ssize_t cache_downcall(struct address_space *mapping,
909 const char __user *buf,
910 size_t count, struct cache_detail *cd)
911{
912 char *write_buf;
913 ssize_t ret = -ENOMEM;
914
915 if (count >= 32768) {
916 ret = -EINVAL;
917 goto out;
918 }
919
920 write_buf = kvmalloc(count + 1, GFP_KERNEL);
921 if (!write_buf)
922 goto out;
923
924 ret = cache_do_downcall(write_buf, buf, count, cd);
925 kvfree(write_buf);
926out:
927 return ret;
928}
929
930static ssize_t cache_write(struct file *filp, const char __user *buf,
931 size_t count, loff_t *ppos,
932 struct cache_detail *cd)
933{
934 struct address_space *mapping = filp->f_mapping;
935 struct inode *inode = file_inode(filp);
936 ssize_t ret = -EINVAL;
937
938 if (!cd->cache_parse)
939 goto out;
940
941 inode_lock(inode);
942 ret = cache_downcall(mapping, buf, count, cd);
943 inode_unlock(inode);
944out:
945 return ret;
946}
947
948static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
949
950static __poll_t cache_poll(struct file *filp, poll_table *wait,
951 struct cache_detail *cd)
952{
953 __poll_t mask;
954 struct cache_reader *rp = filp->private_data;
955 struct cache_queue *cq;
956
957 poll_wait(filp, &queue_wait, wait);
958
	/* always allow write */
960 mask = EPOLLOUT | EPOLLWRNORM;
961
962 if (!rp)
963 return mask;
964
965 spin_lock(&queue_lock);
966
967 for (cq= &rp->q; &cq->list != &cd->queue;
968 cq = list_entry(cq->list.next, struct cache_queue, list))
969 if (!cq->reader) {
970 mask |= EPOLLIN | EPOLLRDNORM;
971 break;
972 }
973 spin_unlock(&queue_lock);
974 return mask;
975}
976
977static int cache_ioctl(struct inode *ino, struct file *filp,
978 unsigned int cmd, unsigned long arg,
979 struct cache_detail *cd)
980{
981 int len = 0;
982 struct cache_reader *rp = filp->private_data;
983 struct cache_queue *cq;
984
985 if (cmd != FIONREAD || !rp)
986 return -EINVAL;
987
988 spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
993 for (cq= &rp->q; &cq->list != &cd->queue;
994 cq = list_entry(cq->list.next, struct cache_queue, list))
995 if (!cq->reader) {
996 struct cache_request *cr =
997 container_of(cq, struct cache_request, q);
998 len = cr->len - rp->offset;
999 break;
1000 }
1001 spin_unlock(&queue_lock);
1002
1003 return put_user(len, (int __user *)arg);
1004}
1005
1006static int cache_open(struct inode *inode, struct file *filp,
1007 struct cache_detail *cd)
1008{
1009 struct cache_reader *rp = NULL;
1010
1011 if (!cd || !try_module_get(cd->owner))
1012 return -EACCES;
1013 nonseekable_open(inode, filp);
1014 if (filp->f_mode & FMODE_READ) {
1015 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1016 if (!rp) {
1017 module_put(cd->owner);
1018 return -ENOMEM;
1019 }
1020 rp->offset = 0;
1021 rp->q.reader = 1;
1022
1023 spin_lock(&queue_lock);
1024 list_add(&rp->q.list, &cd->queue);
1025 spin_unlock(&queue_lock);
1026 }
1027 if (filp->f_mode & FMODE_WRITE)
1028 atomic_inc(&cd->writers);
1029 filp->private_data = rp;
1030 return 0;
1031}
1032
1033static int cache_release(struct inode *inode, struct file *filp,
1034 struct cache_detail *cd)
1035{
1036 struct cache_reader *rp = filp->private_data;
1037
1038 if (rp) {
1039 spin_lock(&queue_lock);
1040 if (rp->offset) {
1041 struct cache_queue *cq;
1042 for (cq= &rp->q; &cq->list != &cd->queue;
1043 cq = list_entry(cq->list.next, struct cache_queue, list))
1044 if (!cq->reader) {
1045 container_of(cq, struct cache_request, q)
1046 ->readers--;
1047 break;
1048 }
1049 rp->offset = 0;
1050 }
1051 list_del(&rp->q.list);
1052 spin_unlock(&queue_lock);
1053
1054 filp->private_data = NULL;
1055 kfree(rp);
1056
1057 }
1058 if (filp->f_mode & FMODE_WRITE) {
1059 atomic_dec(&cd->writers);
1060 cd->last_close = seconds_since_boot();
1061 }
1062 module_put(cd->owner);
1063 return 0;
1064}
1065
1066
1067
1068static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1069{
1070 struct cache_queue *cq, *tmp;
1071 struct cache_request *cr;
1072 struct list_head dequeued;
1073
1074 INIT_LIST_HEAD(&dequeued);
1075 spin_lock(&queue_lock);
1076 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1077 if (!cq->reader) {
1078 cr = container_of(cq, struct cache_request, q);
1079 if (cr->item != ch)
1080 continue;
1081 if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
1083 break;
1084 if (cr->readers != 0)
1085 continue;
1086 list_move(&cr->q.list, &dequeued);
1087 }
1088 spin_unlock(&queue_lock);
1089 while (!list_empty(&dequeued)) {
1090 cr = list_entry(dequeued.next, struct cache_request, q.list);
1091 list_del(&cr->q.list);
1092 cache_put(cr->item, detail);
1093 kfree(cr->buf);
1094 kfree(cr);
1095 }
1096}
1097
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */
1107void qword_add(char **bpp, int *lp, char *str)
1108{
1109 char *bp = *bpp;
1110 int len = *lp;
1111 int ret;
1112
1113 if (len < 0) return;
1114
1115 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1116 if (ret >= len) {
1117 bp += len;
1118 len = -1;
1119 } else {
1120 bp += ret;
1121 len -= ret;
1122 *bp++ = ' ';
1123 len--;
1124 }
1125 *bpp = bp;
1126 *lp = len;
1127}
1128EXPORT_SYMBOL_GPL(qword_add);
1129
1130void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1131{
1132 char *bp = *bpp;
1133 int len = *lp;
1134
1135 if (len < 0) return;
1136
1137 if (len > 2) {
1138 *bp++ = '\\';
1139 *bp++ = 'x';
1140 len -= 2;
1141 while (blen && len >= 2) {
1142 bp = hex_byte_pack(bp, *buf++);
1143 len -= 2;
1144 blen--;
1145 }
1146 }
1147 if (blen || len<1) len = -1;
1148 else {
1149 *bp++ = ' ';
1150 len--;
1151 }
1152 *bpp = bp;
1153 *lp = len;
1154}
1155EXPORT_SYMBOL_GPL(qword_addhex);
1156
1157static void warn_no_listener(struct cache_detail *detail)
1158{
1159 if (detail->last_warn != detail->last_close) {
1160 detail->last_warn = detail->last_close;
1161 if (detail->warn_no_listener)
1162 detail->warn_no_listener(detail, detail->last_close != 0);
1163 }
1164}
1165
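/*
 * Return true if some daemon has the channel open for writing, or had
 * it open recently enough that an upcall is still worth queueing.
 */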
1166static bool cache_listeners_exist(struct cache_detail *detail)
1167{
1168 if (atomic_read(&detail->writers))
1169 return true;
	if (detail->last_close == 0)
		/* never opened, so no daemon has been listening */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
1180 return true;
1181}
1182
/*
 * register an upcall request to user-space and queue it up for read() by
 * the upcall daemon.
 *
 * Each request is at most one page long.
 */
1189static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1190{
1191 char *buf;
1192 struct cache_request *crq;
1193 int ret = 0;
1194
1195 if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
1197 return -EAGAIN;
1198
1199 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1200 if (!buf)
1201 return -EAGAIN;
1202
1203 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1204 if (!crq) {
1205 kfree(buf);
1206 return -EAGAIN;
1207 }
1208
1209 crq->q.reader = 0;
1210 crq->buf = buf;
1211 crq->len = 0;
1212 crq->readers = 0;
1213 spin_lock(&queue_lock);
1214 if (test_bit(CACHE_PENDING, &h->flags)) {
1215 crq->item = cache_get(h);
1216 list_add_tail(&crq->q.list, &detail->queue);
1217 trace_cache_entry_upcall(detail, h);
1218 } else
		/* Lost a race, no longer PENDING, so don't enqueue */
1220 ret = -EAGAIN;
1221 spin_unlock(&queue_lock);
1222 wake_up(&queue_wait);
1223 if (ret == -EAGAIN) {
1224 kfree(buf);
1225 kfree(crq);
1226 }
1227 return ret;
1228}
1229
1230int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1231{
1232 if (test_and_set_bit(CACHE_PENDING, &h->flags))
1233 return 0;
1234 return cache_pipe_upcall(detail, h);
1235}
1236EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1237
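/*
 * As sunrpc_cache_pipe_upcall(), but fail with -EINVAL when no
 * userspace listener exists, so the caller can negate the entry
 * instead of waiting for an upcall that will never be answered.
 */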
1238int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1239 struct cache_head *h)
1240{
1241 if (!cache_listeners_exist(detail)) {
1242 warn_no_listener(detail);
1243 trace_cache_entry_no_listener(detail, h);
1244 return -EINVAL;
1245 }
1246 return sunrpc_cache_pipe_upcall(detail, h);
1247}
1248EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
1249
/*
 * Messages from user-space are, like upcall requests, separated
 * into fields by spaces.  Fields are dequoted from \xHEXSTRING
 * or embedded \nnn octal by qword_get() below, which copies the
 * next field into 'dest' and advances *bpp past it.
 */
1262int qword_get(char **bpp, char *dest, int bufsize)
1263{
	/* return bytes copied, or -1 on error */
1265 char *bp = *bpp;
1266 int len = 0;
1267
1268 while (*bp == ' ') bp++;
1269
1270 if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
1272 bp += 2;
1273 while (len < bufsize - 1) {
1274 int h, l;
1275
1276 h = hex_to_bin(bp[0]);
1277 if (h < 0)
1278 break;
1279
1280 l = hex_to_bin(bp[1]);
1281 if (l < 0)
1282 break;
1283
1284 *dest++ = (h << 4) | l;
1285 bp += 2;
1286 len++;
1287 }
1288 } else {
		/* text with \nnn octal quoting */
1290 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1291 if (*bp == '\\' &&
1292 isodigit(bp[1]) && (bp[1] <= '3') &&
1293 isodigit(bp[2]) &&
1294 isodigit(bp[3])) {
1295 int byte = (*++bp -'0');
1296 bp++;
1297 byte = (byte << 3) | (*bp++ - '0');
1298 byte = (byte << 3) | (*bp++ - '0');
1299 *dest++ = byte;
1300 len++;
1301 } else {
1302 *dest++ = *bp++;
1303 len++;
1304 }
1305 }
1306 }
1307
1308 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1309 return -1;
1310 while (*bp == ' ') bp++;
1311 *bpp = bp;
1312 *dest = '\0';
1313 return len;
1314}
1315EXPORT_SYMBOL_GPL(qword_get);
1316

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */
1325static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1326{
1327 loff_t n = *pos;
1328 unsigned int hash, entry;
1329 struct cache_head *ch;
1330 struct cache_detail *cd = m->private;
1331
1332 if (!n--)
1333 return SEQ_START_TOKEN;
1334 hash = n >> 32;
1335 entry = n & ((1LL<<32) - 1);
1336
1337 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1338 if (!entry--)
1339 return ch;
1340 n &= ~((1LL<<32) - 1);
1341 do {
1342 hash++;
1343 n += 1LL<<32;
1344 } while(hash < cd->hash_size &&
1345 hlist_empty(&cd->hash_table[hash]));
1346 if (hash >= cd->hash_size)
1347 return NULL;
1348 *pos = n+1;
1349 return hlist_entry_safe(rcu_dereference_raw(
1350 hlist_first_rcu(&cd->hash_table[hash])),
1351 struct cache_head, cache_list);
1352}
1353
1354static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1355{
1356 struct cache_head *ch = p;
1357 int hash = (*pos >> 32);
1358 struct cache_detail *cd = m->private;
1359
1360 if (p == SEQ_START_TOKEN)
1361 hash = 0;
1362 else if (ch->cache_list.next == NULL) {
1363 hash++;
1364 *pos += 1LL<<32;
1365 } else {
1366 ++*pos;
1367 return hlist_entry_safe(rcu_dereference_raw(
1368 hlist_next_rcu(&ch->cache_list)),
1369 struct cache_head, cache_list);
1370 }
1371 *pos &= ~((1LL<<32) - 1);
1372 while (hash < cd->hash_size &&
1373 hlist_empty(&cd->hash_table[hash])) {
1374 hash++;
1375 *pos += 1LL<<32;
1376 }
1377 if (hash >= cd->hash_size)
1378 return NULL;
1379 ++*pos;
1380 return hlist_entry_safe(rcu_dereference_raw(
1381 hlist_first_rcu(&cd->hash_table[hash])),
1382 struct cache_head, cache_list);
1383}
1384
1385void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1386 __acquires(RCU)
1387{
1388 rcu_read_lock();
1389 return __cache_seq_start(m, pos);
1390}
1391EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1392
1393void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1394{
1395 return cache_seq_next(file, p, pos);
1396}
1397EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1398
1399void cache_seq_stop_rcu(struct seq_file *m, void *p)
1400 __releases(RCU)
1401{
1402 rcu_read_unlock();
1403}
1404EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1405
1406static int c_show(struct seq_file *m, void *p)
1407{
1408 struct cache_head *cp = p;
1409 struct cache_detail *cd = m->private;
1410
1411 if (p == SEQ_START_TOKEN)
1412 return cd->cache_show(m, cd, NULL);
1413
1414 ifdebug(CACHE)
1415 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1416 convert_to_wallclock(cp->expiry_time),
1417 kref_read(&cp->ref), cp->flags);
1418 cache_get(cp);
1419 if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
1421 seq_puts(m, "# ");
1422 else {
1423 if (cache_is_expired(cd, cp))
1424 seq_puts(m, "# ");
1425 cache_put(cp, cd);
1426 }
1427
1428 return cd->cache_show(m, cd, cp);
1429}
1430
1431static const struct seq_operations cache_content_op = {
1432 .start = cache_seq_start_rcu,
1433 .next = cache_seq_next_rcu,
1434 .stop = cache_seq_stop_rcu,
1435 .show = c_show,
1436};
1437
1438static int content_open(struct inode *inode, struct file *file,
1439 struct cache_detail *cd)
1440{
1441 struct seq_file *seq;
1442 int err;
1443
1444 if (!cd || !try_module_get(cd->owner))
1445 return -EACCES;
1446
1447 err = seq_open(file, &cache_content_op);
1448 if (err) {
1449 module_put(cd->owner);
1450 return err;
1451 }
1452
1453 seq = file->private_data;
1454 seq->private = cd;
1455 return 0;
1456}
1457
1458static int content_release(struct inode *inode, struct file *file,
1459 struct cache_detail *cd)
1460{
1461 int ret = seq_release(inode, file);
1462 module_put(cd->owner);
1463 return ret;
1464}
1465
1466static int open_flush(struct inode *inode, struct file *file,
1467 struct cache_detail *cd)
1468{
1469 if (!cd || !try_module_get(cd->owner))
1470 return -EACCES;
1471 return nonseekable_open(inode, file);
1472}
1473
1474static int release_flush(struct inode *inode, struct file *file,
1475 struct cache_detail *cd)
1476{
1477 module_put(cd->owner);
1478 return 0;
1479}
1480
1481static ssize_t read_flush(struct file *file, char __user *buf,
1482 size_t count, loff_t *ppos,
1483 struct cache_detail *cd)
1484{
1485 char tbuf[22];
1486 size_t len;
1487
1488 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1489 convert_to_wallclock(cd->flush_time));
1490 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1491}
1492
1493static ssize_t write_flush(struct file *file, const char __user *buf,
1494 size_t count, loff_t *ppos,
1495 struct cache_detail *cd)
1496{
1497 char tbuf[20];
1498 char *ep;
1499 time64_t now;
1500
1501 if (*ppos || count > sizeof(tbuf)-1)
1502 return -EINVAL;
1503 if (copy_from_user(tbuf, buf, count))
1504 return -EFAULT;
1505 tbuf[count] = 0;
1506 simple_strtoul(tbuf, &ep, 0);
1507 if (*ep && *ep != '\n')
1508 return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

1514 now = seconds_since_boot();

	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */
1522 if (cd->flush_time >= now)
1523 now = cd->flush_time + 1;
1524
1525 cd->flush_time = now;
1526 cd->nextcheck = now;
1527 cache_flush();
1528
1529 if (cd->flush)
1530 cd->flush();
1531
1532 *ppos += count;
1533 return count;
1534}
1535
1536static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1537 size_t count, loff_t *ppos)
1538{
1539 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1540
1541 return cache_read(filp, buf, count, ppos, cd);
1542}
1543
1544static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1545 size_t count, loff_t *ppos)
1546{
1547 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1548
1549 return cache_write(filp, buf, count, ppos, cd);
1550}
1551
1552static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1553{
1554 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1555
1556 return cache_poll(filp, wait, cd);
1557}
1558
1559static long cache_ioctl_procfs(struct file *filp,
1560 unsigned int cmd, unsigned long arg)
1561{
1562 struct inode *inode = file_inode(filp);
1563 struct cache_detail *cd = PDE_DATA(inode);
1564
1565 return cache_ioctl(inode, filp, cmd, arg, cd);
1566}
1567
1568static int cache_open_procfs(struct inode *inode, struct file *filp)
1569{
1570 struct cache_detail *cd = PDE_DATA(inode);
1571
1572 return cache_open(inode, filp, cd);
1573}
1574
1575static int cache_release_procfs(struct inode *inode, struct file *filp)
1576{
1577 struct cache_detail *cd = PDE_DATA(inode);
1578
1579 return cache_release(inode, filp, cd);
1580}
1581
1582static const struct proc_ops cache_channel_proc_ops = {
1583 .proc_lseek = no_llseek,
1584 .proc_read = cache_read_procfs,
1585 .proc_write = cache_write_procfs,
1586 .proc_poll = cache_poll_procfs,
1587 .proc_ioctl = cache_ioctl_procfs,
1588 .proc_open = cache_open_procfs,
1589 .proc_release = cache_release_procfs,
1590};
1591
1592static int content_open_procfs(struct inode *inode, struct file *filp)
1593{
1594 struct cache_detail *cd = PDE_DATA(inode);
1595
1596 return content_open(inode, filp, cd);
1597}
1598
1599static int content_release_procfs(struct inode *inode, struct file *filp)
1600{
1601 struct cache_detail *cd = PDE_DATA(inode);
1602
1603 return content_release(inode, filp, cd);
1604}
1605
1606static const struct proc_ops content_proc_ops = {
1607 .proc_open = content_open_procfs,
1608 .proc_read = seq_read,
1609 .proc_lseek = seq_lseek,
1610 .proc_release = content_release_procfs,
1611};
1612
1613static int open_flush_procfs(struct inode *inode, struct file *filp)
1614{
1615 struct cache_detail *cd = PDE_DATA(inode);
1616
1617 return open_flush(inode, filp, cd);
1618}
1619
1620static int release_flush_procfs(struct inode *inode, struct file *filp)
1621{
1622 struct cache_detail *cd = PDE_DATA(inode);
1623
1624 return release_flush(inode, filp, cd);
1625}
1626
1627static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1628 size_t count, loff_t *ppos)
1629{
1630 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1631
1632 return read_flush(filp, buf, count, ppos, cd);
1633}
1634
1635static ssize_t write_flush_procfs(struct file *filp,
1636 const char __user *buf,
1637 size_t count, loff_t *ppos)
1638{
1639 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1640
1641 return write_flush(filp, buf, count, ppos, cd);
1642}
1643
1644static const struct proc_ops cache_flush_proc_ops = {
1645 .proc_open = open_flush_procfs,
1646 .proc_read = read_flush_procfs,
1647 .proc_write = write_flush_procfs,
1648 .proc_release = release_flush_procfs,
1649 .proc_lseek = no_llseek,
1650};
1651
1652static void remove_cache_proc_entries(struct cache_detail *cd)
1653{
1654 if (cd->procfs) {
1655 proc_remove(cd->procfs);
1656 cd->procfs = NULL;
1657 }
1658}
1659
1660#ifdef CONFIG_PROC_FS
1661static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1662{
1663 struct proc_dir_entry *p;
1664 struct sunrpc_net *sn;
1665
1666 sn = net_generic(net, sunrpc_net_id);
1667 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1668 if (cd->procfs == NULL)
1669 goto out_nomem;
1670
1671 p = proc_create_data("flush", S_IFREG | 0600,
1672 cd->procfs, &cache_flush_proc_ops, cd);
1673 if (p == NULL)
1674 goto out_nomem;
1675
1676 if (cd->cache_request || cd->cache_parse) {
1677 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1678 &cache_channel_proc_ops, cd);
1679 if (p == NULL)
1680 goto out_nomem;
1681 }
1682 if (cd->cache_show) {
1683 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1684 &content_proc_ops, cd);
1685 if (p == NULL)
1686 goto out_nomem;
1687 }
1688 return 0;
1689out_nomem:
1690 remove_cache_proc_entries(cd);
1691 return -ENOMEM;
1692}
1693#else
1694static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1695{
1696 return 0;
1697}
1698#endif
1699
1700void __init cache_initialize(void)
1701{
1702 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1703}
1704
1705int cache_register_net(struct cache_detail *cd, struct net *net)
1706{
1707 int ret;
1708
1709 sunrpc_init_cache_detail(cd);
1710 ret = create_cache_proc_entries(cd, net);
1711 if (ret)
1712 sunrpc_destroy_cache_detail(cd);
1713 return ret;
1714}
1715EXPORT_SYMBOL_GPL(cache_register_net);
1716
1717void cache_unregister_net(struct cache_detail *cd, struct net *net)
1718{
1719 remove_cache_proc_entries(cd);
1720 sunrpc_destroy_cache_detail(cd);
1721}
1722EXPORT_SYMBOL_GPL(cache_unregister_net);
1723
1724struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1725{
1726 struct cache_detail *cd;
1727 int i;
1728
1729 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1730 if (cd == NULL)
1731 return ERR_PTR(-ENOMEM);
1732
1733 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1734 GFP_KERNEL);
1735 if (cd->hash_table == NULL) {
1736 kfree(cd);
1737 return ERR_PTR(-ENOMEM);
1738 }
1739
1740 for (i = 0; i < cd->hash_size; i++)
1741 INIT_HLIST_HEAD(&cd->hash_table[i]);
1742 cd->net = net;
1743 return cd;
1744}
1745EXPORT_SYMBOL_GPL(cache_create_net);
1746
1747void cache_destroy_net(struct cache_detail *cd, struct net *net)
1748{
1749 kfree(cd->hash_table);
1750 kfree(cd);
1751}
1752EXPORT_SYMBOL_GPL(cache_destroy_net);
1753
1754static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1755 size_t count, loff_t *ppos)
1756{
1757 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1758
1759 return cache_read(filp, buf, count, ppos, cd);
1760}
1761
1762static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1763 size_t count, loff_t *ppos)
1764{
1765 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1766
1767 return cache_write(filp, buf, count, ppos, cd);
1768}
1769
1770static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1771{
1772 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1773
1774 return cache_poll(filp, wait, cd);
1775}
1776
1777static long cache_ioctl_pipefs(struct file *filp,
1778 unsigned int cmd, unsigned long arg)
1779{
1780 struct inode *inode = file_inode(filp);
1781 struct cache_detail *cd = RPC_I(inode)->private;
1782
1783 return cache_ioctl(inode, filp, cmd, arg, cd);
1784}
1785
1786static int cache_open_pipefs(struct inode *inode, struct file *filp)
1787{
1788 struct cache_detail *cd = RPC_I(inode)->private;
1789
1790 return cache_open(inode, filp, cd);
1791}
1792
1793static int cache_release_pipefs(struct inode *inode, struct file *filp)
1794{
1795 struct cache_detail *cd = RPC_I(inode)->private;
1796
1797 return cache_release(inode, filp, cd);
1798}
1799
1800const struct file_operations cache_file_operations_pipefs = {
1801 .owner = THIS_MODULE,
1802 .llseek = no_llseek,
1803 .read = cache_read_pipefs,
1804 .write = cache_write_pipefs,
1805 .poll = cache_poll_pipefs,
1806 .unlocked_ioctl = cache_ioctl_pipefs,
1807 .open = cache_open_pipefs,
1808 .release = cache_release_pipefs,
1809};
1810
1811static int content_open_pipefs(struct inode *inode, struct file *filp)
1812{
1813 struct cache_detail *cd = RPC_I(inode)->private;
1814
1815 return content_open(inode, filp, cd);
1816}
1817
1818static int content_release_pipefs(struct inode *inode, struct file *filp)
1819{
1820 struct cache_detail *cd = RPC_I(inode)->private;
1821
1822 return content_release(inode, filp, cd);
1823}
1824
1825const struct file_operations content_file_operations_pipefs = {
1826 .open = content_open_pipefs,
1827 .read = seq_read,
1828 .llseek = seq_lseek,
1829 .release = content_release_pipefs,
1830};
1831
1832static int open_flush_pipefs(struct inode *inode, struct file *filp)
1833{
1834 struct cache_detail *cd = RPC_I(inode)->private;
1835
1836 return open_flush(inode, filp, cd);
1837}
1838
1839static int release_flush_pipefs(struct inode *inode, struct file *filp)
1840{
1841 struct cache_detail *cd = RPC_I(inode)->private;
1842
1843 return release_flush(inode, filp, cd);
1844}
1845
1846static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1847 size_t count, loff_t *ppos)
1848{
1849 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1850
1851 return read_flush(filp, buf, count, ppos, cd);
1852}
1853
1854static ssize_t write_flush_pipefs(struct file *filp,
1855 const char __user *buf,
1856 size_t count, loff_t *ppos)
1857{
1858 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1859
1860 return write_flush(filp, buf, count, ppos, cd);
1861}
1862
1863const struct file_operations cache_flush_operations_pipefs = {
1864 .open = open_flush_pipefs,
1865 .read = read_flush_pipefs,
1866 .write = write_flush_pipefs,
1867 .release = release_flush_pipefs,
1868 .llseek = no_llseek,
1869};
1870
1871int sunrpc_cache_register_pipefs(struct dentry *parent,
1872 const char *name, umode_t umode,
1873 struct cache_detail *cd)
1874{
1875 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1876 if (IS_ERR(dir))
1877 return PTR_ERR(dir);
1878 cd->pipefs = dir;
1879 return 0;
1880}
1881EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1882
1883void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1884{
1885 if (cd->pipefs) {
1886 rpc_remove_cache_dir(cd->pipefs);
1887 cd->pipefs = NULL;
1888 }
1889}
1890EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1891
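/*
 * Remove a cache entry from its hash chain immediately (if it is still
 * hashed), completing any pending upcall and dropping the hash table's
 * reference.
 */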
1892void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1893{
1894 spin_lock(&cd->hash_lock);
1895 if (!hlist_unhashed(&h->cache_list)){
1896 sunrpc_begin_cache_remove_entry(h, cd);
1897 spin_unlock(&cd->hash_lock);
1898 sunrpc_end_cache_remove_entry(h, cd);
1899 } else
1900 spin_unlock(&cd->hash_lock);
1901}
1902EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1903