// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

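/*
 * Look up an entry matching @key under RCU protection.  Returns a
 * referenced entry (via cache_get_rcu) if a non-expired match is found,
 * or NULL otherwise.
 */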
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}

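/*
 * Allocate and insert a new entry for @key.  The entry is fully
 * initialised before the hash lock is taken; if a matching entry
 * appeared in the meantime, the new one is dropped and the existing
 * entry is returned instead.  An expired entry found during the scan
 * is unhashed and freed once the lock has been released.
 */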
static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

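/*
 * sunrpc_cache_lookup_rcu - find or create a cache entry matching @key.
 *
 * The returned entry holds a reference which the caller must release
 * with cache_put().  Returns NULL only if a new entry could not be
 * allocated.
 */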
struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;

	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked, this ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%lld, age=%lld\n",
			refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is answered in part by
 * the use of the "nextcheck" field in the cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay;

	if (list_empty(&cache_list))
		return;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);
	else
		delay = 5;

	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had its
 * entries cleaned at least once.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		while (!hlist_empty(head)) {
			ch = hlist_entry(head->first, struct cache_head,
					 cache_list);
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * They are also linked into a doubly-linked list
 * "cache_defer_list"; new requests are added at the
 * head, so the oldest sit at the tail.  If too many
 * requests are deferred (more than DFR_MAX), one is
 * discarded from either end of the list, chosen at
 * random.
 */
#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* limit on the number of outstanding deferred requests */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

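/*
 * The helpers below manipulate the deferral hash table and the global
 * cache_defer_list; all of them must be called with cache_defer_lock held.
 */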
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}
621
622struct thread_deferred_req {
623 struct cache_deferred_req handle;
624 struct completion completion;
625};
626
627static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
628{
629 struct thread_deferred_req *dr =
630 container_of(dreq, struct thread_deferred_req, handle);
631 complete(&dr->completion);
632}
633
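/*
 * Wait synchronously (up to req->thread_wait) for a pending upcall on
 * @item to complete, using an on-stack deferred request.
 */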
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

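/*
 * Revisit every request deferred against @item: remove them from the
 * hash table under the lock, then call ->revisit on each outside it.
 */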
717static void cache_revisit_request(struct cache_head *item)
718{
719 struct cache_deferred_req *dreq;
720 struct list_head pending;
721 struct hlist_node *tmp;
722 int hash = DFR_HASH(item);
723
724 INIT_LIST_HEAD(&pending);
725 spin_lock(&cache_defer_lock);
726
727 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
728 if (dreq->item == item) {
729 __unhash_deferred_req(dreq);
730 list_add(&dreq->recent, &pending);
731 }
732
733 spin_unlock(&cache_defer_lock);
734
735 while (!list_empty(&pending)) {
736 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
737 list_del_init(&dreq->recent);
738 dreq->revisit(dreq, 0);
739 }
740}
741
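/*
 * Revisit and discard all requests deferred by @owner (typically an RPC
 * service being shut down); each is passed to ->revisit with too_many set.
 */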
742void cache_clean_deferred(void *owner)
743{
744 struct cache_deferred_req *dreq, *tmp;
745 struct list_head pending;
746
747
748 INIT_LIST_HEAD(&pending);
749 spin_lock(&cache_defer_lock);
750
751 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
752 if (dreq->owner == owner) {
753 __unhash_deferred_req(dreq);
754 list_add(&dreq->recent, &pending);
755 }
756 }
757 spin_unlock(&cache_defer_lock);
758
759 while (!list_empty(&pending)) {
760 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
761 list_del_init(&dreq->recent);
762 dreq->revisit(dreq, 1);
763 }
764}
765
/*
 * communication with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are only
 * added to the end of the list, and any poller is notified by a wake up.
 *
 * When a whole request has been read, it is removed from the list,
 * unless there are still readers with partial reads in progress,
 * in which case it stays until all of them have finished.
 */
782static DEFINE_SPINLOCK(queue_lock);
783
784struct cache_queue {
785 struct list_head list;
786 int reader;
787};
788struct cache_request {
789 struct cache_queue q;
790 struct cache_head *item;
791 char * buf;
792 int len;
793 int readers;
794};
795struct cache_reader {
796 struct cache_queue q;
797 int offset;
798};
799
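/*
 * Format the upcall for @crq into its page-sized buffer using the
 * cache's ->cache_request method; returns the number of bytes used,
 * or -EAGAIN if the request did not fit.
 */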
800static int cache_request(struct cache_detail *detail,
801 struct cache_request *crq)
802{
803 char *bp = crq->buf;
804 int len = PAGE_SIZE;
805
806 detail->cache_request(detail, crq->item, &bp, &len);
807 if (len < 0)
808 return -EAGAIN;
809 return PAGE_SIZE - len;
810}
811
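/*
 * Hand the next queued request to a reader of the 'channel' file.
 * A request may be read in several chunks; it is only removed from
 * the queue once it has been consumed completely and no upcall is
 * pending for it any more.
 */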
812static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
813 loff_t *ppos, struct cache_detail *cd)
814{
815 struct cache_reader *rp = filp->private_data;
816 struct cache_request *rq;
817 struct inode *inode = file_inode(filp);
818 int err;
819
820 if (count == 0)
821 return 0;
822
823 inode_lock(inode);
824
825 again:
826 spin_lock(&queue_lock);
827
828 while (rp->q.list.next != &cd->queue &&
829 list_entry(rp->q.list.next, struct cache_queue, list)
830 ->reader) {
831 struct list_head *next = rp->q.list.next;
832 list_move(&rp->q.list, next);
833 }
834 if (rp->q.list.next == &cd->queue) {
835 spin_unlock(&queue_lock);
836 inode_unlock(inode);
837 WARN_ON_ONCE(rp->offset);
838 return 0;
839 }
840 rq = container_of(rp->q.list.next, struct cache_request, q.list);
841 WARN_ON_ONCE(rq->q.reader);
842 if (rp->offset == 0)
843 rq->readers++;
844 spin_unlock(&queue_lock);
845
846 if (rq->len == 0) {
847 err = cache_request(cd, rq);
848 if (err < 0)
849 goto out;
850 rq->len = err;
851 }
852
853 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
854 err = -EAGAIN;
855 spin_lock(&queue_lock);
856 list_move(&rp->q.list, &rq->q.list);
857 spin_unlock(&queue_lock);
858 } else {
859 if (rp->offset + count > rq->len)
860 count = rq->len - rp->offset;
861 err = -EFAULT;
862 if (copy_to_user(buf, rq->buf + rp->offset, count))
863 goto out;
864 rp->offset += count;
865 if (rp->offset >= rq->len) {
866 rp->offset = 0;
867 spin_lock(&queue_lock);
868 list_move(&rp->q.list, &rq->q.list);
869 spin_unlock(&queue_lock);
870 }
871 err = 0;
872 }
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
877 rq->readers--;
878 if (rq->readers == 0 &&
879 !test_bit(CACHE_PENDING, &rq->item->flags)) {
880 list_del(&rq->q.list);
881 spin_unlock(&queue_lock);
882 cache_put(rq->item, cd);
883 kfree(rq->buf);
884 kfree(rq);
885 } else
886 spin_unlock(&queue_lock);
887 }
888 if (err == -EAGAIN)
889 goto again;
890 inode_unlock(inode);
891 return err ? err : count;
892}
893
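/*
 * A write to the 'channel' file is a downcall: the buffer is copied in,
 * NUL-terminated and handed to the cache's ->cache_parse method.
 */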
894static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
895 size_t count, struct cache_detail *cd)
896{
897 ssize_t ret;
898
899 if (count == 0)
900 return -EINVAL;
901 if (copy_from_user(kaddr, buf, count))
902 return -EFAULT;
903 kaddr[count] = '\0';
904 ret = cd->cache_parse(cd, kaddr, count);
905 if (!ret)
906 ret = count;
907 return ret;
908}
909
910static ssize_t cache_downcall(struct address_space *mapping,
911 const char __user *buf,
912 size_t count, struct cache_detail *cd)
913{
914 char *write_buf;
915 ssize_t ret = -ENOMEM;
916
917 if (count >= 32768) {
918 ret = -EINVAL;
919 goto out;
920 }
921
922 write_buf = kvmalloc(count + 1, GFP_KERNEL);
923 if (!write_buf)
924 goto out;
925
926 ret = cache_do_downcall(write_buf, buf, count, cd);
927 kvfree(write_buf);
928out:
929 return ret;
930}
931
932static ssize_t cache_write(struct file *filp, const char __user *buf,
933 size_t count, loff_t *ppos,
934 struct cache_detail *cd)
935{
936 struct address_space *mapping = filp->f_mapping;
937 struct inode *inode = file_inode(filp);
938 ssize_t ret = -EINVAL;
939
940 if (!cd->cache_parse)
941 goto out;
942
943 inode_lock(inode);
944 ret = cache_downcall(mapping, buf, count, cd);
945 inode_unlock(inode);
946out:
947 return ret;
948}
949
950static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
951
952static __poll_t cache_poll(struct file *filp, poll_table *wait,
953 struct cache_detail *cd)
954{
955 __poll_t mask;
956 struct cache_reader *rp = filp->private_data;
957 struct cache_queue *cq;
958
	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;
963
964 if (!rp)
965 return mask;
966
967 spin_lock(&queue_lock);
968
969 for (cq= &rp->q; &cq->list != &cd->queue;
970 cq = list_entry(cq->list.next, struct cache_queue, list))
971 if (!cq->reader) {
972 mask |= EPOLLIN | EPOLLRDNORM;
973 break;
974 }
975 spin_unlock(&queue_lock);
976 return mask;
977}
978
979static int cache_ioctl(struct inode *ino, struct file *filp,
980 unsigned int cmd, unsigned long arg,
981 struct cache_detail *cd)
982{
983 int len = 0;
984 struct cache_reader *rp = filp->private_data;
985 struct cache_queue *cq;
986
987 if (cmd != FIONREAD || !rp)
988 return -EINVAL;
989
	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
996 cq = list_entry(cq->list.next, struct cache_queue, list))
997 if (!cq->reader) {
998 struct cache_request *cr =
999 container_of(cq, struct cache_request, q);
1000 len = cr->len - rp->offset;
1001 break;
1002 }
1003 spin_unlock(&queue_lock);
1004
1005 return put_user(len, (int __user *)arg);
1006}
1007
1008static int cache_open(struct inode *inode, struct file *filp,
1009 struct cache_detail *cd)
1010{
1011 struct cache_reader *rp = NULL;
1012
1013 if (!cd || !try_module_get(cd->owner))
1014 return -EACCES;
1015 nonseekable_open(inode, filp);
1016 if (filp->f_mode & FMODE_READ) {
1017 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1018 if (!rp) {
1019 module_put(cd->owner);
1020 return -ENOMEM;
1021 }
1022 rp->offset = 0;
1023 rp->q.reader = 1;
1024
1025 spin_lock(&queue_lock);
1026 list_add(&rp->q.list, &cd->queue);
1027 spin_unlock(&queue_lock);
1028 }
1029 if (filp->f_mode & FMODE_WRITE)
1030 atomic_inc(&cd->writers);
1031 filp->private_data = rp;
1032 return 0;
1033}
1034
1035static int cache_release(struct inode *inode, struct file *filp,
1036 struct cache_detail *cd)
1037{
1038 struct cache_reader *rp = filp->private_data;
1039
1040 if (rp) {
1041 spin_lock(&queue_lock);
1042 if (rp->offset) {
1043 struct cache_queue *cq;
1044 for (cq= &rp->q; &cq->list != &cd->queue;
1045 cq = list_entry(cq->list.next, struct cache_queue, list))
1046 if (!cq->reader) {
1047 container_of(cq, struct cache_request, q)
1048 ->readers--;
1049 break;
1050 }
1051 rp->offset = 0;
1052 }
1053 list_del(&rp->q.list);
1054 spin_unlock(&queue_lock);
1055
1056 filp->private_data = NULL;
1057 kfree(rp);
1058
1059 }
1060 if (filp->f_mode & FMODE_WRITE) {
1061 atomic_dec(&cd->writers);
1062 cd->last_close = seconds_since_boot();
1063 }
1064 module_put(cd->owner);
1065 return 0;
1066}
1067
1068
1069
1070static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1071{
1072 struct cache_queue *cq, *tmp;
1073 struct cache_request *cr;
1074 struct list_head dequeued;
1075
1076 INIT_LIST_HEAD(&dequeued);
1077 spin_lock(&queue_lock);
1078 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1079 if (!cq->reader) {
1080 cr = container_of(cq, struct cache_request, q);
1081 if (cr->item != ch)
1082 continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
1086 if (cr->readers != 0)
1087 continue;
1088 list_move(&cr->q.list, &dequeued);
1089 }
1090 spin_unlock(&queue_lock);
1091 while (!list_empty(&dequeued)) {
1092 cr = list_entry(dequeued.next, struct cache_request, q.list);
1093 list_del(&cr->q.list);
1094 cache_put(cr->item, detail);
1095 kfree(cr->buf);
1096 kfree(cr);
1097 }
1098}
1099
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * with a backslash, or hexified with a leading \x.
 * Record is terminated with newline.
 */
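/*
 * For example (illustrative only): qword_add() emits the key "foo bar"
 * as "foo\040bar " (space octal-escaped, trailing separator), while
 * qword_addhex() emits the two bytes 0x01 0x02 as "\x0102 ".
 */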
1109void qword_add(char **bpp, int *lp, char *str)
1110{
1111 char *bp = *bpp;
1112 int len = *lp;
1113 int ret;
1114
1115 if (len < 0) return;
1116
1117 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1118 if (ret >= len) {
1119 bp += len;
1120 len = -1;
1121 } else {
1122 bp += ret;
1123 len -= ret;
1124 *bp++ = ' ';
1125 len--;
1126 }
1127 *bpp = bp;
1128 *lp = len;
1129}
1130EXPORT_SYMBOL_GPL(qword_add);
1131
1132void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1133{
1134 char *bp = *bpp;
1135 int len = *lp;
1136
1137 if (len < 0) return;
1138
1139 if (len > 2) {
1140 *bp++ = '\\';
1141 *bp++ = 'x';
1142 len -= 2;
1143 while (blen && len >= 2) {
1144 bp = hex_byte_pack(bp, *buf++);
1145 len -= 2;
1146 blen--;
1147 }
1148 }
1149 if (blen || len<1) len = -1;
1150 else {
1151 *bp++ = ' ';
1152 len--;
1153 }
1154 *bpp = bp;
1155 *lp = len;
1156}
1157EXPORT_SYMBOL_GPL(qword_addhex);
1158
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * Register an upcall request to user-space and queue it up for read()
 * by the upcall daemon.
 *
 * Each request should be at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
1231
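/*
 * Queue an upcall for @h unless one is already pending.  The _timeout
 * variant below additionally fails with -EINVAL when no userspace daemon
 * has the channel open, so the caller can fall back to a negative entry.
 */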
1232int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1233{
1234 if (test_and_set_bit(CACHE_PENDING, &h->flags))
1235 return 0;
1236 return cache_pipe_upcall(detail, h);
1237}
1238EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1239
1240int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1241 struct cache_head *h)
1242{
1243 if (!cache_listeners_exist(detail)) {
1244 warn_no_listener(detail);
1245 trace_cache_entry_no_listener(detail, h);
1246 return -EINVAL;
1247 }
1248 return sunrpc_cache_pipe_upcall(detail, h);
1249}
1250EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
1327static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1328{
1329 loff_t n = *pos;
1330 unsigned int hash, entry;
1331 struct cache_head *ch;
1332 struct cache_detail *cd = m->private;
1333
1334 if (!n--)
1335 return SEQ_START_TOKEN;
1336 hash = n >> 32;
1337 entry = n & ((1LL<<32) - 1);
1338
1339 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1340 if (!entry--)
1341 return ch;
1342 n &= ~((1LL<<32) - 1);
1343 do {
1344 hash++;
1345 n += 1LL<<32;
1346 } while(hash < cd->hash_size &&
1347 hlist_empty(&cd->hash_table[hash]));
1348 if (hash >= cd->hash_size)
1349 return NULL;
1350 *pos = n+1;
1351 return hlist_entry_safe(rcu_dereference_raw(
1352 hlist_first_rcu(&cd->hash_table[hash])),
1353 struct cache_head, cache_list);
1354}
1355
1356static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1357{
1358 struct cache_head *ch = p;
1359 int hash = (*pos >> 32);
1360 struct cache_detail *cd = m->private;
1361
1362 if (p == SEQ_START_TOKEN)
1363 hash = 0;
1364 else if (ch->cache_list.next == NULL) {
1365 hash++;
1366 *pos += 1LL<<32;
1367 } else {
1368 ++*pos;
1369 return hlist_entry_safe(rcu_dereference_raw(
1370 hlist_next_rcu(&ch->cache_list)),
1371 struct cache_head, cache_list);
1372 }
1373 *pos &= ~((1LL<<32) - 1);
1374 while (hash < cd->hash_size &&
1375 hlist_empty(&cd->hash_table[hash])) {
1376 hash++;
1377 *pos += 1LL<<32;
1378 }
1379 if (hash >= cd->hash_size)
1380 return NULL;
1381 ++*pos;
1382 return hlist_entry_safe(rcu_dereference_raw(
1383 hlist_first_rcu(&cd->hash_table[hash])),
1384 struct cache_head, cache_list);
1385}
1386
1387void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1388 __acquires(RCU)
1389{
1390 rcu_read_lock();
1391 return __cache_seq_start(m, pos);
1392}
1393EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1394
1395void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1396{
1397 return cache_seq_next(file, p, pos);
1398}
1399EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1400
1401void cache_seq_stop_rcu(struct seq_file *m, void *p)
1402 __releases(RCU)
1403{
1404 rcu_read_unlock();
1405}
1406EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
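/* seq_file ->show: emit one cache entry, or the header for SEQ_START_TOKEN. */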
1407
1408static int c_show(struct seq_file *m, void *p)
1409{
1410 struct cache_head *cp = p;
1411 struct cache_detail *cd = m->private;
1412
1413 if (p == SEQ_START_TOKEN)
1414 return cd->cache_show(m, cd, NULL);
1415
1416 ifdebug(CACHE)
1417 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1418 convert_to_wallclock(cp->expiry_time),
1419 kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_puts(m, "# ");
	else {
1425 if (cache_is_expired(cd, cp))
1426 seq_puts(m, "# ");
1427 cache_put(cp, cd);
1428 }
1429
1430 return cd->cache_show(m, cd, cp);
1431}
1432
1433static const struct seq_operations cache_content_op = {
1434 .start = cache_seq_start_rcu,
1435 .next = cache_seq_next_rcu,
1436 .stop = cache_seq_stop_rcu,
1437 .show = c_show,
1438};
1439
1440static int content_open(struct inode *inode, struct file *file,
1441 struct cache_detail *cd)
1442{
1443 struct seq_file *seq;
1444 int err;
1445
1446 if (!cd || !try_module_get(cd->owner))
1447 return -EACCES;
1448
1449 err = seq_open(file, &cache_content_op);
1450 if (err) {
1451 module_put(cd->owner);
1452 return err;
1453 }
1454
1455 seq = file->private_data;
1456 seq->private = cd;
1457 return 0;
1458}
1459
1460static int content_release(struct inode *inode, struct file *file,
1461 struct cache_detail *cd)
1462{
1463 int ret = seq_release(inode, file);
1464 module_put(cd->owner);
1465 return ret;
1466}
1467
1468static int open_flush(struct inode *inode, struct file *file,
1469 struct cache_detail *cd)
1470{
1471 if (!cd || !try_module_get(cd->owner))
1472 return -EACCES;
1473 return nonseekable_open(inode, file);
1474}
1475
1476static int release_flush(struct inode *inode, struct file *file,
1477 struct cache_detail *cd)
1478{
1479 module_put(cd->owner);
1480 return 0;
1481}
1482
1483static ssize_t read_flush(struct file *file, char __user *buf,
1484 size_t count, loff_t *ppos,
1485 struct cache_detail *cd)
1486{
1487 char tbuf[22];
1488 size_t len;
1489
1490 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1491 convert_to_wallclock(cd->flush_time));
1492 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1493}
1494
1495static ssize_t write_flush(struct file *file, const char __user *buf,
1496 size_t count, loff_t *ppos,
1497 struct cache_detail *cd)
1498{
1499 char tbuf[20];
1500 char *ep;
1501 time64_t now;
1502
1503 if (*ppos || count > sizeof(tbuf)-1)
1504 return -EINVAL;
1505 if (copy_from_user(tbuf, buf, count))
1506 return -EFAULT;
1507 tbuf[count] = 0;
1508 simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;
1526
1527 cd->flush_time = now;
1528 cd->nextcheck = now;
1529 cache_flush();
1530
1531 *ppos += count;
1532 return count;
1533}
1534
1535static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1536 size_t count, loff_t *ppos)
1537{
1538 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1539
1540 return cache_read(filp, buf, count, ppos, cd);
1541}
1542
1543static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1544 size_t count, loff_t *ppos)
1545{
1546 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1547
1548 return cache_write(filp, buf, count, ppos, cd);
1549}
1550
1551static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1552{
1553 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1554
1555 return cache_poll(filp, wait, cd);
1556}
1557
1558static long cache_ioctl_procfs(struct file *filp,
1559 unsigned int cmd, unsigned long arg)
1560{
1561 struct inode *inode = file_inode(filp);
1562 struct cache_detail *cd = PDE_DATA(inode);
1563
1564 return cache_ioctl(inode, filp, cmd, arg, cd);
1565}
1566
1567static int cache_open_procfs(struct inode *inode, struct file *filp)
1568{
1569 struct cache_detail *cd = PDE_DATA(inode);
1570
1571 return cache_open(inode, filp, cd);
1572}
1573
1574static int cache_release_procfs(struct inode *inode, struct file *filp)
1575{
1576 struct cache_detail *cd = PDE_DATA(inode);
1577
1578 return cache_release(inode, filp, cd);
1579}
1580
1581static const struct file_operations cache_file_operations_procfs = {
1582 .owner = THIS_MODULE,
1583 .llseek = no_llseek,
1584 .read = cache_read_procfs,
1585 .write = cache_write_procfs,
1586 .poll = cache_poll_procfs,
1587 .unlocked_ioctl = cache_ioctl_procfs,
1588 .open = cache_open_procfs,
1589 .release = cache_release_procfs,
1590};
1591
1592static int content_open_procfs(struct inode *inode, struct file *filp)
1593{
1594 struct cache_detail *cd = PDE_DATA(inode);
1595
1596 return content_open(inode, filp, cd);
1597}
1598
1599static int content_release_procfs(struct inode *inode, struct file *filp)
1600{
1601 struct cache_detail *cd = PDE_DATA(inode);
1602
1603 return content_release(inode, filp, cd);
1604}
1605
1606static const struct file_operations content_file_operations_procfs = {
1607 .open = content_open_procfs,
1608 .read = seq_read,
1609 .llseek = seq_lseek,
1610 .release = content_release_procfs,
1611};
1612
1613static int open_flush_procfs(struct inode *inode, struct file *filp)
1614{
1615 struct cache_detail *cd = PDE_DATA(inode);
1616
1617 return open_flush(inode, filp, cd);
1618}
1619
1620static int release_flush_procfs(struct inode *inode, struct file *filp)
1621{
1622 struct cache_detail *cd = PDE_DATA(inode);
1623
1624 return release_flush(inode, filp, cd);
1625}
1626
1627static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1628 size_t count, loff_t *ppos)
1629{
1630 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1631
1632 return read_flush(filp, buf, count, ppos, cd);
1633}
1634
1635static ssize_t write_flush_procfs(struct file *filp,
1636 const char __user *buf,
1637 size_t count, loff_t *ppos)
1638{
1639 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1640
1641 return write_flush(filp, buf, count, ppos, cd);
1642}
1643
1644static const struct file_operations cache_flush_operations_procfs = {
1645 .open = open_flush_procfs,
1646 .read = read_flush_procfs,
1647 .write = write_flush_procfs,
1648 .release = release_flush_procfs,
1649 .llseek = no_llseek,
1650};
1651
1652static void remove_cache_proc_entries(struct cache_detail *cd)
1653{
1654 if (cd->procfs) {
1655 proc_remove(cd->procfs);
1656 cd->procfs = NULL;
1657 }
1658}
1659
1660#ifdef CONFIG_PROC_FS
1661static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1662{
1663 struct proc_dir_entry *p;
1664 struct sunrpc_net *sn;
1665
1666 sn = net_generic(net, sunrpc_net_id);
1667 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1668 if (cd->procfs == NULL)
1669 goto out_nomem;
1670
1671 p = proc_create_data("flush", S_IFREG | 0600,
1672 cd->procfs, &cache_flush_operations_procfs, cd);
1673 if (p == NULL)
1674 goto out_nomem;
1675
1676 if (cd->cache_request || cd->cache_parse) {
1677 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1678 &cache_file_operations_procfs, cd);
1679 if (p == NULL)
1680 goto out_nomem;
1681 }
1682 if (cd->cache_show) {
1683 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1684 &content_file_operations_procfs, cd);
1685 if (p == NULL)
1686 goto out_nomem;
1687 }
1688 return 0;
1689out_nomem:
1690 remove_cache_proc_entries(cd);
1691 return -ENOMEM;
1692}
1693#else
1694static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1695{
1696 return 0;
1697}
1698#endif
1699
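/* One-time initialisation of the cache cleaner work item, done when sunrpc is initialised. */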
1700void __init cache_initialize(void)
1701{
1702 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1703}
1704
1705int cache_register_net(struct cache_detail *cd, struct net *net)
1706{
1707 int ret;
1708
1709 sunrpc_init_cache_detail(cd);
1710 ret = create_cache_proc_entries(cd, net);
1711 if (ret)
1712 sunrpc_destroy_cache_detail(cd);
1713 return ret;
1714}
1715EXPORT_SYMBOL_GPL(cache_register_net);
1716
1717void cache_unregister_net(struct cache_detail *cd, struct net *net)
1718{
1719 remove_cache_proc_entries(cd);
1720 sunrpc_destroy_cache_detail(cd);
1721}
1722EXPORT_SYMBOL_GPL(cache_unregister_net);
1723
1724struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1725{
1726 struct cache_detail *cd;
1727 int i;
1728
1729 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1730 if (cd == NULL)
1731 return ERR_PTR(-ENOMEM);
1732
1733 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1734 GFP_KERNEL);
1735 if (cd->hash_table == NULL) {
1736 kfree(cd);
1737 return ERR_PTR(-ENOMEM);
1738 }
1739
1740 for (i = 0; i < cd->hash_size; i++)
1741 INIT_HLIST_HEAD(&cd->hash_table[i]);
1742 cd->net = net;
1743 return cd;
1744}
1745EXPORT_SYMBOL_GPL(cache_create_net);
1746
1747void cache_destroy_net(struct cache_detail *cd, struct net *net)
1748{
1749 kfree(cd->hash_table);
1750 kfree(cd);
1751}
1752EXPORT_SYMBOL_GPL(cache_destroy_net);
1753
1754static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1755 size_t count, loff_t *ppos)
1756{
1757 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1758
1759 return cache_read(filp, buf, count, ppos, cd);
1760}
1761
1762static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1763 size_t count, loff_t *ppos)
1764{
1765 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1766
1767 return cache_write(filp, buf, count, ppos, cd);
1768}
1769
1770static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1771{
1772 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1773
1774 return cache_poll(filp, wait, cd);
1775}
1776
1777static long cache_ioctl_pipefs(struct file *filp,
1778 unsigned int cmd, unsigned long arg)
1779{
1780 struct inode *inode = file_inode(filp);
1781 struct cache_detail *cd = RPC_I(inode)->private;
1782
1783 return cache_ioctl(inode, filp, cmd, arg, cd);
1784}
1785
1786static int cache_open_pipefs(struct inode *inode, struct file *filp)
1787{
1788 struct cache_detail *cd = RPC_I(inode)->private;
1789
1790 return cache_open(inode, filp, cd);
1791}
1792
1793static int cache_release_pipefs(struct inode *inode, struct file *filp)
1794{
1795 struct cache_detail *cd = RPC_I(inode)->private;
1796
1797 return cache_release(inode, filp, cd);
1798}
1799
1800const struct file_operations cache_file_operations_pipefs = {
1801 .owner = THIS_MODULE,
1802 .llseek = no_llseek,
1803 .read = cache_read_pipefs,
1804 .write = cache_write_pipefs,
1805 .poll = cache_poll_pipefs,
1806 .unlocked_ioctl = cache_ioctl_pipefs,
1807 .open = cache_open_pipefs,
1808 .release = cache_release_pipefs,
1809};
1810
1811static int content_open_pipefs(struct inode *inode, struct file *filp)
1812{
1813 struct cache_detail *cd = RPC_I(inode)->private;
1814
1815 return content_open(inode, filp, cd);
1816}
1817
1818static int content_release_pipefs(struct inode *inode, struct file *filp)
1819{
1820 struct cache_detail *cd = RPC_I(inode)->private;
1821
1822 return content_release(inode, filp, cd);
1823}
1824
1825const struct file_operations content_file_operations_pipefs = {
1826 .open = content_open_pipefs,
1827 .read = seq_read,
1828 .llseek = seq_lseek,
1829 .release = content_release_pipefs,
1830};
1831
1832static int open_flush_pipefs(struct inode *inode, struct file *filp)
1833{
1834 struct cache_detail *cd = RPC_I(inode)->private;
1835
1836 return open_flush(inode, filp, cd);
1837}
1838
1839static int release_flush_pipefs(struct inode *inode, struct file *filp)
1840{
1841 struct cache_detail *cd = RPC_I(inode)->private;
1842
1843 return release_flush(inode, filp, cd);
1844}
1845
1846static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1847 size_t count, loff_t *ppos)
1848{
1849 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1850
1851 return read_flush(filp, buf, count, ppos, cd);
1852}
1853
1854static ssize_t write_flush_pipefs(struct file *filp,
1855 const char __user *buf,
1856 size_t count, loff_t *ppos)
1857{
1858 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1859
1860 return write_flush(filp, buf, count, ppos, cd);
1861}
1862
1863const struct file_operations cache_flush_operations_pipefs = {
1864 .open = open_flush_pipefs,
1865 .read = read_flush_pipefs,
1866 .write = write_flush_pipefs,
1867 .release = release_flush_pipefs,
1868 .llseek = no_llseek,
1869};
1870
1871int sunrpc_cache_register_pipefs(struct dentry *parent,
1872 const char *name, umode_t umode,
1873 struct cache_detail *cd)
1874{
1875 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1876 if (IS_ERR(dir))
1877 return PTR_ERR(dir);
1878 cd->pipefs = dir;
1879 return 0;
1880}
1881EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1882
1883void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1884{
1885 if (cd->pipefs) {
1886 rpc_remove_cache_dir(cd->pipefs);
1887 cd->pipefs = NULL;
1888 }
1889}
1890EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1891
1892void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1893{
1894 spin_lock(&cd->hash_lock);
1895 if (!hlist_unhashed(&h->cache_list)){
1896 sunrpc_begin_cache_remove_entry(h, cd);
1897 spin_unlock(&cd->hash_lock);
1898 sunrpc_end_cache_remove_entry(h, cd);
1899 } else
1900 spin_unlock(&cd->hash_lock);
1901}
1902EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1903