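/*
 * Generic cache management for the various authentication-related
 * caches used by SUNRPC clients and servers.
 */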
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

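/*
 * sunrpc_cache_lookup - find or create an entry in a cache's hash table.
 * Under the read lock, return any existing entry that ->match()es the key.
 * Otherwise allocate a fresh entry, initialise it from the key and insert
 * it under the write lock, re-checking first for a racing insertion.
 * The returned entry carries an extra reference for the caller.
 */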
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);

	new = detail->alloc();
	if (!new)
		return NULL;

	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL(sunrpc_cache_lookup);

static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	return !test_and_set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail, int new)
{
	if (new)
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		queue_loose(detail, head);
	}
}

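/*
 * sunrpc_cache_update - update 'old' with the content of 'new'.
 * If 'old' is not yet CACHE_VALID it is updated in place and returned.
 * Otherwise a replacement entry is allocated, filled from 'new', hashed
 * in, and returned, while 'old' is marked expired and released.  Returns
 * NULL (releasing 'old') if the allocation fails.
 */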
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	struct cache_head **head;
	struct cache_head *tmp;
	int is_new;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			is_new = cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail, is_new);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}

	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	is_new = cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail, is_new);
	cache_fresh_unlocked(old, detail, 0);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);

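/*
 * cache_check - inspect a cache entry on behalf of a request.
 * Returns 0 if the entry is valid and usable, -ENOENT if it is negative
 * or no upcall could be made, -EAGAIN if the request has been deferred
 * while an upcall completes, and -ETIMEDOUT if the request could not be
 * deferred.  If an entry is stale or more than half-way to expiry, an
 * upcall is started to refresh it.  On any non-zero return the caller's
 * reference on 'h' has been dropped.
 */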
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_unlocked(h, detail,
						cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		if (cache_defer_req(rqstp, h) != 0)
			rv = -ETIMEDOUT;

	if (rv)
		cache_put(h, detail);
	return rv;
}
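/*
 * Bookkeeping for the communal cache cleaner: all cache_details are kept
 * on cache_list, and a delayed work item walks one hash chain at a time.
 * current_detail and current_index record how far the cleaner has got.
 */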
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static const struct file_operations cache_file_operations;
static const struct file_operations content_file_operations;
static const struct file_operations cache_flush_operations;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = cd->owner;
		cd->channel_ent = cd->content_ent = NULL;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		cd->flush_ent = p;
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = cd->owner;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->channel_ent = p;
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->content_ent = p;
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	schedule_delayed_work(&cache_cleaner, 0);
}

int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		if (cd->flush_ent)
			remove_proc_entry("flush", cd->proc_ent);
		if (cd->channel_ent)
			remove_proc_entry("channel", cd->proc_ent);
		if (cd->content_ent)
			remove_proc_entry("content", cd->proc_ent);

		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return 0;
}

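/*
 * cache_clean - scan one hash chain of one cache for entries that have
 * expired or pre-date flush_time, and unhash and release at most one of
 * them.  Returns 1 if an entry was freed, 0 if nothing on this chain
 * could be freed, and -1 when no cache is currently due for cleaning.
 */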
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds()
			    && ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * Run the cleaner from the shared workqueue: reschedule quickly while
 * there is work to do, back off when every cache has been scanned, and
 * stop rescheduling once the last cache has been unregistered.
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

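/*
 * cache_flush - clean all caches promptly by calling cache_clean()
 * repeatedly until it reports nothing left to do; the scan is run twice
 * so that every cache gets a full pass.
 */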
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}

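/*
 * Deferral and revisiting of requests.
 *
 * If a cache lookup finds a pending entry, the calling request may be
 * deferred: req->defer() packages it as a cache_deferred_req, which is
 * kept on a global LRU list and on a small hash table keyed by the cache
 * item it is waiting for.  When that item becomes valid, or the deferred
 * request has to be dropped, its ->revisit() callback is invoked.
 */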
#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too many deferrals queued: randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ETIMEDOUT;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ETIMEDOUT;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in; now maybe we need to discard the oldest */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		dreq = list_entry(cache_defer_list.prev,
				  struct cache_deferred_req, recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many; drop it */
		dreq->revisit(dreq, 1);
	}
	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* the item may have been validated while we were
		 * deferring; revisit it immediately */
		cache_revisit_request(item);
	}
	return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
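/*
 * Communication with user space via /proc/net/rpc/<cachename>/channel.
 *
 * Readers (user-space daemons) consume pending upcall requests from the
 * cache_detail's queue; writes back into the channel are parsed by
 * ->cache_parse() as downcall replies.  The queue holds cache_request
 * entries interleaved with cache_reader markers that record how far each
 * reader has progressed.
 */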
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head list;
	int reader;		/* if 0, this entry is a request */
};
struct cache_request {
	struct cache_queue q;
	struct cache_head *item;
	char *buf;
	int len;
	int readers;
};
struct cache_reader {
	struct cache_queue q;
	int offset;		/* if non-zero, we hold a count on the next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&queue_io_mutex);

again:
	spin_lock(&queue_lock);
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&queue_io_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_mutex */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	if (count == 0)
		return 0;
	if (count >= sizeof(write_buf))
		return -EINVAL;

	mutex_lock(&queue_io_mutex);

	if (copy_from_user(write_buf, buf, count)) {
		mutex_unlock(&queue_io_mutex);
		return -EFAULT;
	}
	write_buf[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, write_buf, count);
	else
		err = -EINVAL;

	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* writes are always allowed */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only report the length remaining in the current request,
	 * or the length of the next pending request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = NULL;

	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		struct cache_detail *cd = PDE(inode)->data;

		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	return 0;
}

static const struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl,
	.open		= cache_open,
	.release	= cache_release,
};


static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

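/*
 * Formatting helpers for upcall messages: each field is appended to the
 * buffer as a space-separated word.  qword_add() escapes whitespace and
 * backslashes as \ooo octal sequences; qword_addhex() emits the buffer as
 * a \x-prefixed hex string.  On overflow *lp is set to -1.
 */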
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0)
		return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail);
	}
}

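/*
 * cache_make_upcall - format an upcall request for 'h' with
 * ->cache_request() and queue it on the channel for a user-space reader.
 * Returns -EINVAL if there is no request routine or no listener has been
 * seen recently, and -EAGAIN on allocation or formatting failure.
 */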
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}

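/*
 * qword_get - parse the next word of a downcall message into 'dest'.
 * Words are separated by spaces; "\x" introduces a hex-encoded blob and
 * "\ooo" octal escapes are decoded within ordinary words.  Returns the
 * number of bytes stored, or -1 if the word was malformed or too long
 * for 'bufsize'.
 */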
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* hex-encoded string */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}

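/*
 * Support for /proc/net/rpc/<cachename>/content: a seq_file walk over the
 * cache's hash table.  The 64-bit seq position encodes the hash bucket in
 * the upper 32 bits and the position within the chain in the lower 32.
 * ->cache_show() is called with a NULL item first to emit a header line.
 */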
struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;
	return 0;
}

static const struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		len = -EFAULT;
	else
		*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

static const struct file_operations cache_flush_operations = {
	.open		= nonseekable_open,
	.read		= read_flush,
	.write		= write_flush,
};