// SPDX-License-Identifier: GPL-2.0
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>
/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);
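
/*
 * Example (illustrative sketch only; "my_timeout_ms" is a hypothetical
 * helper, not part of this file): a caller holding a queued task could
 * report the time remaining before __rpc_queue_timer_fn() fires, in
 * milliseconds:
 *
 *	static unsigned long my_timeout_ms(const struct rpc_task *task)
 *	{
 *		return jiffies_to_msecs(rpc_task_timeout(task));
 *	}
 */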

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	timer_reduce(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(timeout - jiffies));

	task->tk_timeout = timeout;
	rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}
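
/*
 * Illustration (derived from the two helpers above, for the reader):
 * only one task per tk_owner sits on the queue list itself; later
 * tasks with the same owner are parked on the first task's
 * u.tk_wait.links and are marked by list.prev == NULL, which is how
 * __rpc_list_dequeue_task() distinguishes the two cases:
 *
 *	queue->tasks[p]: taskA (owner 1) <-> taskC (owner 2)
 *	                   `-- links: taskB (owner 1, list.prev == NULL)
 */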

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
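
/*
 * Example (sketch; assumes the caller holds a task reference):
 * rpc_wait_for_completion_task() in sunrpc/sched.h wraps the function
 * above with a NULL action, giving a killable wait:
 *
 *	if (rpc_wait_for_completion_task(task) < 0)
 *		return -ERESTARTSYS;	// fatal signal while waiting
 */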

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (time_is_after_jiffies(timeout)) {
		__rpc_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
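
/*
 * Example (sketch; xprt->pending is the transport's wait queue, but
 * the surrounding states are hypothetical): one context parks the
 * task, another wakes it when the awaited event arrives:
 *
 *	// in a state function, while waiting for a reply:
 *	rpc_sleep_on(&xprt->pending, task, NULL);
 *
 *	// in the receive path, once data is available:
 *	rpc_wake_up_queued_task(&xprt->pending, task);
 */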

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}
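
/*
 * Worked example of the batching above: rpc_set_waitqueue_priority()
 * sets queue->nr = 1U << priority when the level changes, and the
 * "--queue->nr" countdown bounds how many consecutive wakeups one
 * level may supply before the do/while rotates to the next non-empty
 * level (possibly landing back on the same one and resetting the
 * count). Higher levels thus get geometrically larger batches while
 * lower levels still make progress.
 */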

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock_bh(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
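
/*
 * Example (sketch; "my_retry_state" is a hypothetical state function):
 * callers typically queue the next FSM state and then park the task on
 * the delay_queue; __rpc_atrun() clears the -ETIMEDOUT status that the
 * queue timer uses to wake the task:
 *
 *	task->tk_action = my_retry_state;
 *	rpc_delay(task, 3 * HZ);	// retry in ~3 seconds
 */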

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task))
			rpc_exit(task, -ERESTARTSYS);

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/**
 * rpc_execute - execute a task synchronously
 * @task: task to be executed
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
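
/*
 * Layout of the region handed out by rpc_malloc() (derived from the
 * code above): a single allocation holds the rpc_buffer header, the
 * call buffer and the receive buffer, which is why rpc_free() can
 * recover the header from rq_buffer alone via container_of():
 *
 *	[ struct rpc_buffer | rq_callsize bytes | rq_rcvsize bytes ]
 *	                      ^ rq_buffer         ^ rq_rbuffer
 */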

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}
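
/*
 * Example (sketch; "clnt" and "my_ops" are hypothetical): most callers
 * reach rpc_new_task() through rpc_run_task() in clnt.c, which builds
 * the task from a struct rpc_task_setup and then calls rpc_execute().
 * Setting RPC_TASK_ASYNC makes rpc_make_runnable() hand the task to
 * rpciod instead of running the state machine in the caller's context:
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.callback_ops	= &my_ops,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *	if (!IS_ERR(task))
 *		rpc_put_task(task);
 */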

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * we own the reference to this task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}