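/*
 *  net/sunrpc/xprt.c
 *
 *  Generic RPC transport layer: request slot allocation, serialized
 *  write access to transports, Van Jacobson congestion avoidance,
 *  retransmit timeout management, and connection handling shared by
 *  the SUNRPC transport implementations.
 */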
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
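/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded with the same transport
 * identifier, then a failure status of -EEXIST is returned.  Otherwise
 * zero is returned and the transport is available for use.
 */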
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
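/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */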
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
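/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */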
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
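/**
 * xprt_reserve_xprt - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */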
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
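/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion avoidance is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */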
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}
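/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */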
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
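/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */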
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
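/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. The caller puts the task to sleep if this is the case.
 */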
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
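/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion.
 */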
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
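/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */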
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
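/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */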
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
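/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */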
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
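/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */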
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
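/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */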
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
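/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */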
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
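/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */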
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
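/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */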
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}
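/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */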
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
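/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */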
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
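/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */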
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
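/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */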
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}
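/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */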
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
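/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */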
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;

	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}
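/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */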
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
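/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */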
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);
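/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */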
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}
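/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */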
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
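/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */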
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
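/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */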
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
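/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */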
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
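/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */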
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}