/*
 * Backchannel rpc_rqst preallocation for the SUNRPC transport layer.
 *
 * The backchannel shares the forechannel's connection, so callback
 * requests cannot be allocated when they arrive off the wire; the
 * routines in this file preallocate them and recycle them instead.
 */
24#include <linux/tcp.h>
25#include <linux/slab.h>
26#include <linux/sunrpc/xprt.h>
27#include <linux/export.h>
28#include <linux/sunrpc/bc_xprt.h>
29
30#ifdef RPC_DEBUG
31#define RPCDBG_FACILITY RPCDBG_TRANS
32#endif
33
34
35
36
37
38static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
39{
40 return xprt->bc_alloc_count > 0;
41}
42
43static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
44{
45 xprt->bc_alloc_count += n;
46}
47
48static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
49{
50 return xprt->bc_alloc_count -= n;
51}
52
53
54
55
56
57static void xprt_free_allocation(struct rpc_rqst *req)
58{
59 struct xdr_buf *xbufp;
60
61 dprintk("RPC: free allocations for req= %p\n", req);
62 BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
63 xbufp = &req->rq_private_buf;
64 free_page((unsigned long)xbufp->head[0].iov_base);
65 xbufp = &req->rq_snd_buf;
66 free_page((unsigned long)xbufp->head[0].iov_base);
67 list_del(&req->rq_bc_pa_list);
68 kfree(req);
69}
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
90{
91 struct page *page_rcv = NULL, *page_snd = NULL;
92 struct xdr_buf *xbufp = NULL;
93 struct rpc_rqst *req, *tmp;
94 struct list_head tmp_list;
95 int i;
96
97 dprintk("RPC: setup backchannel transport\n");
98
99
100
101
102
103
104
105
106
107 INIT_LIST_HEAD(&tmp_list);
108 for (i = 0; i < min_reqs; i++) {
109
110 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
111 if (req == NULL) {
112 printk(KERN_ERR "Failed to create bc rpc_rqst\n");
113 goto out_free;
114 }
115
116
117 dprintk("RPC: adding req= %p\n", req);
118 list_add(&req->rq_bc_pa_list, &tmp_list);
119
120 req->rq_xprt = xprt;
121 INIT_LIST_HEAD(&req->rq_list);
122 INIT_LIST_HEAD(&req->rq_bc_list);
123
124
125 page_rcv = alloc_page(GFP_KERNEL);
126 if (page_rcv == NULL) {
127 printk(KERN_ERR "Failed to create bc receive xbuf\n");
128 goto out_free;
129 }
130 xbufp = &req->rq_rcv_buf;
131 xbufp->head[0].iov_base = page_address(page_rcv);
132 xbufp->head[0].iov_len = PAGE_SIZE;
133 xbufp->tail[0].iov_base = NULL;
134 xbufp->tail[0].iov_len = 0;
135 xbufp->page_len = 0;
136 xbufp->len = PAGE_SIZE;
137 xbufp->buflen = PAGE_SIZE;
138
139
140 page_snd = alloc_page(GFP_KERNEL);
141 if (page_snd == NULL) {
142 printk(KERN_ERR "Failed to create bc snd xbuf\n");
143 goto out_free;
144 }
145
146 xbufp = &req->rq_snd_buf;
147 xbufp->head[0].iov_base = page_address(page_snd);
148 xbufp->head[0].iov_len = 0;
149 xbufp->tail[0].iov_base = NULL;
150 xbufp->tail[0].iov_len = 0;
151 xbufp->page_len = 0;
152 xbufp->len = 0;
153 xbufp->buflen = PAGE_SIZE;
154 }
155
156
157
158
159 spin_lock_bh(&xprt->bc_pa_lock);
160 list_splice(&tmp_list, &xprt->bc_pa_list);
161 xprt_inc_alloc_count(xprt, min_reqs);
162 spin_unlock_bh(&xprt->bc_pa_lock);
163
164 dprintk("RPC: setup backchannel transport done\n");
165 return 0;
166
167out_free:
168
169
170
171 list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
172 xprt_free_allocation(req);
173
174 dprintk("RPC: setup backchannel transport failed\n");
175 return -ENOMEM;
176}
177EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
178
179
180
181
182
183
184
185
186
187
188void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
189{
190 struct rpc_rqst *req = NULL, *tmp = NULL;
191
192 dprintk("RPC: destroy backchannel transport\n");
193
194 BUG_ON(max_reqs == 0);
195 spin_lock_bh(&xprt->bc_pa_lock);
196 xprt_dec_alloc_count(xprt, max_reqs);
197 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
198 dprintk("RPC: req=%p\n", req);
199 xprt_free_allocation(req);
200 if (--max_reqs == 0)
201 break;
202 }
203 spin_unlock_bh(&xprt->bc_pa_lock);
204
205 dprintk("RPC: backchannel list empty= %s\n",
206 list_empty(&xprt->bc_pa_list) ? "true" : "false");
207}
208EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
209
210
211
212
213
214
215
216
217
218
219
220
221struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
222{
223 struct rpc_rqst *req;
224
225 dprintk("RPC: allocate a backchannel request\n");
226 spin_lock(&xprt->bc_pa_lock);
227 if (!list_empty(&xprt->bc_pa_list)) {
228 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
229 rq_bc_pa_list);
230 list_del(&req->rq_bc_pa_list);
231 } else {
232 req = NULL;
233 }
234 spin_unlock(&xprt->bc_pa_lock);
235
236 if (req != NULL) {
237 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
238 req->rq_reply_bytes_recvd = 0;
239 req->rq_bytes_sent = 0;
240 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
241 sizeof(req->rq_private_buf));
242 }
243 dprintk("RPC: backchannel req=%p\n", req);
244 return req;
245}
246
247
248
249
250
/*
 * Return a backchannel request once the callback has been processed:
 * either requeue it on the transport's preallocation list for reuse,
 * or free it outright if no preallocated slots remain accounted
 * (i.e. the backchannel has been torn down while this entry was busy).
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	/*
	 * Full memory barriers around clearing the in-use bit, pairing
	 * the release of this request with its next acquisition.
	 */
	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The preallocation accounting dropped to zero while this
		 * entry was in use (backchannel destroyed), so free the
		 * entry instead of putting it back on the list.
		 *
		 * NOTE(review): bc_alloc_count is read here without
		 * bc_pa_lock; presumably a concurrent
		 * xprt_destroy_backchannel() could race with this check —
		 * confirm the intended locking model with callers.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the preallocation list so it can be reused by
	 * the next incoming callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}
282
283