/*
 * Support for the NFSv4.1 backchannel: preallocation, lookup, and
 * recycling of the rpc_rqst structures used to service RPC callback
 * requests that arrive on a client transport.
 */
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
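
/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */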
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}
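
/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */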
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req=%p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;

	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}
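
/**
 * xprt_setup_backchannel - preallocate backchannel rpc_rqst entries
 * @xprt: the transport that will carry the callback requests
 * @min_reqs: the minimum number of preallocated request structures
 *
 * Preallocates rpc_rqst structures, together with one page each of
 * send and receive XDR buffer space, so that incoming callback
 * requests can be serviced without allocating memory in the receive
 * path.  The actual work is delegated to the transport's bc_setup
 * method; transports without backchannel support simply return 0.
 */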
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req=%p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				       struct rpc_rqst,
				       rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
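
/**
 * xprt_destroy_backchannel - destroy backchannel preallocated structures
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */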
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty=%s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
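
/*
 * Allocate (or replenish) a preallocated backchannel rpc_rqst for the
 * given XID.  The caller holds xprt->bc_pa_lock; the GFP_ATOMIC
 * allocation below may grow the pool from the receive path when free
 * slots remain but the preallocation list is empty.
 */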
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
			       rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	       sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}
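
/*
 * Return the preallocated rpc_rqst structure and XDR buffers to the
 * transport.  The actual work is delegated to the transport's
 * bc_free_rqst method.
 */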
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * to have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}
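
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Look for a free entry matching @xid on the
 * current connection, or fall back to xprt_alloc_bc_request() to
 * allocate a fresh one.  This runs in the transport's receive path,
 * which is why the plain spin_lock (rather than the _bh variant)
 * suffices here.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */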
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
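
/*
 * Add a callback request to the callback service's list.  The callback
 * service sleeps on sv_cb_waitq waiting for new requests, so wake it
 * up after enqueuing the request.
 */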
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}