1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
/*
 * Private header for the client-side RPC-over-RDMA transport
 * (SUNRPC xprtrdma). Shared by the transport, verbs, marshaling,
 * and FRWR memory-registration source files.
 */
#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/llist.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/xprtrdma.h>

/* RDMA CM address/route resolution timeout (presumably milliseconds,
 * as taken by rdma_resolve_addr() — TODO confirm) and connect retries. */
#define RDMA_RESOLVE_TIMEOUT	(5000)
#define RDMA_CONNECT_RETRY_MAX	(2)

/* RPC transport timeouts, in jiffies. */
#define RPCRDMA_BIND_TO		(60U * HZ)	/* server bind */
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)	/* initial re-establish */
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)	/* maximum re-establish */
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)	/* idle disconnect */
67
68
69
70
struct rpcrdma_mr;

/*
 * struct rpcrdma_ep - per-connection hardware endpoint state
 *
 * Refcounted (re_kref); wraps the RDMA CM id and protection domain
 * for one connection instance, plus negotiated size limits and
 * connect/disconnect status shared with the owning rpc_xprt.
 */
struct rpcrdma_ep {
	struct kref		re_kref;	/* endpoint lifetime */
	struct rdma_cm_id 	*re_id;		/* connection identity */
	struct ib_pd		*re_pd;		/* protection domain */
	unsigned int		re_max_rdma_segs;
	unsigned int		re_max_fr_depth;	/* max fast-reg page depth */
	struct rpcrdma_mr	*re_write_pad_mr;	/* MR backing re_write_pad */
	enum ib_mr_type		re_mrtype;
	struct completion	re_done;	/* signals async CM events */
	unsigned int		re_send_count;	/* sends since last signal */
	unsigned int		re_send_batch;	/* signal every N sends */
	unsigned int		re_max_inline_send;
	unsigned int		re_max_inline_recv;
	int			re_async_rc;	/* rc from async CM handler */
	int			re_connect_status;	/* +1 connected, <0 errno-style */
	atomic_t		re_receiving;	/* receives being posted */
	atomic_t		re_force_disconnect;
	struct ib_qp_init_attr	re_attr;	/* QP creation attributes */
	wait_queue_head_t 	re_connect_wait;	/* waiters for connect result */
	struct rpc_xprt		*re_xprt;	/* owning transport */
	struct rpcrdma_connect_private
				re_cm_private;	/* CM private data we send */
	struct rdma_conn_param	re_remote_cma;	/* peer's connection params */
	int			re_receive_count;	/* posted-but-unreaped receives */
	unsigned int		re_max_requests;	/* depends on device caps */
	unsigned int		re_inline_send;		/* negotiated */
	unsigned int		re_inline_recv;		/* negotiated */

	atomic_t		re_completion_ids;	/* CQE trace id generator */

	/* padding source for unaligned RDMA Writes (XDR_UNIT bytes) */
	char			re_write_pad[XDR_UNIT];
};
104
105
106
107
108
109
110
/* Number of extra send/receive WRs reserved for backchannel
 * (callback) operations; zero when backchannel support is
 * compiled out. */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS (32)
#else
#define RPCRDMA_BACKWARD_WRS (0)
#endif
116
117
118
119
/*
 * struct rpcrdma_regbuf - a DMA-mappable buffer registered with the device
 *
 * rg_iov holds the address/length/lkey triple used in SGEs; rg_device
 * is non-NULL only while the buffer is DMA-mapped (see
 * rpcrdma_regbuf_is_mapped()); rg_data is the kernel virtual address.
 */
struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	void			*rg_data;
};
126
127static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
128{
129 return rb->rg_iov.addr;
130}
131
132static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
133{
134 return rb->rg_iov.length;
135}
136
137static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
138{
139 return rb->rg_iov.lkey;
140}
141
142static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
143{
144 return rb->rg_device;
145}
146
147static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
148{
149 return rb->rg_data;
150}
151
/* Default allocation flags for transport buffers: GFP_NOIO avoids
 * recursing into I/O under memory pressure; __GFP_NOWARN suppresses
 * allocation-failure warnings since failure is handled by callers. */
#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/* Maximum number of chunk segments carried in a transport header
 * (presumably bounds pre-allocated marshaling space — TODO confirm
 * against rpc_rdma.c). */
enum {
	RPCRDMA_MAX_HDR_SEGS = 16,
};
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/*
 * struct rpcrdma_rep - one posted Receive and its incoming RPC Reply
 *
 * Filled in by the Receive completion handler; rr_xid/rr_vers/rr_proc
 * cache the first words of the RPC-over-RDMA header for matching the
 * reply to its rpc_rqst (rr_rqst).
 */
struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;		/* completion context */
	struct rpc_rdma_cid	rr_cid;		/* trace point correlation id */

	__be32			rr_xid;		/* XID of the reply */
	__be32			rr_vers;	/* protocol version field */
	__be32			rr_proc;	/* RDMA_MSG/RDMA_NOMSG/... field */
	int			rr_wc_flags;	/* flags from the ib_wc */
	u32			rr_inv_rkey;	/* rkey remotely invalidated, if any */
	bool			rr_temp;	/* on-demand rep; free after use */
	struct rpcrdma_regbuf	*rr_rdmabuf;	/* receive buffer */
	struct rpcrdma_xprt	*rr_rxprt;	/* owning transport */
	struct rpc_rqst		*rr_rqst;	/* matched request, once found */
	struct xdr_buf		rr_hdrbuf;	/* xdr view of the header */
	struct xdr_stream	rr_stream;	/* header decode stream */
	struct llist_node	rr_node;	/* on rb_free_reps free list */
	struct ib_recv_wr	rr_recv_wr;	/* the posted Receive WR */
	struct list_head	rr_all;		/* on rb_all_reps */
};
207
208
209
210
211
212
213
/* Maximum number of Receive WRs chained together in one
 * ib_post_recv() call when replenishing receives. */
enum {
	RPCRDMA_MAX_RECV_BATCH = 7,
};
217
218
219
struct rpcrdma_req;

/*
 * struct rpcrdma_sendctx - state for one in-flight Send
 *
 * Tracks the SGE array of a posted Send WR so the DMA mappings
 * (sc_unmap_count of them) can be undone on Send completion.
 * sc_sges is a flexible array sized at sendctx allocation time.
 */
struct rpcrdma_sendctx {
	struct ib_cqe		sc_cqe;		/* Send completion context */
	struct rpc_rdma_cid	sc_cid;		/* trace point correlation id */
	struct rpcrdma_req	*sc_req;	/* owning request */
	unsigned int		sc_unmap_count;	/* SGEs to DMA-unmap */
	struct ib_sge		sc_sges[];
};
228
229
230
231
232
233
234
struct rpcrdma_req;

/*
 * struct rpcrdma_mr - one memory region used to register RPC payloads
 *
 * The union reflects that an MR is either being fast-registered
 * (mr_regwr) or invalidated (mr_invwr) at any given time, never both.
 * mr_handle/mr_length/mr_offset form the chunk triple advertised to
 * the server.
 */
struct rpcrdma_mr {
	struct list_head	mr_list;	/* free or registered list linkage */
	struct rpcrdma_req	*mr_req;	/* request currently using this MR */

	struct ib_mr		*mr_ibmr;	/* underlying verbs MR */
	struct ib_device	*mr_device;	/* device the SG list is mapped to */
	struct scatterlist	*mr_sg;		/* payload scatter/gather list */
	int			mr_nents;	/* entries in mr_sg */
	enum dma_data_direction	mr_dir;
	struct ib_cqe		mr_cqe;		/* reg/inv completion context */
	struct completion	mr_linv_done;	/* signals local invalidate done */
	union {
		struct ib_reg_wr	mr_regwr;	/* fast-register WR */
		struct ib_send_wr	mr_invwr;	/* local-invalidate WR */
	};
	struct rpcrdma_xprt	*mr_xprt;	/* owning transport */
	u32			mr_handle;	/* advertised rkey */
	u32			mr_length;	/* advertised length */
	u64			mr_offset;	/* advertised offset */
	struct list_head	mr_all;		/* on rb_all_mrs */
	struct rpc_rdma_cid	mr_cid;		/* trace point correlation id */
};
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
/* Upper bound on scatter/gather segments per RPC: enough data
 * segments to cover a 1MB payload at PAGE_SIZE granularity (plus one
 * for misalignment), and a few iov segments for head/tail buffers. */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};
286
287
/*
 * struct rpcrdma_mr_seg - one segment of an RPC payload awaiting
 * registration: a page (or NULL for a kva-backed segment — TODO
 * confirm against frwr_map()), a byte offset, and a length.
 */
struct rpcrdma_mr_seg {
	u32		mr_len;		/* length of segment */
	struct page	*mr_page;	/* underlying struct page */
	u64		mr_offset;	/* offset into the page/buffer */
};
293
294
295
296
297
298
299
300
301
302
303
/* Send SGE budget: at minimum 3 (transport header, RPC header,
 * and one more); at maximum one each for the transport header and
 * xdr_buf head, one per inline payload page, and one for the tail. */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};
309
struct rpcrdma_buffer;

/*
 * struct rpcrdma_req - per-RPC send-side state
 *
 * Embeds the rpc_rqst slot (rl_slot) so an rpc_rqst can be converted
 * back with rpcr_to_rdmar(). Holds the marshaling stream, the Send WR,
 * the three registered buffers (header, send payload, receive payload),
 * and the MRs free/registered for this request.
 */
struct rpcrdma_req {
	struct list_head	rl_list;	/* buffer-pool linkage */
	struct rpc_rqst		rl_slot;	/* embedded generic request */
	struct rpcrdma_rep	*rl_reply;	/* matched reply, if any */
	struct xdr_stream	rl_stream;	/* header encode stream */
	struct xdr_buf		rl_hdrbuf;	/* xdr view of rl_rdmabuf */
	struct ib_send_wr	rl_wr;		/* the Send WR */
	struct rpcrdma_sendctx	*rl_sendctx;	/* SGEs for the posted Send */
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct list_head	rl_all;		/* on rb_allreqs */
	struct kref		rl_kref;	/* pins req until Send + Reply done */

	struct list_head	rl_free_mrs;	/* MRs available to this req */
	struct list_head	rl_registered;	/* MRs in use by this req */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};
330
/* Convert a generic rpc_rqst back to its containing rpcrdma_req
 * (valid because rl_slot is embedded in struct rpcrdma_req). */
static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}
336
/* Push an MR onto the front of @list. Caller is responsible for
 * any locking the list requires. */
static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}
342
343static inline struct rpcrdma_mr *
344rpcrdma_mr_pop(struct list_head *list)
345{
346 struct rpcrdma_mr *mr;
347
348 mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
349 if (mr)
350 list_del_init(&mr->mr_list);
351 return mr;
352}
353
354
355
356
357
358
359
/*
 * struct rpcrdma_buffer - per-transport pools of reqs, reps, MRs,
 * and send contexts
 *
 * rb_lock protects the send-buffer and MR free lists; free reps live
 * on a lock-free llist (rb_free_reps). The sendctx ring is indexed by
 * rb_sc_head/rb_sc_tail modulo rb_sc_last + 1 (TODO confirm exact
 * ring arithmetic against verbs.c).
 */
struct rpcrdma_buffer {
	spinlock_t		rb_lock;	/* protects lists below */
	struct list_head	rb_send_bufs;	/* free rpcrdma_reqs */
	struct list_head	rb_mrs;		/* free MRs */

	unsigned long		rb_sc_head;	/* sendctx ring producer */
	unsigned long		rb_sc_tail;	/* sendctx ring consumer */
	unsigned long		rb_sc_last;	/* ring size - 1 */
	struct rpcrdma_sendctx	**rb_sc_ctxs;	/* the sendctx ring */

	struct list_head	rb_allreqs;	/* every req ever created */
	struct list_head	rb_all_mrs;	/* every MR ever created */
	struct list_head	rb_all_reps;	/* every rep ever created */

	struct llist_head	rb_free_reps;	/* reps ready for posting */

	__be32			rb_max_requests;	/* wire credit limit */
	u32			rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;	/* backchannel slots */
	u32			rb_bc_max_requests;

	struct work_struct	rb_refresh_worker;	/* replenishes MRs */
};
384
385
386
387
/*
 * struct rpcrdma_stats - transport statistics, reported via
 * xprt_rdma_print_stats(). Counters are not atomic; they are
 * best-effort (presumably updated under existing serialization —
 * TODO confirm).
 */
struct rpcrdma_stats {
	/* send-side (accessed when sending a call) */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;
	unsigned long long	total_rdma_request;

	/* rarely accessed error/maintenance counters */
	unsigned long long	pullup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recycled;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;

	/* receive-side (accessed when receiving a reply) */
	unsigned long long	total_rdma_reply;
	unsigned long long	fixup_copy_count;
	unsigned long		reply_waits_for_send;
	unsigned long		local_inv_needed;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};
413
414
415
416
417
418
419
420
421
422
423
/*
 * struct rpcrdma_xprt - the RDMA transport instance
 *
 * Embeds the generic rpc_xprt (rx_xprt), so rpcx_to_rdmax() can
 * recover the RDMA transport from a generic xprt pointer. rx_ep is
 * the current connection's endpoint; rx_connect_worker drives
 * (re)connection attempts.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;	/* embedded generic transport */
	struct rpcrdma_ep	*rx_ep;		/* current endpoint */
	struct rpcrdma_buffer	rx_buf;		/* resource pools */
	struct delayed_work	rx_connect_worker;	/* connect/reconnect work */
	struct rpc_timeout	rx_timeout;	/* per-transport RPC timeouts */
	struct rpcrdma_stats	rx_stats;
};

/* Recover the rpcrdma_xprt from its embedded rpc_xprt. */
#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
434
435static inline const char *
436rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
437{
438 return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
439}
440
441static inline const char *
442rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
443{
444 return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
445}
446
447
448
449
/* Module parameter: enable pad optimization (defined elsewhere in
 * the transport — presumably transport.c, TODO confirm). */
extern int xprt_rdma_pad_optimize;

/* Module parameter: memory registration strategy (historically only
 * FRWR remains — TODO confirm supported values). */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Endpoint (connection) calls — implemented in verbs.c
 */
void rpcrdma_force_disconnect(struct rpcrdma_ep *ep);
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);

void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
466
467
468
469
/*
 * Buffer pool calls — req/rep/MR/sendctx lifetime management
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags);
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
			struct rpcrdma_req *req);
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);

/* Registered-buffer helpers; the slow DMA-map path is out of line. */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
			    gfp_t flags);
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb);
491
492
493
494
495
496
497static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
498{
499 return rb->rg_device != NULL;
500}
501
502
503
504
505
506
507
508
/* Ensure @rb is DMA-mapped before use; the common case (already
 * mapped) stays inline, the slow path calls out of line. Returns
 * false if mapping fails. */
static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
					  struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
}
516
517
518
519
520
521static inline enum dma_data_direction
522rpcrdma_data_dir(bool writing)
523{
524 return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
525}
526
527
528
/*
 * Memory registration calls — FRWR (Fast Registration Work Requests),
 * implemented in frwr_ops.c
 */
void frwr_reset(struct rpcrdma_req *req);
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
void frwr_mr_release(struct rpcrdma_mr *mr);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
int frwr_wp_create(struct rpcrdma_xprt *r_xprt);
542
543
544
545
546
/*
 * enum rpcrdma_chunktype - how an RPC call or reply payload is
 * conveyed: inline (no chunks, with pulled-up or DMA-mapped
 * variants), via a Read list, via a Write list, or via a Reply
 * chunk. See RFC 8166 for the on-the-wire chunk types.
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* inline, no chunks */
	rpcrdma_noch_pullup,	/* inline, payload copied into sendbuf */
	rpcrdma_noch_mapped,	/* inline, payload DMA-mapped in place */
	rpcrdma_readch,		/* Read chunk (position-nonzero) */
	rpcrdma_areadch,	/* entire call via Read chunk */
	rpcrdma_writech,	/* Write chunk */
	rpcrdma_replych		/* Reply chunk */
};
556
/*
 * RPC/RDMA protocol calls — marshaling and reply handling,
 * implemented in rpc_rdma.c
 */
int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req, u32 hdrlen,
			      struct xdr_buf *xdr,
			      enum rpcrdma_chunktype rtype);
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
568
569static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
570{
571 xdr->head[0].iov_len = len;
572 xdr->len = len;
573}
574
575
576
/* RPC transport layer — implemented in transport.c */
extern unsigned int xprt_rdma_max_inline_read;
extern unsigned int xprt_rdma_max_inline_write;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_close(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);
585
586
587
/* Backchannel calls — implemented in backchannel.c */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif	/* _LINUX_SUNRPC_XPRT_RDMA_H */
602