/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	struct ib_device	*ri_device;
	struct rdma_cm_id	*ri_id;
	struct ib_pd		*ri_pd;
	struct completion	ri_done;
	struct completion	ri_remove_done;
	int			ri_async_rc;
	unsigned int		ri_max_segs;
	unsigned int		ri_max_frwr_depth;
	unsigned int		ri_max_inline_write;
	unsigned int		ri_max_inline_read;
	unsigned int		ri_max_send_sges;
	bool			ri_implicit_roundup;
	enum ib_mr_type		ri_mrtype;
	unsigned long		ri_flags;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

enum {
	RPCRDMA_IAF_REMOVING = 0,
};

/*
 * RDMA Endpoint -- one per transport instance
 */
struct rpcrdma_ep {
	unsigned int		rep_send_count;
	unsigned int		rep_send_batch;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t	rep_connect_wait;
	struct rpcrdma_connect_private	rep_cm_private;
	struct rdma_conn_param	rep_remote_cma;
	struct delayed_work	rep_connect_worker;
};

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up, long before the
 * backchannel is provisioned.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS		(8)
#else
#define RPCRDMA_BACKWARD_WRS		(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */
struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct ib_device *
rdmab_device(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device;
}
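
/* The accessors above let send and receive paths fill in an ib_sge
 * directly from a regbuf. An illustrative sketch (a hypothetical
 * caller, not a declaration in this header):
 *
 *	struct ib_sge sge = {
 *		.addr	= rdmab_addr(rb),
 *		.length	= rdmab_length(rb),
 *		.lkey	= rdmab_lkey(rb),
 *	};
 */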

#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the RPC-over-RDMA header is another
 * 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 8,
	RPCRDMA_HDRBUF_SIZE = 256,
};

/*
 * struct rpcrdma_rep -- this structure encapsulates state required
 * to receive and complete an RPC Reply, asynchronously. It needs
 * several pieces of state:
 *
 *   o receive buffer and ib_sge (donated to provider)
 *   o status of receive (success or not, length, inv rkey)
 *   o bookkeeping state to get run by reply handler (XDR stream)
 *
 * These structures are allocated during transport initialization.
 * N of these are associated with a transport instance, managed by
 * struct rpcrdma_buffer. N is the max number of outstanding RPCs.
 */
struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	__be32			rr_xid;
	__be32			rr_vers;
	__be32			rr_proc;
	int			rr_wc_flags;
	u32			rr_inv_rkey;
	bool			rr_temp;
	struct rpcrdma_regbuf	*rr_rdmabuf;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct xdr_buf		rr_hdrbuf;
	struct xdr_stream	rr_stream;
	struct rpc_rqst		*rr_rqst;
	struct list_head	rr_list;
	struct ib_recv_wr	rr_recv_wr;
};

/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
 */
struct rpcrdma_req;
struct rpcrdma_xprt;
struct rpcrdma_sendctx {
	struct ib_send_wr	sc_wr;
	struct ib_cqe		sc_cqe;
	struct rpcrdma_xprt	*sc_xprt;
	struct rpcrdma_req	*sc_req;
	unsigned int		sc_unmap_count;
	struct ib_sge		sc_sges[];
};

/* Limit the number of SGEs that can be unmapped during one
 * Send completion. This caps the amount of work a single
 * completion can do before returning to the provider.
 *
 * Setting this to zero disables Send completion batching.
 */
enum {
	RPCRDMA_MAX_SEND_BATCH = 7,
};

/*
 * struct rpcrdma_mr - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered). Each rpcrdma_buffer keeps a
 * list of free MRs anchored in rb_mrs; the send path takes them as
 * needed to register the chunks of an outgoing RPC.
 */
enum rpcrdma_frwr_state {
	FRWR_IS_INVALID,	/* ready to be used */
	FRWR_IS_VALID,		/* in use */
	FRWR_FLUSHED_FR,	/* flushed FASTREG WR */
	FRWR_FLUSHED_LI,	/* flushed LOCALINV WR */
};

struct rpcrdma_frwr {
	struct ib_mr		*fr_mr;
	struct ib_cqe		fr_cqe;
	enum rpcrdma_frwr_state	fr_state;
	struct completion	fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fm_mr;
	u64			*fm_physaddrs;
};

struct rpcrdma_mr {
	struct list_head	mr_list;
	struct scatterlist	*mr_sg;
	int			mr_nents;
	enum dma_data_direction	mr_dir;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frwr	frwr;
	};
	struct rpcrdma_xprt	*mr_xprt;
	u32			mr_handle;
	u32			mr_length;
	u64			mr_offset;
	struct list_head	mr_all;
};

/* RPCRDMA_MAX_DATA_SEGS bounds the number of segments needed to
 * convey a 1MB payload in page-size chunks, plus one extra segment
 * in case the payload begins at a page-unaligned offset. The
 * additional IOV segments accommodate the RPC-over-RDMA header and
 * the xdr_buf head and tail iovecs.
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};

struct rpcrdma_mr_seg {		/* chunk descriptors */
	u32		mr_len;		/* length of chunk or segment */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */
struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_list;
	struct rpc_rqst		rl_slot;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;
	struct xdr_stream	rl_stream;
	struct xdr_buf		rl_hdrbuf;
	struct rpcrdma_sendctx	*rl_sendctx;
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct list_head	rl_all;
	unsigned long		rl_flags;

	struct list_head	rl_registered;	/* registered segments */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

/* rl_flags */
enum {
	RPCRDMA_REQ_F_PENDING = 0,
	RPCRDMA_REQ_F_TX_RESOURCES,
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}
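
/* Because rl_slot is embedded in rpcrdma_req, the transport can
 * recover its private request state from the generic rpc_rqst
 * handed down by the RPC layer. A minimal sketch (hypothetical
 * caller, for illustration only):
 *
 *	static void example_send(struct rpc_rqst *rqst)
 *	{
 *		struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 *
 *		// req->rl_sendctx, req->rl_registered, and the
 *		// regbufs are now available to the send path.
 *	}
 */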

static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add_tail(&mr->mr_list, list);
}

static inline struct rpcrdma_mr *
rpcrdma_mr_pop(struct list_head *list)
{
	struct rpcrdma_mr *mr;

	mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
	list_del_init(&mr->mr_list);
	return mr;
}
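
/* rpcrdma_mr_pop() assumes the list is non-empty, so callers test
 * list_empty() first. An illustrative sketch (hypothetical caller;
 * locking depends on which list is being drained):
 *
 *	while (!list_empty(&req->rl_registered)) {
 *		struct rpcrdma_mr *mr;
 *
 *		mr = rpcrdma_mr_pop(&req->rl_registered);
 *		// invalidate or release "mr" here
 *	}
 */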

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mrlock;	/* protect rb_mrs list */
	struct list_head	rb_mrs;
	struct list_head	rb_all;

	unsigned long		rb_sc_head;
	unsigned long		rb_sc_tail;
	unsigned long		rb_sc_last;
	struct rpcrdma_sendctx	**rb_sc_ctxs;

	spinlock_t		rb_lock;	/* protect buf lists */
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	unsigned long		rb_flags;
	u32			rb_max_requests;
	u32			rb_credits;	/* most recent credit grant */
	int			rb_posted_receives;

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
	struct delayed_work	rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/* rb_flags */
enum {
	RPCRDMA_BUF_F_EMPTY_SCQ = 0,
};

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	/* accessed when sending a call */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;
	unsigned long long	total_rdma_request;

	/* rarely accessed error counters */
	unsigned long long	pullup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;

	/* accessed when receiving a reply */
	unsigned long long	total_rdma_reply;
	unsigned long long	fixup_copy_count;
	unsigned long		reply_waits_for_send;
	unsigned long		local_inv_needed;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	struct rpcrdma_mr_seg *
			(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mr **);
	int		(*ro_send)(struct rpcrdma_ia *ia,
				   struct rpcrdma_req *req);
	void		(*ro_reminv)(struct rpcrdma_rep *rep,
				     struct list_head *mrs);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct list_head *);
	void		(*ro_recover_mr)(struct rpcrdma_mr *mr);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init_mr)(struct rpcrdma_ia *,
				      struct rpcrdma_mr *);
	void		(*ro_release_mr)(struct rpcrdma_mr *mr);
	const char	*ro_displayname;
	const int	ro_send_w_inv_ok;
};

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

static inline const char *
rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
}

static inline const char *
rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
}

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain NFS WRITE workloads.
 */
extern int xprt_rdma_pad_optimize;

/* This setting controls the hunt for a supported memory
 * registration strategy.
 */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);

extern struct workqueue_struct *rpcrdma_receive_wq;

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_conn_func(struct rpcrdma_ep *ep);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);

struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
					    gfp_t);
bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);

static inline bool
rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

static inline bool
rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_dma_map_regbuf(ia, rb);
}
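
/* Regbufs are DMA mapped lazily: the fast path above is a simple
 * NULL check, and only the first use of a buffer pays for the
 * mapping. A sketch of a hypothetical send-path caller:
 *
 *	if (!rpcrdma_dma_map_regbuf(ia, req->rl_sendbuf))
 *		return -EIO;	// mapping failed; cannot post this WR
 */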

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */
static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
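
/* Convention: "writing" means the remote will write into this memory
 * (an NFS READ payload), so it is mapped DMA_FROM_DEVICE; otherwise
 * the remote reads from it (an NFS WRITE payload), so it is mapped
 * DMA_TO_DEVICE. An illustrative mapping step (hypothetical caller):
 *
 *	mr->mr_dir = rpcrdma_data_dir(writing);
 *	if (!ib_dma_map_sg(ia->ri_device, mr->mr_sg, mr->mr_nents,
 *			   mr->mr_dir))
 *		goto out_dmamap_err;
 */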

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req, u32 hdrlen,
			      struct xdr_buf *xdr,
			      enum rpcrdma_chunktype rtype);
void rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req);
void rpcrdma_deferred_completion(struct work_struct *work);

static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{
	xdr->head[0].iov_len = len;
	xdr->len = len;
}
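
/* Helper for paths (such as the backchannel) that manage an xdr_buf
 * directly rather than through the socket transport code. For
 * instance, a fresh request buffer can be marked empty with:
 *
 *	rpcrdma_set_xdrlen(&rqst->rq_snd_buf, 0);
 */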

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void rpcrdma_connect_worker(struct work_struct *work);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */