#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>

#include <net/sock.h>
#include <linux/in.h>

#define DEBUG_SUBSYSTEM S_LND

#include <linux/libcfs/libcfs.h>
#include <linux/lnet/lnet.h>
#include <linux/lnet/lib-lnet.h>
#include <linux/lnet/lnet-sysctl.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

#define IBLND_PEER_HASH_SIZE         101    /* # peer lists */
#define IBLND_RESCHED                100    /* # scheduler loops before reschedule */

#define IBLND_N_SCHED                2
#define IBLND_N_SCHED_HIGH           4

typedef struct
{
        int              *kib_dev_failover;           /* HCA failover */
        unsigned int     *kib_service;                /* IB service number */
        int              *kib_min_reconnect_interval; /* first failed connection retry... */
        int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
        int              *kib_cksum;                  /* checksum kib_msg_t? */
        int              *kib_timeout;                /* comms timeout (seconds) */
        int              *kib_keepalive;              /* keepalive timeout (seconds) */
        int              *kib_ntx;                    /* # tx descriptors */
        int              *kib_credits;                /* # concurrent sends */
        int              *kib_peertxcredits;          /* # concurrent sends to 1 peer */
        int              *kib_peerrtrcredits;         /* # per-peer router buffer credits */
        int              *kib_peercredits_hiw;        /* # when eagerly to return credits */
        int              *kib_peertimeout;            /* seconds to consider peer dead */
        char            **kib_default_ipif;           /* default IPoIB interface */
        int              *kib_retry_count;
        int              *kib_rnr_retry_count;
        int              *kib_concurrent_sends;       /* send work queue sizing */
        int              *kib_ib_mtu;                 /* IB MTU */
        int              *kib_map_on_demand;          /* map-on-demand if RD has more
                                                       * fragments than this value;
                                                       * 0 disables map-on-demand */
        int              *kib_pmr_pool_size;          /* # physical MRs in pool */
        int              *kib_fmr_pool_size;          /* # FMRs in pool */
        int              *kib_fmr_flush_trigger;      /* when to trigger FMR flush */
        int              *kib_fmr_cache;              /* enable FMR pool cache? */
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
        ctl_table_header_t *kib_sysctl;               /* sysctl interface */
#endif
        int              *kib_require_priv_port;      /* accept only privileged ports */
        int              *kib_use_priv_port;          /* use privileged port for active connect */
        int              *kib_nscheds;                /* # threads on each CPT */
} kib_tunables_t;

extern kib_tunables_t kiblnd_tunables;

#define IBLND_MSG_QUEUE_SIZE_V1      8      /* V1 only: # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1    7      /* V1 only: when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT        8      /* default conn credits */
#define IBLND_CREDITS_MAX            ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1)
                                            /* max # peer credits: -1 cast to the credit
                                             * field's type, i.e. 255 for a __u8 */

#define IBLND_MSG_QUEUE_SIZE(v)      ((v) == IBLND_MSG_VERSION_1 ? \
                                      IBLND_MSG_QUEUE_SIZE_V1 :    \
                                      *kiblnd_tunables.kib_peertxcredits)
#define IBLND_CREDITS_HIGHWATER(v)   ((v) == IBLND_MSG_VERSION_1 ? \
                                      IBLND_CREDIT_HIGHWATER_V1 :  \
                                      *kiblnd_tunables.kib_peercredits_hiw)
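
/*
 * Example: a version-1 peer always runs with the fixed values above
 * (queue depth 8, credits returned eagerly once 7 are owed); later
 * versions take both values from the module tunables instead.
 */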

#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)

static inline int
kiblnd_concurrent_sends_v1(void)
{
        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
                return IBLND_MSG_QUEUE_SIZE_V1 * 2;

        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
                return IBLND_MSG_QUEUE_SIZE_V1 / 2;

        return *kiblnd_tunables.kib_concurrent_sends;
}
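
/*
 * Worked example: with IBLND_MSG_QUEUE_SIZE_V1 == 8, a concurrent_sends
 * tunable of 64 is clamped down to 16 and a value of 1 is raised to 4;
 * anything in [4, 16] is used unchanged for version-1 peers.
 */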

#define IBLND_CONCURRENT_SENDS(v)    ((v) == IBLND_MSG_VERSION_1 ?   \
                                      kiblnd_concurrent_sends_v1() : \
                                      *kiblnd_tunables.kib_concurrent_sends)

#define IBLND_OOB_CAPABLE(v)         ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)            (IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE               (4 << 10)      /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS         LNET_MAX_IOV   /* max # of fragments supported */
#define IBLND_CFG_RDMA_FRAGS         (*kiblnd_tunables.kib_map_on_demand != 0 ? \
                                      *kiblnd_tunables.kib_map_on_demand :      \
                                      IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v)          ((v) == IBLND_MSG_VERSION_1 ? \
                                      IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
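
/*
 * Sketch of the map-on-demand interaction: with the tunable left at 0 a
 * connection advertises the full IBLND_MAX_RDMA_FRAGS (LNET_MAX_IOV); a
 * nonzero value caps the advertised fragment count, pushing larger RDMAs
 * through the FMR/PMR mapping paths. Version-1 peers always negotiate
 * the maximum.
 */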

/* pool sizing */
#define IBLND_TX_POOL                256
#define IBLND_PMR_POOL               256
#define IBLND_FMR_POOL               256
#define IBLND_FMR_POOL_FLUSH         192

/* TX messages (shared by all connections) */
#define IBLND_TX_MSGS()              (*kiblnd_tunables.kib_ntx)

/* RX messages (per connection) */
#define IBLND_RX_MSGS(v)             (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
#define IBLND_RX_MSG_BYTES(v)        (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(v)        ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQ entries (per connection) */
#define IBLND_RECV_WRS(v)            IBLND_RX_MSGS(v)
#define IBLND_SEND_WRS(v)            ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
#define IBLND_CQ_ENTRIES(v)          (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
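
/*
 * Worked sizing example (assuming a V2 connection, a peer_credits
 * tunable of 8, and IBLND_MSG_SIZE == 4096): IBLND_RX_MSGS = 8 * 2 + 2
 * = 18 posted receives, i.e. 18 * 4096 = 73728 bytes of rx buffers
 * (18 pages when PAGE_SIZE is 4096). The CQ must then cover those 18
 * recv WRs plus (IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v)
 * send WRs.
 */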

struct kib_hca_dev;

/* o2iblnd can run over an aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE              IFALIASZ
#else
#define KIB_IFNAME_SIZE              256
#endif

typedef struct
{
        struct list_head      ibd_list;            /* chain on kib_devs */
        struct list_head      ibd_fail_list;       /* chain on kib_failed_devs */
        __u32                 ibd_ifip;            /* IPoIB interface IP */

        char                  ibd_ifname[KIB_IFNAME_SIZE]; /* IPoIB interface name */
        int                   ibd_nnets;           /* # nets extant */

        cfs_time_t            ibd_next_failover;
        int                   ibd_failed_failover; /* # failover failures */
        unsigned int          ibd_failover;        /* failover in progress */
        unsigned int          ibd_can_failover;    /* IPoIB interface is a bonding master */
        struct list_head      ibd_nets;
        struct kib_hca_dev   *ibd_hdev;
} kib_dev_t;

typedef struct kib_hca_dev
{
        struct rdma_cm_id    *ibh_cmid;            /* listener cmid */
        struct ib_device     *ibh_ibdev;           /* IB device */
        int                   ibh_page_shift;      /* page shift of current HCA */
        int                   ibh_page_size;       /* page size of current HCA */
        __u64                 ibh_page_mask;       /* page mask of current HCA */
        int                   ibh_mr_shift;        /* bit shift of max MR size */
        __u64                 ibh_mr_size;         /* size of MR */
        int                   ibh_nmrs;            /* # of global MRs */
        struct ib_mr        **ibh_mrs;             /* global MRs */
        struct ib_pd         *ibh_pd;              /* protection domain */
        kib_dev_t            *ibh_dev;             /* owner */
        atomic_t              ibh_ref;             /* refcount */
} kib_hca_dev_t;

/* pool tuning */
#define IBLND_POOL_DEADLINE          300           /* # of seconds to keep pool alive */
#define IBLND_POOL_RETRY             1             /* # of times to retry */

typedef struct
{
        int                   ibp_npages;          /* # pages */
        struct page          *ibp_pages[0];        /* page array */
} kib_pages_t;
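
/*
 * kib_pages_t is a flexible-array idiom: callers allocate
 * offsetof(kib_pages_t, ibp_pages[npages]) bytes so the page pointers
 * sit directly behind the count (see kiblnd_alloc_pages() below).
 */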

struct kib_pmr_pool;

typedef struct {
        struct list_head      pmr_list;            /* chain node */
        struct ib_phys_buf   *pmr_ipb;             /* physical buffer */
        struct ib_mr         *pmr_mr;              /* IB MR */
        struct kib_pmr_pool  *pmr_pool;            /* owner of this MR */
        __u64                 pmr_iova;            /* virtual I/O address */
        int                   pmr_refcount;        /* reference count */
} kib_phys_mr_t;

struct kib_pool;
struct kib_poolset;

typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
                                     int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

struct kib_net;

#define IBLND_POOL_NAME_LEN          32

typedef struct kib_poolset
{
        spinlock_t            ps_lock;             /* serialize */
        struct kib_net       *ps_net;              /* network it belongs to */
        char                  ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
        struct list_head      ps_pool_list;        /* list of pools */
        struct list_head      ps_failed_pool_list; /* failed pool list */
        cfs_time_t            ps_next_retry;       /* time stamp for retry if failed to allocate */
        int                   ps_increasing;       /* is allocating new pool */
        int                   ps_pool_size;        /* new pool size */
        int                   ps_cpt;              /* CPT id */

        kib_ps_pool_create_t  ps_pool_create;      /* create a new pool */
        kib_ps_pool_destroy_t ps_pool_destroy;     /* destroy a pool */
        kib_ps_node_init_t    ps_node_init;        /* initialize newly allocated node */
        kib_ps_node_fini_t    ps_node_fini;        /* finalize node */
} kib_poolset_t;

typedef struct kib_pool
{
        struct list_head      po_list;             /* chain on pool list */
        struct list_head      po_free_list;        /* pre-allocated nodes */
        kib_poolset_t        *po_owner;            /* pool_set of this pool */
        cfs_time_t            po_deadline;         /* deadline of this pool */
        int                   po_allocated;        /* # of elements in use */
        int                   po_failed;           /* pool is created on failed HCA */
        int                   po_size;             /* # of pre-allocated elements */
} kib_pool_t;
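
/*
 * Rough lifecycle implied by the fields above: a pool-set fronts a list
 * of fixed-size pools of one node type; when every pool is empty,
 * ps_pool_create() grows the set (rate-limited via ps_next_retry and
 * ps_increasing), and pools idle past po_deadline are torn down through
 * ps_pool_destroy(). See kiblnd_pool_alloc_node()/kiblnd_pool_free_node().
 */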

typedef struct {
        kib_poolset_t         tps_poolset;         /* pool-set */
        __u64                 tps_next_tx_cookie;  /* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
        kib_pool_t            tpo_pool;            /* pool */
        struct kib_hca_dev   *tpo_hdev;            /* device for this pool */
        struct kib_tx        *tpo_tx_descs;        /* all the tx descriptors */
        kib_pages_t          *tpo_tx_pages;        /* premapped tx msg pages */
} kib_tx_pool_t;

typedef struct {
        kib_poolset_t         pps_poolset;         /* pool-set */
} kib_pmr_poolset_t;

typedef struct kib_pmr_pool {
        struct kib_hca_dev   *ppo_hdev;            /* device for this pool */
        kib_pool_t            ppo_pool;            /* pool */
} kib_pmr_pool_t;

typedef struct
{
        spinlock_t            fps_lock;            /* serialize */
        struct kib_net       *fps_net;             /* IB network */
        struct list_head      fps_pool_list;       /* FMR pool list */
        struct list_head      fps_failed_pool_list; /* failed FMR pool list */
        __u64                 fps_version;         /* validity stamp */
        int                   fps_cpt;             /* CPT id */
        int                   fps_pool_size;
        int                   fps_flush_trigger;
        int                   fps_increasing;      /* is allocating new pool */
        cfs_time_t            fps_next_retry;      /* time stamp for retry if failed to allocate */
} kib_fmr_poolset_t;

typedef struct
{
        struct list_head      fpo_list;            /* chain on pool list */
        struct kib_hca_dev   *fpo_hdev;            /* device for this pool */
        kib_fmr_poolset_t    *fpo_owner;           /* owner of this pool */
        struct ib_fmr_pool   *fpo_fmr_pool;        /* IB FMR pool */
        cfs_time_t            fpo_deadline;        /* deadline of this pool */
        int                   fpo_failed;          /* FMR pool is failed */
        int                   fpo_map_count;       /* # of mapped FMRs */
} kib_fmr_pool_t;

typedef struct {
        struct ib_pool_fmr   *fmr_pfmr;            /* IB pool FMR */
        kib_fmr_pool_t       *fmr_pool;            /* pool of FMRs */
} kib_fmr_t;

typedef struct kib_net
{
        struct list_head      ibn_list;            /* chain on kib_dev_t::ibd_nets */
        __u64                 ibn_incarnation;     /* my epoch */
        int                   ibn_init;            /* initialisation state */
        int                   ibn_shutdown;        /* shutting down? */

        atomic_t              ibn_npeers;          /* # peers extant */
        atomic_t              ibn_nconns;          /* # connections extant */

        kib_tx_poolset_t    **ibn_tx_ps;           /* tx pool-set */
        kib_fmr_poolset_t   **ibn_fmr_ps;          /* fmr pool-set */
        kib_pmr_poolset_t   **ibn_pmr_ps;          /* pmr pool-set */

        kib_dev_t            *ibn_dev;             /* underlying IB device */
} kib_net_t;

#define KIB_THREAD_SHIFT             16
#define KIB_THREAD_ID(cpt, tid)      ((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)           ((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)           ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
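
/*
 * Example: KIB_THREAD_ID(1, 3) == (1 << 16) | 3 == 0x10003, which
 * decodes back as KIB_THREAD_CPT(0x10003) == 1 and
 * KIB_THREAD_TID(0x10003) == 3.
 */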

struct kib_sched_info {
        spinlock_t            ibs_lock;            /* serialize */
        wait_queue_head_t     ibs_waitq;           /* schedulers sleep here */
        struct list_head      ibs_conns;           /* conns to check for rx completions */
        int                   ibs_nthreads;        /* number of scheduler threads */
        int                   ibs_nthreads_max;    /* max allowed scheduler threads */
        int                   ibs_cpt;             /* CPT id */
};

typedef struct
{
        int                   kib_init;            /* initialisation state */
        int                   kib_shutdown;        /* shut down? */
        struct list_head      kib_devs;            /* IB devices extant */
        struct list_head      kib_failed_devs;     /* list head of failed devices */
        wait_queue_head_t     kib_failover_waitq;  /* failover thread sleeps here */
        atomic_t              kib_nthreads;        /* # live threads */
        rwlock_t              kib_global_lock;     /* stabilize net/dev/peer/conn ops */
        struct list_head     *kib_peers;           /* hash table of all my known peers */
        int                   kib_peer_hash_size;  /* size of kib_peers */
        void                 *kib_connd;           /* the connd task (serialisation assertions) */
        struct list_head      kib_connd_conns;     /* connections to setup/teardown */
        struct list_head      kib_connd_zombies;   /* connections with zero refcount */
        wait_queue_head_t     kib_connd_waitq;     /* connection daemon sleeps here */
        spinlock_t            kib_connd_lock;      /* serialize */
        struct ib_qp_attr     kib_error_qpa;       /* QP->ERROR */
        struct kib_sched_info **kib_scheds;        /* percpt data for schedulers */
} kib_data_t;

#define IBLND_INIT_NOTHING           0
#define IBLND_INIT_DATA              1
#define IBLND_INIT_ALL               2

/************************************************************************
 * IB wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */

typedef struct kib_connparams
{
        __u16                 ibcp_queue_depth;
        __u16                 ibcp_max_frags;
        __u32                 ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;

typedef struct
{
        lnet_hdr_t            ibim_hdr;            /* portals header */
        char                  ibim_payload[0];     /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct
{
        __u32                 rf_nob;              /* # bytes this frag */
        __u64                 rf_addr;             /* fragment address */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct
{
        __u32                 rd_key;              /* local/remote key */
        __u32                 rd_nfrags;           /* # fragments */
        kib_rdma_frag_t       rd_frags[0];         /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct
{
        lnet_hdr_t            ibprm_hdr;           /* portals header */
        __u64                 ibprm_cookie;        /* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct
{
        __u64                 ibpam_src_cookie;    /* reflected completion cookie */
        __u64                 ibpam_dst_cookie;    /* opaque completion cookie */
        kib_rdma_desc_t       ibpam_rd;            /* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct
{
        lnet_hdr_t            ibgm_hdr;            /* portals header */
        __u64                 ibgm_cookie;         /* opaque completion cookie */
        kib_rdma_desc_t       ibgm_rd;             /* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct
{
        __u64                 ibcm_cookie;         /* opaque completion cookie */
        __s32                 ibcm_status;         /* < 0 failure: >= 0 length */
} WIRE_ATTR kib_completion_msg_t;

typedef struct
{
        /* First 2 fields fixed FOR ALL TIME */
        __u32                 ibm_magic;           /* I'm an ibnal message */
        __u16                 ibm_version;         /* this is my version number */

        __u8                  ibm_type;            /* msg type */
        __u8                  ibm_credits;         /* returned credits */
        __u32                 ibm_nob;             /* # bytes in whole message */
        __u32                 ibm_cksum;           /* checksum (0 == no checksum) */
        __u64                 ibm_srcnid;          /* sender's NID */
        __u64                 ibm_srcstamp;        /* sender's incarnation */
        __u64                 ibm_dstnid;          /* destination's NID */
        __u64                 ibm_dststamp;        /* destination's incarnation */

        union {
                kib_connparams_t      connparams;
                kib_immediate_msg_t   immediate;
                kib_putreq_msg_t      putreq;
                kib_putack_msg_t      putack;
                kib_get_msg_t         get;
                kib_completion_msg_t  completion;
        } WIRE_ATTR ibm_u;
} WIRE_ATTR kib_msg_t;
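
/*
 * With WIRE_ATTR packing, the fixed header above (ibm_magic through
 * ibm_dststamp) is 48 bytes, so kiblnd_init_msg() (below) sets
 * ibm_nob = 48 + body_nob; a NOOP, which carries no body, is exactly
 * 48 bytes on the wire.
 */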

#define IBLND_MSG_MAGIC              LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1          0x11
#define IBLND_MSG_VERSION_2          0x12
#define IBLND_MSG_VERSION            IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ            0xc0         /* connection request */
#define IBLND_MSG_CONNACK            0xc1         /* connection acknowledge */
#define IBLND_MSG_NOOP               0xd0         /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE          0xd1         /* immediate */
#define IBLND_MSG_PUT_REQ            0xd2         /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK            0xd3         /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK            0xd4         /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE           0xd5         /* completion (src->sink) */
#define IBLND_MSG_GET_REQ            0xd6         /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE           0xd7         /* completion (src->sink: all OK) */

typedef struct {
        __u32                 ibr_magic;           /* sender's magic */
        __u16                 ibr_version;         /* sender's version */
        __u8                  ibr_why;             /* reject reason */
        __u8                  ibr_padding;         /* padding */
        __u64                 ibr_incarnation;     /* incarnation of peer */
        kib_connparams_t      ibr_cp;              /* connection parameters */
} WIRE_ATTR kib_rej_t;

/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE       1             /* you lost connection race */
#define IBLND_REJECT_NO_RESOURCES    2             /* out of memory/conns etc */
#define IBLND_REJECT_FATAL           3             /* anything else */
#define IBLND_REJECT_CONN_UNCOMPAT   4             /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE      5             /* stale peer */
#define IBLND_REJECT_RDMA_FRAGS      6             /* fatal: peer's rdma frags can't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE  7             /* fatal: peer's msg queue size can't match mine */

typedef struct kib_rx                              /* receive message */
{
        struct list_head      rx_list;             /* queue for attention */
        struct kib_conn      *rx_conn;             /* owning conn */
        int                   rx_nob;              /* # bytes received (-1 while posted) */
        enum ib_wc_status     rx_status;           /* completion status */
        kib_msg_t            *rx_msg;              /* message buffer (host vaddr) */
        __u64                 rx_msgaddr;          /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);       /* for dma_unmap_single() */
        struct ib_recv_wr     rx_wrq;              /* receive work item... */
        struct ib_sge         rx_sge;              /* ...and its memory */
} kib_rx_t;

#define IBLND_POSTRX_DONT_POST       0             /* don't post */
#define IBLND_POSTRX_NO_CREDIT       1             /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT     2             /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT    3             /* post: give myself back 1 reserved credit */

typedef struct kib_tx                              /* transmit message */
{
        struct list_head      tx_list;             /* queue on idle_txs, ibc_tx_queue etc. */
        kib_tx_pool_t        *tx_pool;             /* pool I'm from */
        struct kib_conn      *tx_conn;             /* owning conn */
        short                 tx_sending;          /* # tx callbacks outstanding */
        short                 tx_queued;           /* queued for sending */
        short                 tx_waiting;          /* waiting for peer */
        int                   tx_status;           /* LNET completion status */
        unsigned long         tx_deadline;         /* completion deadline */
        __u64                 tx_cookie;           /* completion cookie */
        lnet_msg_t           *tx_lntmsg[2];        /* lnet msgs to finalize on completion */
        kib_msg_t            *tx_msg;              /* message buffer (host vaddr) */
        __u64                 tx_msgaddr;          /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);       /* for dma_unmap_single() */
        int                   tx_nwrq;             /* # send work items */
        struct ib_send_wr    *tx_wrq;              /* send work items... */
        struct ib_sge        *tx_sge;              /* ...and their memory */
        kib_rdma_desc_t      *tx_rd;               /* rdma descriptor */
        int                   tx_nfrags;           /* # entries in... */
        struct scatterlist   *tx_frags;            /* dma_map_sg descriptor */
        __u64                *tx_pages;            /* rdma phys page addrs */
        union {
                kib_phys_mr_t *pmr;                /* MR for physical buffer */
                kib_fmr_t      fmr;                /* FMR */
        } tx_u;
        int                   tx_dmadir;           /* dma direction */
} kib_tx_t;

typedef struct kib_connvars
{
        kib_msg_t             cv_msg;              /* connection-in-progress variables */
} kib_connvars_t;

typedef struct kib_conn
{
        struct kib_sched_info *ibc_sched;          /* scheduler information */
        struct kib_peer      *ibc_peer;            /* owning peer */
        kib_hca_dev_t        *ibc_hdev;            /* HCA bound on */
        struct list_head      ibc_list;            /* stash on peer's conn list */
        struct list_head      ibc_sched_list;      /* schedule for attention */
        __u16                 ibc_version;         /* version of connection */
        __u64                 ibc_incarnation;     /* which instance of the peer */
        atomic_t              ibc_refcount;        /* # users */
        int                   ibc_state;           /* what's happening */
        int                   ibc_nsends_posted;   /* # uncompleted sends */
        int                   ibc_noops_posted;    /* # uncompleted NOOPs */
        int                   ibc_credits;         /* # credits I have */
        int                   ibc_outstanding_credits; /* # credits to return */
        int                   ibc_reserved_credits; /* # ACK/DONE msg credits */
        int                   ibc_comms_error;     /* set on comms error */
        unsigned int          ibc_nrx:16;          /* receive buffers owned */
        unsigned int          ibc_scheduled:1;     /* scheduled for attention */
        unsigned int          ibc_ready:1;         /* CQ callback fired */

        unsigned long         ibc_last_send;       /* time of last send */

        struct list_head      ibc_connd_list;      /* link chain for connd */

        struct list_head      ibc_early_rxs;       /* rxs completed before ESTABLISHED */

        struct list_head      ibc_tx_noops;        /* IBLND_MSG_NOOPs for version-1 */
        struct list_head      ibc_tx_queue;        /* sends that need a credit */
        struct list_head      ibc_tx_queue_nocred; /* sends that don't need a credit */
        struct list_head      ibc_tx_queue_rsrvd;  /* sends that need to reserve an ACK/DONE msg */
        struct list_head      ibc_active_txs;      /* active tx awaiting completion */
        spinlock_t            ibc_lock;            /* serialize */
        kib_rx_t             *ibc_rxs;             /* the rx descs */
        kib_pages_t          *ibc_rx_pages;        /* premapped rx msg pages */

        struct rdma_cm_id    *ibc_cmid;            /* CM id */
        struct ib_cq         *ibc_cq;              /* completion queue */

        kib_connvars_t       *ibc_connvars;        /* in-progress connection state */
} kib_conn_t;

#define IBLND_CONN_INIT              0             /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT    1             /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT      2             /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED       3             /* connection established */
#define IBLND_CONN_CLOSING           4             /* being closed */
#define IBLND_CONN_DISCONNECTED      5             /* disconnected */

typedef struct kib_peer
{
        struct list_head      ibp_list;            /* stash on global peer list */
        lnet_nid_t            ibp_nid;             /* who's on the other end(s) */
        lnet_ni_t            *ibp_ni;              /* LNet interface */
        atomic_t              ibp_refcount;        /* # users */
        struct list_head      ibp_conns;           /* all active connections */
        struct list_head      ibp_tx_queue;        /* msgs waiting for a conn */
        __u16                 ibp_version;         /* version of peer */
        __u64                 ibp_incarnation;     /* incarnation of peer */
        int                   ibp_connecting;      /* current active connection attempts */
        int                   ibp_accepting;       /* current passive connection attempts */
        int                   ibp_error;           /* errno on closing this peer */
        cfs_time_t            ibp_last_alive;      /* when (in jiffies) I was last alive */
} kib_peer_t;

extern kib_data_t kiblnd_data;

extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);

static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        if (atomic_dec_and_test(&hdev->ibh_ref))
                kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
        if (!list_empty(&dev->ibd_fail_list)) /* already scheduled for failover */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
                return 1;

        return dev->ibd_can_failover;
}

#define kiblnd_conn_addref(conn)                                        \
do {                                                                    \
        CDEBUG(D_NET, "conn[%p] (%d)++\n",                              \
               (conn), atomic_read(&(conn)->ibc_refcount));             \
        atomic_inc(&(conn)->ibc_refcount);                              \
} while (0)

#define kiblnd_conn_decref(conn)                                        \
do {                                                                    \
        unsigned long flags;                                            \
                                                                        \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                              \
               (conn), atomic_read(&(conn)->ibc_refcount));             \
        LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                      \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {               \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                list_add_tail(&(conn)->ibc_list,                        \
                              &kiblnd_data.kib_connd_zombies);          \
                wake_up(&kiblnd_data.kib_connd_waitq);                  \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
        }                                                               \
} while (0)
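
/*
 * Note on the final decref: rather than destroying the conn inline
 * (the last reference may drop in a context that cannot block, hence
 * the irqsave locking), the zero-refcount conn is queued on
 * kib_connd_zombies and left to kiblnd_connd() to tear down.
 */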

#define kiblnd_peer_addref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read(&(peer)->ibp_refcount));             \
        atomic_inc(&(peer)->ibp_refcount);                      \
} while (0)

#define kiblnd_peer_decref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read(&(peer)->ibp_refcount));             \
        LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);              \
        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
                kiblnd_destroy_peer(peer);                      \
} while (0)

static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
        unsigned int hash =
                ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

        return &kiblnd_data.kib_peers[hash];
}

static inline int
kiblnd_peer_active(kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return !list_empty(&peer->ibp_list);
}

static inline kib_conn_t *
kiblnd_get_conn_locked(kib_peer_t *peer)
{
        LASSERT(!list_empty(&peer->ibp_conns));

        /* just return the first connection */
        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}

static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
        return (*kiblnd_tunables.kib_keepalive > 0) &&
                cfs_time_after(jiffies, conn->ibc_last_send +
                               *kiblnd_tunables.kib_keepalive * HZ);
}

static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_outstanding_credits <
            IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
            !kiblnd_send_keepalive(conn))
                return 0; /* No need to send NOOP */

        if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                if (!list_empty(&conn->ibc_tx_queue_nocred))
                        return 0; /* NOOP can be piggybacked */

                /* No tx to piggyback NOOP onto or no credit to send a tx */
                return (list_empty(&conn->ibc_tx_queue) ||
                        conn->ibc_credits == 0);
        }

        if (!list_empty(&conn->ibc_tx_noops) ||       /* NOOP already queued */
            !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
            conn->ibc_credits == 0)                   /* no credit */
                return 0;

        if (conn->ibc_credits == 1 &&                 /* last credit reserved for */
            conn->ibc_outstanding_credits == 0)       /* giving back credits */
                return 0;

        /* No tx to piggyback NOOP onto or no credit to send a tx */
        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
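
/*
 * In short: a NOOP is only needed when enough credits are owed
 * (>= IBLND_CREDITS_HIGHWATER) or a keepalive is due, AND no queued
 * regular send can carry the credits back (either the queue is empty
 * or there is no send credit left to move it).
 */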

static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
        ib_modify_qp(conn->ibc_cmid->qp,
                     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
        if (q == &conn->ibc_tx_queue)
                return "tx_queue";

        if (q == &conn->ibc_tx_queue_rsrvd)
                return "tx_queue_rsrvd";

        if (q == &conn->ibc_tx_queue_nocred)
                return "tx_queue_nocred";

        if (q == &conn->ibc_active_txs)
                return "active_txs";

        LBUG();
        return NULL;
}

/* CAVEAT EMPTOR: we rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_TX                 0
#define IBLND_WID_RDMA               1
#define IBLND_WID_RX                 2
#define IBLND_WID_MASK               3UL

static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
        unsigned long lptr = (unsigned long)ptr;

        LASSERT((lptr & IBLND_WID_MASK) == 0);
        LASSERT((type & ~IBLND_WID_MASK) == 0);
        return (__u64)(lptr | type);
}
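
/*
 * Example: a descriptor at address 0x...b40 (low two bits clear, as the
 * LASSERTs require) yields wreqid 0x...b41 for IBLND_WID_RDMA;
 * kiblnd_wreqid2ptr() and kiblnd_wreqid2type() below recover the
 * address and type respectively.
 */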

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
        return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
        return wreqid & IBLND_WID_MASK;
}

static inline void
kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
        conn->ibc_state = state;
        mb();
}

static inline void
kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}

static inline int
kiblnd_rd_size(kib_rdma_desc_t *rd)
{
        int i;
        int size;

        for (i = size = 0; i < rd->rd_nfrags; i++)
                size += rd->rd_frags[i].rf_nob;

        return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
        if (nob < rd->rd_frags[index].rf_nob) {
                rd->rd_frags[index].rf_addr += nob;
                rd->rd_frags[index].rf_nob  -= nob;
        } else {
                index++;
        }

        return index;
}
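
/*
 * Example: with rd_frags[i] = { .rf_nob = 8192, .rf_addr = A },
 * consuming 4096 bytes leaves { .rf_nob = 4096, .rf_addr = A + 4096 }
 * and returns i; consuming the full 8192 returns i + 1 so the caller
 * advances to the next fragment.
 */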

static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
        LASSERT(msgtype == IBLND_MSG_GET_REQ ||
                msgtype == IBLND_MSG_PUT_ACK);

        return msgtype == IBLND_MSG_GET_REQ ?
               offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
               offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
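
/*
 * Example: the wire size of a GET_REQ whose descriptor carries n
 * fragments is offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]), i.e. the
 * hdr/cookie/key/nfrags fields plus n packed kib_rdma_frag_t entries
 * (12 bytes each under WIRE_ATTR).
 */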

static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
                                          void *msg, size_t size,
                                          enum dma_data_direction direction)
{
        return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
                                           __u64 addr, size_t size,
                                           enum dma_data_direction direction)
{
        ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)  do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)      (a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
{
        return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
                                       struct scatterlist *sg, int nents,
                                       enum dma_data_direction direction)
{
        ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
                                          struct scatterlist *sg)
{
        return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
                                             struct scatterlist *sg)
{
        return ib_sg_dma_len(dev, sg);
}

/* accessors for the private data carried in an rdma_cm connection event */
#define KIBLND_CONN_PARAM(e)         ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e)     ((e)->param.conn.private_data_len)

struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
                                    kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
                                 __u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
int  kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                   kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
                         int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
                         kib_rdma_desc_t *rd, __u64 *iova,
                         kib_phys_mr_t **pp_pmr);
void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);

int  kiblnd_startup(lnet_ni_t *ni);
void kiblnd_shutdown(lnet_ni_t *ni);
int  kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);

int  kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);

int  kiblnd_connd(void *arg);
int  kiblnd_scheduler(void *arg);
int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int  kiblnd_failover_thread(void *arg);

int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
void kiblnd_free_pages(kib_pages_t *p);

int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
                        struct rdma_cm_event *event);
int  kiblnd_translate_mtu(int value);

int  kiblnd_dev_failover(kib_dev_t *dev);
int  kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
void kiblnd_peer_alive(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
                                     int version, __u64 incarnation);
int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);

void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                               int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);

int  kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
                      int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);

void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
                        int status);
void kiblnd_check_sends(kib_conn_t *conn);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp);
int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int  kiblnd_post_rx(kib_rx_t *rx, int credit);

int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
                 unsigned int offset, unsigned int mlen, unsigned int rlen);