#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};
enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = 1,
	IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
	IB_DEVICE_RAW_MULTI = (1<<3),
	IB_DEVICE_AUTO_PATH_MIG = (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
	IB_DEVICE_SHUTDOWN_PORT = (1<<8),
	IB_DEVICE_INIT_TYPE = (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
	IB_DEVICE_SRQ_RESIZE = (1<<13),
	IB_DEVICE_N_NOTIFY_CQ = (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
	IB_DEVICE_RESERVED = (1<<16),
	IB_DEVICE_MEM_WINDOW = (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums for
	 * incoming messages.
	 */
	IB_DEVICE_UD_IP_CSUM = (1<<18),
	IB_DEVICE_UD_TSO = (1<<19),
	IB_DEVICE_XRC = (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	int device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256: return 256;
	case IB_MTU_512: return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X: return 1;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64 ipInReceives;
	u64 ipInHdrErrors;
	u64 ipInTooBigErrors;
	u64 ipInNoRoutes;
	u64 ipInAddrErrors;
	u64 ipInUnknownProtos;
	u64 ipInTruncatedPkts;
	u64 ipInDiscards;
	u64 ipInDelivers;
	u64 ipOutForwDatagrams;
	u64 ipOutRequests;
	u64 ipOutDiscards;
	u64 ipOutNoRoutes;
	u64 ipReasmTimeout;
	u64 ipReasmReqds;
	u64 ipReasmOKs;
	u64 ipReasmFails;
	u64 ipFragOKs;
	u64 ipFragFails;
	u64 ipFragCreates;
	u64 ipInMcastPkts;
	u64 ipOutMcastPkts;
	u64 ipInBcastPkts;
	u64 ipOutBcastPkts;

	u64 tcpRtoAlgorithm;
	u64 tcpRtoMin;
	u64 tcpRtoMax;
	u64 tcpMaxConn;
	u64 tcpActiveOpens;
	u64 tcpPassiveOpens;
	u64 tcpAttemptFails;
	u64 tcpEstabResets;
	u64 tcpCurrEstab;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
	u64 tcpInErrs;
	u64 tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats ib;
	struct iw_protocol_stats iw;
};

struct ib_port_attr {
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u16 lid;
	u16 sm_lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device  = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)
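
/*
 * Example (a minimal sketch, not part of the original header):
 * registering an asynchronous event handler.  The callback name and
 * the "device" pointer are hypothetical; a real consumer would obtain
 * the device from its ib_client add callback.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d on device %s\n",
 *			event->event, event->device->name);
 *	}
 *
 *	static struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */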

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route grh;
	u16 dlid;
	u8 sl;
	u8 src_path_bits;
	u8 static_rate;
	u8 ah_flags;
	u8 port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
};

struct ib_wc {
	u64 wr_id;
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	int wc_flags;
	u16 pkey_index;
	u16 slid;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};

struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;	/* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	enum ib_qp_create_flags create_flags;
	u8 port_num;	/* special QP types only */
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct ib_ah_attr ah_attr;
	struct ib_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4)
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device *device;
	u64 *page_list;
	unsigned int max_page_list_len;
};

struct ib_send_wr {
	struct ib_send_wr *next;
	u64 wr_id;
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	union {
		struct {
			u64 remote_addr;
			u32 rkey;
		} rdma;
		struct {
			u64 remote_addr;
			u64 compare_add;
			u64 swap;
			u64 compare_add_mask;
			u64 swap_mask;
			u32 rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void *header;
			int hlen;
			int mss;
			u32 remote_qpn;
			u32 remote_qkey;
			u16 pkey_index;	/* valid for GSI only */
			u8 port_num;	/* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64 iova_start;
			struct ib_fast_reg_page_list *page_list;
			unsigned int page_shift;
			unsigned int page_list_len;
			u32 length;
			int access_flags;
			u32 rkey;
		} fast_reg;
	} wr;
	u32 xrc_remote_srq_num;	/* XRC TGT QPs only */
};

struct ib_recv_wr {
	struct ib_recv_wr *next;
	u64 wr_id;
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = 1,
	IB_ACCESS_REMOTE_WRITE = (1<<1),
	IB_ACCESS_REMOTE_READ = (1<<2),
	IB_ACCESS_REMOTE_ATOMIC = (1<<3),
	IB_ACCESS_MW_BIND = (1<<4)
};

struct ib_phys_buf {
	u64 addr;
	u64 size;
};

struct ib_mr_attr {
	struct ib_pd *pd;
	u64 device_virt_addr;
	u64 size;
	int mr_access_flags;
	u32 lkey;
	u32 rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2)
};

struct ib_mw_bind {
	struct ib_mr *mr;
	u64 wr_id;
	u64 addr;
	u32 length;
	int send_flags;
	int mw_access_flags;
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_ucontext {
	struct ib_device *device;
	struct list_head pd_list;
	struct list_head mr_list;
	struct list_head mw_list;
	struct list_head cq_list;
	struct list_head qp_list;
	struct list_head srq_list;
	struct list_head ah_list;
	struct list_head xrcd_list;
	int closing;
};

struct ib_uobject {
	u64 user_handle;	/* handle given to us by userspace */
	struct ib_ucontext *context;	/* associated user context */
	void *object;		/* containing object */
	struct list_head list;	/* link to context's list */
	int id;			/* index into kernel idr */
	struct kref ref;
	struct rw_semaphore mutex;	/* protects .live */
	int live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;	/* count all resources */
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt;	/* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;	/* count number of work queues */
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
			u32 srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	struct list_head xrcd_list;
	atomic_t usecnt;	/* count times opened, mcast attaches */
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 lkey;
	u32 rkey;
	atomic_t usecnt;	/* count number of MWs */
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,	/* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS = 1 << 0,	/* MAD was successfully processed */
	IB_MAD_RESULT_REPLY = 1 << 1,	/* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2	/* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache **pkey_cache;
	struct ib_gid_cache **gid_cache;
	u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
	int (*mapping_error)(struct ib_device *dev,
			     u64 dma_addr);
	u64 (*map_single)(struct ib_device *dev,
			  void *ptr, size_t size,
			  enum dma_data_direction direction);
	void (*unmap_single)(struct ib_device *dev,
			     u64 addr, size_t size,
			     enum dma_data_direction direction);
	u64 (*map_page)(struct ib_device *dev,
			struct page *page, unsigned long offset,
			size_t size,
			enum dma_data_direction direction);
	void (*unmap_page)(struct ib_device *dev,
			   u64 addr, size_t size,
			   enum dma_data_direction direction);
	int (*map_sg)(struct ib_device *dev,
		      struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction);
	u64 (*dma_address)(struct ib_device *dev,
			   struct scatterlist *sg);
	unsigned int (*dma_len)(struct ib_device *dev,
				struct scatterlist *sg);
	void (*sync_single_for_cpu)(struct ib_device *dev,
				    u64 dma_handle,
				    size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct ib_device *dev,
				       u64 dma_handle,
				       size_t size,
				       enum dma_data_direction dir);
	void *(*alloc_coherent)(struct ib_device *dev,
				size_t size,
				u64 *dma_handle,
				gfp_t flag);
	void (*free_coherent)(struct ib_device *dev,
			      size_t size, void *cpu_addr,
			      u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	spinlock_t client_data_lock;
	struct list_head core_list;
	struct list_head client_data_list;

	struct ib_cache cache;
	int *pkey_tbl_len;
	int *gid_tbl_len;

	int num_comp_vectors;

	struct iw_cm_verbs *iwcm;

	int (*get_protocol_stats)(struct ib_device *device,
				  union rdma_protocol_stats *stats);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	int (*query_pkey)(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey);
	int (*modify_device)(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify);
	int (*modify_port)(struct ib_device *device,
			   u8 port_num, int port_modify_mask,
			   struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
					      struct ib_udata *udata);
	int (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context,
		    struct vm_area_struct *vma);
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *(*create_ah)(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr);
	int (*modify_ah)(struct ib_ah *ah,
			 struct ib_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah,
			struct ib_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
				     struct ib_srq_init_attr *srq_init_attr,
				     struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq,
			  struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq,
			 struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq);
	int (*post_srq_recv)(struct ib_srq *srq,
			     struct ib_recv_wr *recv_wr,
			     struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp,
			 struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp);
	int (*post_send)(struct ib_qp *qp,
			 struct ib_send_wr *send_wr,
			 struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp,
			 struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
				   int comp_vector,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
			 u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq);
	int (*resize_cq)(struct ib_cq *cq, int cqe,
			 struct ib_udata *udata);
	int (*poll_cq)(struct ib_cq *cq, int num_entries,
		       struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq,
			     enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq,
			       int wc_cnt);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
				    int mr_access_flags);
	struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
				     struct ib_phys_buf *phys_buf_array,
				     int num_phys_buf,
				     int mr_access_flags,
				     u64 *iova_start);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
				     u64 start, u64 length,
				     u64 virt_addr,
				     int mr_access_flags,
				     struct ib_udata *udata);
	int (*query_mr)(struct ib_mr *mr,
			struct ib_mr_attr *mr_attr);
	int (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *(*alloc_fast_reg_mr)(struct ib_pd *pd,
					   int max_page_list_len);
	struct ib_fast_reg_page_list *(*alloc_fast_reg_page_list)(struct ib_device *device,
								  int page_list_len);
	void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int (*rereg_phys_mr)(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
	int (*bind_mw)(struct ib_qp *qp,
		       struct ib_mw *mw,
		       struct ib_mw_bind *mw_bind);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
				    int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr,
			    u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*detach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*process_mad)(struct ib_device *device,
			   int process_mad_flags,
			   u8 port_num,
			   struct ib_wc *in_wc,
			   struct ib_grh *in_grh,
			   struct ib_mad *in_mad,
			   struct ib_mad *out_mad);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_ucontext *ucontext,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd);

	struct ib_dma_mapping_ops *dma_ops;

	struct module *owner;
	struct device dev;
	struct kobject *ports_parent;
	struct list_head port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int uverbs_abi_ver;
	u64 uverbs_cmd_mask;

	char node_desc[64];
	__be64 node_guid;
	u32 local_dma_lkey;
	u8 node_type;
	u8 phys_port_cnt;
};

struct ib_client {
	char *name;
	void (*add)(struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
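
/*
 * Example (a minimal sketch, not part of the original header): a small
 * ib_client that is called back as devices come and go.  The names
 * "my_client", "my_add", "my_remove" and "struct my_state" are
 * hypothetical.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		ib_set_client_data(device, &my_client, state);
 *	}
 *
 *	static void my_remove(struct ib_device *device)
 *	{
 *		kfree(ib_get_client_data(device, &my_client));
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	ib_register_client(&my_client);
 */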

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
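
/*
 * Example (a sketch, not part of the original header): allocating a
 * protection domain and releasing it.  Error handling is abbreviated
 * and the "device" pointer is hypothetical.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */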

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
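
/*
 * Example (a sketch, not part of the original header): moving a freshly
 * created QP from RESET to INIT.  The attribute values shown are
 * illustrative only.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */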

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
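
/*
 * Example (a sketch, not part of the original header): posting a single
 * signaled send of a DMA-mapped buffer.  "dma_addr", "len" and "mr" are
 * hypothetical and must come from a prior ib_dma_map_single() and
 * memory registration; the wr_id is an opaque cookie returned in the
 * matching work completion.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */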

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);
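
/*
 * Example (a sketch, not part of the original header): creating a CQ
 * that invokes "my_comp_handler" when a completion event fires.  The
 * handler and "my_ctx" are hypothetical; comp_vector 0 is always valid.
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 256, 0);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */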

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: The number of CQEs that will trigger an event.
 * @cq_period: The maximum period of time in microseconds before
 *   triggering an event.
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is at least wc_cnt, this
 * function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
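
/*
 * Example (a sketch, not part of the original header): the canonical
 * poll / re-arm / re-poll loop that uses IB_CQ_REPORT_MISSED_EVENTS to
 * close the race described above.  "handle_completion" is a
 * hypothetical consumer routine.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */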

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
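
/*
 * Example (a sketch, not part of the original header): mapping a kernel
 * buffer for device access and checking the result.  "buf" and "len"
 * are hypothetical.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */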

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain associated with the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions deregister memory region
 *   followed by register physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WC_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
	struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates a unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps the list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

#endif /* IB_VERBS_H */