#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 << 1,
	IB_PORT_NOTICE_SUP			= 1 << 2,
	IB_PORT_TRAP_SUP			= 1 << 3,
	IB_PORT_OPT_IPD_SUP			= 1 << 4,
	IB_PORT_AUTO_MIGR_SUP			= 1 << 5,
	IB_PORT_SL_MAP_SUP			= 1 << 6,
	IB_PORT_MKEY_NVRAM			= 1 << 7,
	IB_PORT_PKEY_NVRAM			= 1 << 8,
	IB_PORT_LED_INFO_SUP			= 1 << 9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}
struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
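
/*
 * Example (illustrative sketch, not part of this header): registering an
 * asynchronous event handler with INIT_IB_EVENT_HANDLER() and
 * ib_register_event_handler().  "my_event_handler" and "my_device" are
 * hypothetical names supplied by the consumer.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n", event->event,
 *			event->device->name);
 *	}
 *
 *	static struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, my_device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */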

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV	= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp		*qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;
	int			csum_ok;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void			*srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,

	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
};

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_srq		*srq;
	struct ib_xrcd		*xrcd;	/* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device	*device;
	u64			*page_list;
	unsigned int		max_page_list_len;
};

struct ib_send_wr {
	struct ib_send_wr	*next;
	u64			wr_id;
	struct ib_sge		*sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void	*header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list	*page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
	} wr;
	u32			xrc_remote_srq_num; /* XRC TGT QPs only */
};

struct ib_recv_wr {
	struct ib_recv_wr	*next;
	u64			wr_id;
	struct ib_sge		*sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr	*mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device	*device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;
	struct ib_ucontext	*context;
	void			*object;
	struct list_head	list;
	int			id;
	struct kref		ref;
	struct rw_semaphore	mutex;
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t	     inlen;
	size_t	     outlen;
};

struct ib_pd {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device	*device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode		*inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void			*cq_context;
	int			cqe;
	atomic_t		usecnt;
};

struct ib_srq {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void			*srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_srq		*srq;
	struct ib_xrcd		*xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	atomic_t		usecnt; /* count times opened */
	struct list_head	open_list;
	struct ib_qp		*real_qp;
	struct ib_uobject	*uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			lkey;
	u32			rkey;
	atomic_t		usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler	event_handler;
	struct ib_pkey_cache	**pkey_cache;
	struct ib_gid_cache	**gid_cache;
	u8			*lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device		*dma_device;

	char			name[IB_DEVICE_NAME_MAX];

	struct list_head	event_handler_list;
	spinlock_t		event_handler_lock;

	spinlock_t		client_data_lock;
	struct list_head	core_list;
	struct list_head	client_data_list;

	struct ib_cache		cache;
	int			*pkey_tbl_len;
	int			*gid_tbl_len;

	int			num_comp_vectors;

	struct iw_cm_verbs	*iwcm;

	int		      (*get_protocol_stats)(struct ib_device *device,
						    union rdma_protocol_stats *stats);
	int		      (*query_device)(struct ib_device *device,
					      struct ib_device_attr *device_attr);
	int		      (*query_port)(struct ib_device *device,
					    u8 port_num,
					    struct ib_port_attr *port_attr);
	enum rdma_link_layer  (*get_link_layer)(struct ib_device *device,
						u8 port_num);
	int		      (*query_gid)(struct ib_device *device,
					   u8 port_num, int index,
					   union ib_gid *gid);
	int		      (*query_pkey)(struct ib_device *device,
					    u8 port_num, u16 index, u16 *pkey);
	int		      (*modify_device)(struct ib_device *device,
					       int device_modify_mask,
					       struct ib_device_modify *device_modify);
	int		      (*modify_port)(struct ib_device *device,
					     u8 port_num, int port_modify_mask,
					     struct ib_port_modify *port_modify);
	struct ib_ucontext *  (*alloc_ucontext)(struct ib_device *device,
						struct ib_udata *udata);
	int		      (*dealloc_ucontext)(struct ib_ucontext *context);
	int		      (*mmap)(struct ib_ucontext *context,
				      struct vm_area_struct *vma);
	struct ib_pd *	      (*alloc_pd)(struct ib_device *device,
					  struct ib_ucontext *context,
					  struct ib_udata *udata);
	int		      (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *	      (*create_ah)(struct ib_pd *pd,
					   struct ib_ah_attr *ah_attr);
	int		      (*modify_ah)(struct ib_ah *ah,
					   struct ib_ah_attr *ah_attr);
	int		      (*query_ah)(struct ib_ah *ah,
					  struct ib_ah_attr *ah_attr);
	int		      (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *	      (*create_srq)(struct ib_pd *pd,
					    struct ib_srq_init_attr *srq_init_attr,
					    struct ib_udata *udata);
	int		      (*modify_srq)(struct ib_srq *srq,
					    struct ib_srq_attr *srq_attr,
					    enum ib_srq_attr_mask srq_attr_mask,
					    struct ib_udata *udata);
	int		      (*query_srq)(struct ib_srq *srq,
					   struct ib_srq_attr *srq_attr);
	int		      (*destroy_srq)(struct ib_srq *srq);
	int		      (*post_srq_recv)(struct ib_srq *srq,
					       struct ib_recv_wr *recv_wr,
					       struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *	      (*create_qp)(struct ib_pd *pd,
					   struct ib_qp_init_attr *qp_init_attr,
					   struct ib_udata *udata);
	int		      (*modify_qp)(struct ib_qp *qp,
					   struct ib_qp_attr *qp_attr,
					   int qp_attr_mask,
					   struct ib_udata *udata);
	int		      (*query_qp)(struct ib_qp *qp,
					  struct ib_qp_attr *qp_attr,
					  int qp_attr_mask,
					  struct ib_qp_init_attr *qp_init_attr);
	int		      (*destroy_qp)(struct ib_qp *qp);
	int		      (*post_send)(struct ib_qp *qp,
					   struct ib_send_wr *send_wr,
					   struct ib_send_wr **bad_send_wr);
	int		      (*post_recv)(struct ib_qp *qp,
					   struct ib_recv_wr *recv_wr,
					   struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *	      (*create_cq)(struct ib_device *device, int cqe,
					   int comp_vector,
					   struct ib_ucontext *context,
					   struct ib_udata *udata);
	int		      (*modify_cq)(struct ib_cq *cq, u16 cq_count,
					   u16 cq_period);
	int		      (*destroy_cq)(struct ib_cq *cq);
	int		      (*resize_cq)(struct ib_cq *cq, int cqe,
					   struct ib_udata *udata);
	int		      (*poll_cq)(struct ib_cq *cq, int num_entries,
					 struct ib_wc *wc);
	int		      (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int		      (*req_notify_cq)(struct ib_cq *cq,
					       enum ib_cq_notify_flags flags);
	int		      (*req_ncomp_notif)(struct ib_cq *cq,
						 int wc_cnt);
	struct ib_mr *	      (*get_dma_mr)(struct ib_pd *pd,
					    int mr_access_flags);
	struct ib_mr *	      (*reg_phys_mr)(struct ib_pd *pd,
					     struct ib_phys_buf *phys_buf_array,
					     int num_phys_buf,
					     int mr_access_flags,
					     u64 *iova_start);
	struct ib_mr *	      (*reg_user_mr)(struct ib_pd *pd,
					     u64 start, u64 length,
					     u64 virt_addr,
					     int mr_access_flags,
					     struct ib_udata *udata);
	int		      (*query_mr)(struct ib_mr *mr,
					  struct ib_mr_attr *mr_attr);
	int		      (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *	      (*alloc_fast_reg_mr)(struct ib_pd *pd,
						   int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void		      (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int		      (*rereg_phys_mr)(struct ib_mr *mr,
					       int mr_rereg_mask,
					       struct ib_pd *pd,
					       struct ib_phys_buf *phys_buf_array,
					       int num_phys_buf,
					       int mr_access_flags,
					       u64 *iova_start);
	struct ib_mw *	      (*alloc_mw)(struct ib_pd *pd);
	int		      (*bind_mw)(struct ib_qp *qp,
					 struct ib_mw *mw,
					 struct ib_mw_bind *mw_bind);
	int		      (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	      (*alloc_fmr)(struct ib_pd *pd,
					   int mr_access_flags,
					   struct ib_fmr_attr *fmr_attr);
	int		      (*map_phys_fmr)(struct ib_fmr *fmr,
					      u64 *page_list, int list_len,
					      u64 iova);
	int		      (*unmap_fmr)(struct list_head *fmr_list);
	int		      (*dealloc_fmr)(struct ib_fmr *fmr);
	int		      (*attach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*detach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*process_mad)(struct ib_device *device,
					     int process_mad_flags,
					     u8 port_num,
					     struct ib_wc *in_wc,
					     struct ib_grh *in_grh,
					     struct ib_mad *in_mad,
					     struct ib_mad *out_mad);
	struct ib_xrcd *      (*alloc_xrcd)(struct ib_device *device,
					    struct ib_ucontext *ucontext,
					    struct ib_udata *udata);
	int		      (*dealloc_xrcd)(struct ib_xrcd *xrcd);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module		*owner;
	struct device		dev;
	struct kobject		*ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			uverbs_abi_ver;
	u64			uverbs_cmd_mask;

	char			node_desc[64];
	__be64			node_guid;
	u32			local_dma_lkey;
	u8			node_type;
	u8			phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
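
/*
 * Example (illustrative sketch): a low-level driver verb copying a request
 * from and a response back to userspace via ib_udata.  The request and
 * response struct names are hypothetical.
 *
 *	struct my_create_cq_req  req;
 *	struct my_create_cq_resp resp;
 *
 *	if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *		return ERR_PTR(-EFAULT);
 *	// ... create the CQ, fill in resp ...
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		goto err_destroy;	// hypothetical error label
 */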

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
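
/*
 * Example (illustrative sketch): allocating a PD in an ib_client add
 * callback and releasing it on teardown.  Error handling is abbreviated.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create QPs, CQs, MRs within this PD ...
 *	ib_dealloc_pd(pd);
 */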

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receive queue entries drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
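
/*
 * Example (illustrative sketch): replenishing an SRQ with one receive work
 * request.  "sge" is assumed to describe memory registered in the SRQ's
 * protection domain; "my_ctx" is a hypothetical per-buffer cookie.
 *
 *	struct ib_recv_wr wr, *bad_wr;
 *
 *	wr.next    = NULL;
 *	wr.wr_id   = (u64) (unsigned long) my_ctx;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		pr_err("SRQ post failed\n");
 */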

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
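
/*
 * Example (illustrative sketch): creating an RC QP.  The CQ pointers and
 * capability values are assumptions for illustration, not API requirements.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	// transition RESET -> INIT -> RTR -> RTS via ib_modify_qp()
 */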

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after ringing the
 * doorbell if the QP was not empty.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
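
/*
 * Example (illustrative sketch): posting a signaled RDMA WRITE.  "sge",
 * "remote_addr" and "rkey" are assumed to have been set up and exchanged
 * out of band; MY_WRITE_WRID is a hypothetical completion cookie.
 *
 *	struct ib_send_wr wr, *bad_wr;
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.wr_id	       = MY_WRITE_WRID;
 *	wr.opcode	       = IB_WR_RDMA_WRITE;
 *	wr.send_flags	       = IB_SEND_SIGNALED;
 *	wr.sg_list	       = &sge;
 *	wr.num_sge	       = 1;
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey	       = rkey;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("post_send failed\n");
 */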

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
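
/*
 * Example (illustrative sketch): draining a CQ in batches and checking each
 * completion's status.
 *
 *	struct ib_wc wc[16];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %d\n",
 *				       wc[i].wr_id, wc[i].status);
 */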

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
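
/*
 * Example (illustrative sketch): the standard race-free re-arm loop.  When
 * ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS returns > 0, poll the
 * CQ again before sleeping, since a completion may have arrived between the
 * final poll and the notification request.  "handle" is a hypothetical
 * consumer helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */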

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
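
/*
 * Example (illustrative sketch): mapping a kernel buffer for device reads,
 * checking the mapping before use, and unmapping it afterwards.
 *
 *	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... use dma as an ib_sge.addr with the local DMA lkey ...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */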

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}
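
/*
 * Example (illustrative sketch): allocating a coherent buffer shared with
 * the HCA and freeing it on teardown.
 *
 *	u64 dma_handle;
 *	void *ring = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... hand dma_handle to the hardware ...
 *	ib_dma_free_coherent(dev, size, ring, dma_handle);
 */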

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region followed
 *   by a register physical memory region.  Where possible, resources are
 *   reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, the new protection
 *   domain to associate with the memory region, otherwise ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, a list
 *   of physical buffers to use in the new translation, otherwise ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, the size
 *   of the phys_buf_array, otherwise ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, the
 *   new memory access rights, otherwise ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
			struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
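
/*
 * Example (illustrative sketch): advancing the low 8-bit key before reusing
 * a fast-reg MR, so stale remote references built on the old R_Key are
 * rejected by the HCA.
 *
 *	u8 key = (u8) (mr->rkey & 0xff);
 *
 *	ib_update_fast_reg_key(mr, ++key);
 *	// then post an IB_WR_FAST_REG_MR work request using mr->rkey
 */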

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
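
/*
 * Example (illustrative sketch): mapping pages into an FMR and later
 * unmapping a batch of FMRs in one call.  "pages" is assumed to be an array
 * of DMA addresses aligned to the FMR's page size; fmr->list links the
 * region onto the caller's unmap list.
 *
 *	LIST_HEAD(fmr_list);
 *
 *	if (ib_map_phys_fmr(fmr, pages, npages, iova))
 *		return -EIO;
 *	// ... use the FMR's lkey/rkey in work requests ...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 */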

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

#endif /* IB_VERBS_H */