1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51
52#include <asm/atomic.h>
53#include <asm/uaccess.h>
54
55extern struct workqueue_struct *ib_wq;
56
/*
 * 128-bit global identifier (GID), viewable either as raw bytes or as
 * the subnet prefix / interface ID pair.  Both halves are big-endian
 * (__be64), matching the on-the-wire format.
 */
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};
64
65enum rdma_node_type {
66
67 RDMA_NODE_IB_CA = 1,
68 RDMA_NODE_IB_SWITCH,
69 RDMA_NODE_IB_ROUTER,
70 RDMA_NODE_RNIC
71};
72
73enum rdma_transport_type {
74 RDMA_TRANSPORT_IB,
75 RDMA_TRANSPORT_IWARP
76};
77
78enum rdma_transport_type
79rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
80
81enum rdma_link_layer {
82 IB_LINK_LAYER_UNSPECIFIED,
83 IB_LINK_LAYER_INFINIBAND,
84 IB_LINK_LAYER_ETHERNET,
85};
86
/*
 * Optional device capabilities, reported as a bitmask in
 * ib_device_attr.device_cap_flags by query_device().
 */
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17),

	/*
	 * NOTE(review): the names suggest checksum offload (UD_IP_CSUM)
	 * and TCP segmentation offload (UD_TSO) for UD/IPoIB traffic --
	 * confirm the exact contract against the provider drivers.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	/* no flag is assigned to bit 20 in this enum */
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};
118
119enum ib_atomic_cap {
120 IB_ATOMIC_NONE,
121 IB_ATOMIC_HCA,
122 IB_ATOMIC_GLOB
123};
124
125struct ib_device_attr {
126 u64 fw_ver;
127 __be64 sys_image_guid;
128 u64 max_mr_size;
129 u64 page_size_cap;
130 u32 vendor_id;
131 u32 vendor_part_id;
132 u32 hw_ver;
133 int max_qp;
134 int max_qp_wr;
135 int device_cap_flags;
136 int max_sge;
137 int max_sge_rd;
138 int max_cq;
139 int max_cqe;
140 int max_mr;
141 int max_pd;
142 int max_qp_rd_atom;
143 int max_ee_rd_atom;
144 int max_res_rd_atom;
145 int max_qp_init_rd_atom;
146 int max_ee_init_rd_atom;
147 enum ib_atomic_cap atomic_cap;
148 enum ib_atomic_cap masked_atomic_cap;
149 int max_ee;
150 int max_rdd;
151 int max_mw;
152 int max_raw_ipv6_qp;
153 int max_raw_ethy_qp;
154 int max_mcast_grp;
155 int max_mcast_qp_attach;
156 int max_total_mcast_qp_attach;
157 int max_ah;
158 int max_fmr;
159 int max_map_per_fmr;
160 int max_srq;
161 int max_srq_wr;
162 int max_srq_sge;
163 unsigned int max_fast_reg_page_list_len;
164 u16 max_pkeys;
165 u8 local_ca_ack_delay;
166};
167
/* MTU encodings: each step doubles the MTU, starting at 256 bytes. */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/*
 * Convert an MTU encoding to its size in bytes, or -1 if the value is
 * not a valid encoding.  Relies on the fact that the encoding is
 * log2(mtu / 256) + 1, so the byte size is 256 << (mtu - 1).
 */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	if (mtu < IB_MTU_256 || mtu > IB_MTU_4096)
		return -1;
	return 256 << (mtu - 1);
}
187
188enum ib_port_state {
189 IB_PORT_NOP = 0,
190 IB_PORT_DOWN = 1,
191 IB_PORT_INIT = 2,
192 IB_PORT_ARMED = 3,
193 IB_PORT_ACTIVE = 4,
194 IB_PORT_ACTIVE_DEFER = 5
195};
196
197enum ib_port_cap_flags {
198 IB_PORT_SM = 1 << 1,
199 IB_PORT_NOTICE_SUP = 1 << 2,
200 IB_PORT_TRAP_SUP = 1 << 3,
201 IB_PORT_OPT_IPD_SUP = 1 << 4,
202 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
203 IB_PORT_SL_MAP_SUP = 1 << 6,
204 IB_PORT_MKEY_NVRAM = 1 << 7,
205 IB_PORT_PKEY_NVRAM = 1 << 8,
206 IB_PORT_LED_INFO_SUP = 1 << 9,
207 IB_PORT_SM_DISABLED = 1 << 10,
208 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
209 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
210 IB_PORT_CM_SUP = 1 << 16,
211 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
212 IB_PORT_REINIT_SUP = 1 << 18,
213 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
214 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
215 IB_PORT_DR_NOTICE_SUP = 1 << 21,
216 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
217 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
218 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
219 IB_PORT_CLIENT_REG_SUP = 1 << 25
220};
221
/* Link width encodings; use ib_width_enum_to_int() for the lane count. */
enum ib_port_width {
	IB_WIDTH_1X  = 1,
	IB_WIDTH_4X  = 2,
	IB_WIDTH_8X  = 4,
	IB_WIDTH_12X = 8
};

/*
 * Map a link width encoding to its lane count (1, 4, 8 or 12), or -1
 * for an unrecognized encoding.  The encodings are individual bits,
 * not the lane counts themselves, hence the explicit mapping.
 */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	if (width == IB_WIDTH_1X)
		return 1;
	if (width == IB_WIDTH_4X)
		return 4;
	if (width == IB_WIDTH_8X)
		return 8;
	if (width == IB_WIDTH_12X)
		return 12;
	return -1;
}
239
240struct ib_protocol_stats {
241
242};
243
244struct iw_protocol_stats {
245 u64 ipInReceives;
246 u64 ipInHdrErrors;
247 u64 ipInTooBigErrors;
248 u64 ipInNoRoutes;
249 u64 ipInAddrErrors;
250 u64 ipInUnknownProtos;
251 u64 ipInTruncatedPkts;
252 u64 ipInDiscards;
253 u64 ipInDelivers;
254 u64 ipOutForwDatagrams;
255 u64 ipOutRequests;
256 u64 ipOutDiscards;
257 u64 ipOutNoRoutes;
258 u64 ipReasmTimeout;
259 u64 ipReasmReqds;
260 u64 ipReasmOKs;
261 u64 ipReasmFails;
262 u64 ipFragOKs;
263 u64 ipFragFails;
264 u64 ipFragCreates;
265 u64 ipInMcastPkts;
266 u64 ipOutMcastPkts;
267 u64 ipInBcastPkts;
268 u64 ipOutBcastPkts;
269
270 u64 tcpRtoAlgorithm;
271 u64 tcpRtoMin;
272 u64 tcpRtoMax;
273 u64 tcpMaxConn;
274 u64 tcpActiveOpens;
275 u64 tcpPassiveOpens;
276 u64 tcpAttemptFails;
277 u64 tcpEstabResets;
278 u64 tcpCurrEstab;
279 u64 tcpInSegs;
280 u64 tcpOutSegs;
281 u64 tcpRetransSegs;
282 u64 tcpInErrs;
283 u64 tcpOutRsts;
284};
285
286union rdma_protocol_stats {
287 struct ib_protocol_stats ib;
288 struct iw_protocol_stats iw;
289};
290
291struct ib_port_attr {
292 enum ib_port_state state;
293 enum ib_mtu max_mtu;
294 enum ib_mtu active_mtu;
295 int gid_tbl_len;
296 u32 port_cap_flags;
297 u32 max_msg_sz;
298 u32 bad_pkey_cntr;
299 u32 qkey_viol_cntr;
300 u16 pkey_tbl_len;
301 u16 lid;
302 u16 sm_lid;
303 u8 lmc;
304 u8 max_vl_num;
305 u8 sm_sl;
306 u8 subnet_timeout;
307 u8 init_type_reply;
308 u8 active_width;
309 u8 active_speed;
310 u8 phys_state;
311};
312
313enum ib_device_modify_flags {
314 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
315 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
316};
317
318struct ib_device_modify {
319 u64 sys_image_guid;
320 char node_desc[64];
321};
322
323enum ib_port_modify_flags {
324 IB_PORT_SHUTDOWN = 1,
325 IB_PORT_INIT_TYPE = (1<<2),
326 IB_PORT_RESET_QKEY_CNTR = (1<<3)
327};
328
329struct ib_port_modify {
330 u32 set_port_cap_mask;
331 u32 clr_port_cap_mask;
332 u8 init_type;
333};
334
335enum ib_event_type {
336 IB_EVENT_CQ_ERR,
337 IB_EVENT_QP_FATAL,
338 IB_EVENT_QP_REQ_ERR,
339 IB_EVENT_QP_ACCESS_ERR,
340 IB_EVENT_COMM_EST,
341 IB_EVENT_SQ_DRAINED,
342 IB_EVENT_PATH_MIG,
343 IB_EVENT_PATH_MIG_ERR,
344 IB_EVENT_DEVICE_FATAL,
345 IB_EVENT_PORT_ACTIVE,
346 IB_EVENT_PORT_ERR,
347 IB_EVENT_LID_CHANGE,
348 IB_EVENT_PKEY_CHANGE,
349 IB_EVENT_SM_CHANGE,
350 IB_EVENT_SRQ_ERR,
351 IB_EVENT_SRQ_LIMIT_REACHED,
352 IB_EVENT_QP_LAST_WQE_REACHED,
353 IB_EVENT_CLIENT_REREGISTER
354};
355
356struct ib_event {
357 struct ib_device *device;
358 union {
359 struct ib_cq *cq;
360 struct ib_qp *qp;
361 struct ib_srq *srq;
362 u8 port_num;
363 } element;
364 enum ib_event_type event;
365};
366
367struct ib_event_handler {
368 struct ib_device *device;
369 void (*handler)(struct ib_event_handler *, struct ib_event *);
370 struct list_head list;
371};
372
373#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
374 do { \
375 (_ptr)->device = _device; \
376 (_ptr)->handler = _handler; \
377 INIT_LIST_HEAD(&(_ptr)->list); \
378 } while (0)
379
380struct ib_global_route {
381 union ib_gid dgid;
382 u32 flow_label;
383 u8 sgid_index;
384 u8 hop_limit;
385 u8 traffic_class;
386};
387
388struct ib_grh {
389 __be32 version_tclass_flow;
390 __be16 paylen;
391 u8 next_hdr;
392 u8 hop_limit;
393 union ib_gid sgid;
394 union ib_gid dgid;
395};
396
397enum {
398 IB_MULTICAST_QPN = 0xffffff
399};
400
401#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
402
403enum ib_ah_flags {
404 IB_AH_GRH = 1
405};
406
/*
 * Static rate encodings.  Note the numeric values are wire encodings
 * and are NOT in increasing rate order (e.g. IB_RATE_10_GBPS == 3 but
 * IB_RATE_5_GBPS == 5), so never compare enum values to compare rates;
 * convert with ib_rate_to_mult()/mult_to_ib_rate() (declared below).
 */
enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};
419
420
421
422
423
424
425
426int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
427
428
429
430
431
432
433enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
434
435struct ib_ah_attr {
436 struct ib_global_route grh;
437 u16 dlid;
438 u8 sl;
439 u8 src_path_bits;
440 u8 static_rate;
441 u8 ah_flags;
442 u8 port_num;
443};
444
445enum ib_wc_status {
446 IB_WC_SUCCESS,
447 IB_WC_LOC_LEN_ERR,
448 IB_WC_LOC_QP_OP_ERR,
449 IB_WC_LOC_EEC_OP_ERR,
450 IB_WC_LOC_PROT_ERR,
451 IB_WC_WR_FLUSH_ERR,
452 IB_WC_MW_BIND_ERR,
453 IB_WC_BAD_RESP_ERR,
454 IB_WC_LOC_ACCESS_ERR,
455 IB_WC_REM_INV_REQ_ERR,
456 IB_WC_REM_ACCESS_ERR,
457 IB_WC_REM_OP_ERR,
458 IB_WC_RETRY_EXC_ERR,
459 IB_WC_RNR_RETRY_EXC_ERR,
460 IB_WC_LOC_RDD_VIOL_ERR,
461 IB_WC_REM_INV_RD_REQ_ERR,
462 IB_WC_REM_ABORT_ERR,
463 IB_WC_INV_EECN_ERR,
464 IB_WC_INV_EEC_STATE_ERR,
465 IB_WC_FATAL_ERR,
466 IB_WC_RESP_TIMEOUT_ERR,
467 IB_WC_GENERAL_ERR
468};
469
/* Operation type reported in ib_wc.opcode for a completed work request. */
enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,

	/*
	 * Receive opcodes start at bit 7 so a consumer can test whether
	 * a completion is for a receive with (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
489
/* Bits set in ib_wc.wc_flags qualifying a work completion. */
enum ib_wc_flags {
	IB_WC_GRH		= 1,		/* GRH present in received packet */
	IB_WC_WITH_IMM		= (1<<1),	/* ib_wc.ex.imm_data is valid */
	IB_WC_WITH_INVALIDATE	= (1<<2),	/* ib_wc.ex.invalidate_rkey is valid */
};
495
/*
 * Work completion: one entry returned by ib_poll_cq(), describing a
 * completed work request.
 */
struct ib_wc {
	u64			wr_id;		/* user-supplied ID from the posted WR */
	enum ib_wc_status	status;		/* IB_WC_SUCCESS or an error code */
	enum ib_wc_opcode	opcode;		/* operation that completed */
	u32			vendor_err;	/* vendor-specific error information */
	u32			byte_len;	/* bytes transferred */
	struct ib_qp	       *qp;		/* QP this completion belongs to */
	union {
		__be32		imm_data;	/* valid if IB_WC_WITH_IMM set in wc_flags */
		u32		invalidate_rkey; /* valid if IB_WC_WITH_INVALIDATE set */
	} ex;
	u32			src_qp;		/* remote (source) QP number */
	int			wc_flags;	/* bitmask of enum ib_wc_flags */
	u16			pkey_index;
	u16			slid;		/* source LID */
	u8			sl;		/* service level */
	u8			dlid_path_bits;
	u8			port_num;	/* NOTE(review): presumably meaningful only on multi-port nodes -- confirm */
	int			csum_ok;	/* name suggests HW-verified checksum -- confirm with provider */
};
516
/*
 * Flags for ib_req_notify_cq().  One of IB_CQ_SOLICITED or
 * IB_CQ_NEXT_COMP selects the trigger condition (IB_CQ_SOLICITED_MASK
 * covers those two bits); IB_CQ_REPORT_MISSED_EVENTS may be ORed in --
 * NOTE(review): confirm the return-value contract for missed events
 * against the provider drivers.
 */
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};
523
524enum ib_srq_attr_mask {
525 IB_SRQ_MAX_WR = 1 << 0,
526 IB_SRQ_LIMIT = 1 << 1,
527};
528
529struct ib_srq_attr {
530 u32 max_wr;
531 u32 max_sge;
532 u32 srq_limit;
533};
534
535struct ib_srq_init_attr {
536 void (*event_handler)(struct ib_event *, void *);
537 void *srq_context;
538 struct ib_srq_attr attr;
539};
540
541struct ib_qp_cap {
542 u32 max_send_wr;
543 u32 max_recv_wr;
544 u32 max_send_sge;
545 u32 max_recv_sge;
546 u32 max_inline_data;
547};
548
549enum ib_sig_type {
550 IB_SIGNAL_ALL_WR,
551 IB_SIGNAL_REQ_WR
552};
553
554enum ib_qp_type {
555
556
557
558
559
560 IB_QPT_SMI,
561 IB_QPT_GSI,
562
563 IB_QPT_RC,
564 IB_QPT_UC,
565 IB_QPT_UD,
566 IB_QPT_RAW_IPV6,
567 IB_QPT_RAW_ETHERTYPE
568};
569
570enum ib_qp_create_flags {
571 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
572 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
573};
574
575struct ib_qp_init_attr {
576 void (*event_handler)(struct ib_event *, void *);
577 void *qp_context;
578 struct ib_cq *send_cq;
579 struct ib_cq *recv_cq;
580 struct ib_srq *srq;
581 struct ib_qp_cap cap;
582 enum ib_sig_type sq_sig_type;
583 enum ib_qp_type qp_type;
584 enum ib_qp_create_flags create_flags;
585 u8 port_num;
586};
587
588enum ib_rnr_timeout {
589 IB_RNR_TIMER_655_36 = 0,
590 IB_RNR_TIMER_000_01 = 1,
591 IB_RNR_TIMER_000_02 = 2,
592 IB_RNR_TIMER_000_03 = 3,
593 IB_RNR_TIMER_000_04 = 4,
594 IB_RNR_TIMER_000_06 = 5,
595 IB_RNR_TIMER_000_08 = 6,
596 IB_RNR_TIMER_000_12 = 7,
597 IB_RNR_TIMER_000_16 = 8,
598 IB_RNR_TIMER_000_24 = 9,
599 IB_RNR_TIMER_000_32 = 10,
600 IB_RNR_TIMER_000_48 = 11,
601 IB_RNR_TIMER_000_64 = 12,
602 IB_RNR_TIMER_000_96 = 13,
603 IB_RNR_TIMER_001_28 = 14,
604 IB_RNR_TIMER_001_92 = 15,
605 IB_RNR_TIMER_002_56 = 16,
606 IB_RNR_TIMER_003_84 = 17,
607 IB_RNR_TIMER_005_12 = 18,
608 IB_RNR_TIMER_007_68 = 19,
609 IB_RNR_TIMER_010_24 = 20,
610 IB_RNR_TIMER_015_36 = 21,
611 IB_RNR_TIMER_020_48 = 22,
612 IB_RNR_TIMER_030_72 = 23,
613 IB_RNR_TIMER_040_96 = 24,
614 IB_RNR_TIMER_061_44 = 25,
615 IB_RNR_TIMER_081_92 = 26,
616 IB_RNR_TIMER_122_88 = 27,
617 IB_RNR_TIMER_163_84 = 28,
618 IB_RNR_TIMER_245_76 = 29,
619 IB_RNR_TIMER_327_68 = 30,
620 IB_RNR_TIMER_491_52 = 31
621};
622
623enum ib_qp_attr_mask {
624 IB_QP_STATE = 1,
625 IB_QP_CUR_STATE = (1<<1),
626 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
627 IB_QP_ACCESS_FLAGS = (1<<3),
628 IB_QP_PKEY_INDEX = (1<<4),
629 IB_QP_PORT = (1<<5),
630 IB_QP_QKEY = (1<<6),
631 IB_QP_AV = (1<<7),
632 IB_QP_PATH_MTU = (1<<8),
633 IB_QP_TIMEOUT = (1<<9),
634 IB_QP_RETRY_CNT = (1<<10),
635 IB_QP_RNR_RETRY = (1<<11),
636 IB_QP_RQ_PSN = (1<<12),
637 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
638 IB_QP_ALT_PATH = (1<<14),
639 IB_QP_MIN_RNR_TIMER = (1<<15),
640 IB_QP_SQ_PSN = (1<<16),
641 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
642 IB_QP_PATH_MIG_STATE = (1<<18),
643 IB_QP_CAP = (1<<19),
644 IB_QP_DEST_QPN = (1<<20)
645};
646
647enum ib_qp_state {
648 IB_QPS_RESET,
649 IB_QPS_INIT,
650 IB_QPS_RTR,
651 IB_QPS_RTS,
652 IB_QPS_SQD,
653 IB_QPS_SQE,
654 IB_QPS_ERR
655};
656
657enum ib_mig_state {
658 IB_MIG_MIGRATED,
659 IB_MIG_REARM,
660 IB_MIG_ARMED
661};
662
663struct ib_qp_attr {
664 enum ib_qp_state qp_state;
665 enum ib_qp_state cur_qp_state;
666 enum ib_mtu path_mtu;
667 enum ib_mig_state path_mig_state;
668 u32 qkey;
669 u32 rq_psn;
670 u32 sq_psn;
671 u32 dest_qp_num;
672 int qp_access_flags;
673 struct ib_qp_cap cap;
674 struct ib_ah_attr ah_attr;
675 struct ib_ah_attr alt_ah_attr;
676 u16 pkey_index;
677 u16 alt_pkey_index;
678 u8 en_sqd_async_notify;
679 u8 sq_draining;
680 u8 max_rd_atomic;
681 u8 max_dest_rd_atomic;
682 u8 min_rnr_timer;
683 u8 port_num;
684 u8 timeout;
685 u8 retry_cnt;
686 u8 rnr_retry;
687 u8 alt_port_num;
688 u8 alt_timeout;
689};
690
691enum ib_wr_opcode {
692 IB_WR_RDMA_WRITE,
693 IB_WR_RDMA_WRITE_WITH_IMM,
694 IB_WR_SEND,
695 IB_WR_SEND_WITH_IMM,
696 IB_WR_RDMA_READ,
697 IB_WR_ATOMIC_CMP_AND_SWP,
698 IB_WR_ATOMIC_FETCH_AND_ADD,
699 IB_WR_LSO,
700 IB_WR_SEND_WITH_INV,
701 IB_WR_RDMA_READ_WITH_INV,
702 IB_WR_LOCAL_INV,
703 IB_WR_FAST_REG_MR,
704 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
705 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
706};
707
/* Per-WR flags for ib_send_wr.send_flags. */
enum ib_send_flags {
	IB_SEND_FENCE		= 1,		/* fence on previous RDMA reads/atomics */
	IB_SEND_SIGNALED	= (1<<1),	/* generate a completion for this WR */
	IB_SEND_SOLICITED	= (1<<2),	/* set solicited-event bit in message */
	IB_SEND_INLINE		= (1<<3),	/* copy data inline rather than via lkey */
	IB_SEND_IP_CSUM		= (1<<4)	/* request checksum offload (see IB_DEVICE_UD_IP_CSUM) */
};
715
716struct ib_sge {
717 u64 addr;
718 u32 length;
719 u32 lkey;
720};
721
722struct ib_fast_reg_page_list {
723 struct ib_device *device;
724 u64 *page_list;
725 unsigned int max_page_list_len;
726};
727
/*
 * Send work request, posted via ib_post_send().  Which arm of the
 * @wr union is valid is determined by @opcode: rdma for RDMA
 * read/write, atomic for (masked) compare&swap / fetch&add, ud for
 * UD sends, fast_reg for IB_WR_FAST_REG_MR.
 */
struct ib_send_wr {
	struct ib_send_wr      *next;		/* next WR in NULL-terminated list */
	u64			wr_id;		/* user-defined ID, echoed in ib_wc.wr_id */
	struct ib_sge	       *sg_list;	/* scatter/gather array of num_sge entries */
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;	/* bitmask of enum ib_send_flags */
	union {
		__be32		imm_data;	/* for *_WITH_IMM opcodes */
		u32		invalidate_rkey; /* for SEND_WITH_INV / LOCAL_INV */
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;				/* RDMA read/write target */
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;	/* masked atomics only */
			u64	swap_mask;		/* masked atomics only */
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;	/* address handle for the destination */
			void   *header;		/* LSO header, for IB_WR_LSO */
			int	hlen;		/* LSO header length */
			int	mss;		/* LSO maximum segment size */
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index;	/* NOTE(review): presumably GSI QPs only -- confirm */
			u8	port_num;
		} ud;
		struct {
			u64	iova_start;
			struct ib_fast_reg_page_list *page_list;
			unsigned int	page_shift;
			unsigned int	page_list_len;
			u32	length;
			int	access_flags;	/* enum ib_access_flags for the new mapping */
			u32	rkey;
		} fast_reg;
	} wr;
};
773
774struct ib_recv_wr {
775 struct ib_recv_wr *next;
776 u64 wr_id;
777 struct ib_sge *sg_list;
778 int num_sge;
779};
780
/* Memory-region access rights (mr_access_flags / access_flags bitmasks). */
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,		/* local write (required for receives) */
	IB_ACCESS_REMOTE_WRITE	= (1<<1),	/* remote RDMA write */
	IB_ACCESS_REMOTE_READ	= (1<<2),	/* remote RDMA read */
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),	/* remote atomic operations */
	IB_ACCESS_MW_BIND	= (1<<4)	/* memory window binding allowed */
};
788
789struct ib_phys_buf {
790 u64 addr;
791 u64 size;
792};
793
794struct ib_mr_attr {
795 struct ib_pd *pd;
796 u64 device_virt_addr;
797 u64 size;
798 int mr_access_flags;
799 u32 lkey;
800 u32 rkey;
801};
802
803enum ib_mr_rereg_flags {
804 IB_MR_REREG_TRANS = 1,
805 IB_MR_REREG_PD = (1<<1),
806 IB_MR_REREG_ACCESS = (1<<2)
807};
808
809struct ib_mw_bind {
810 struct ib_mr *mr;
811 u64 wr_id;
812 u64 addr;
813 u32 length;
814 int send_flags;
815 int mw_access_flags;
816};
817
818struct ib_fmr_attr {
819 int max_pages;
820 int max_maps;
821 u8 page_shift;
822};
823
824struct ib_ucontext {
825 struct ib_device *device;
826 struct list_head pd_list;
827 struct list_head mr_list;
828 struct list_head mw_list;
829 struct list_head cq_list;
830 struct list_head qp_list;
831 struct list_head srq_list;
832 struct list_head ah_list;
833 int closing;
834};
835
836struct ib_uobject {
837 u64 user_handle;
838 struct ib_ucontext *context;
839 void *object;
840 struct list_head list;
841 int id;
842 struct kref ref;
843 struct rw_semaphore mutex;
844 int live;
845};
846
/*
 * Driver-private command/response buffers passed through from user
 * space by the uverbs layer; accessed only via ib_copy_from_udata()
 * and ib_copy_to_udata().
 */
struct ib_udata {
	void __user *inbuf;	/* user buffer holding driver-private input */
	void __user *outbuf;	/* user buffer for driver-private response */
	size_t       inlen;	/* size of inbuf in bytes */
	size_t       outlen;	/* size of outbuf in bytes */
};
853
854struct ib_pd {
855 struct ib_device *device;
856 struct ib_uobject *uobject;
857 atomic_t usecnt;
858};
859
860struct ib_ah {
861 struct ib_device *device;
862 struct ib_pd *pd;
863 struct ib_uobject *uobject;
864};
865
866typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
867
868struct ib_cq {
869 struct ib_device *device;
870 struct ib_uobject *uobject;
871 ib_comp_handler comp_handler;
872 void (*event_handler)(struct ib_event *, void *);
873 void *cq_context;
874 int cqe;
875 atomic_t usecnt;
876};
877
878struct ib_srq {
879 struct ib_device *device;
880 struct ib_pd *pd;
881 struct ib_uobject *uobject;
882 void (*event_handler)(struct ib_event *, void *);
883 void *srq_context;
884 atomic_t usecnt;
885};
886
887struct ib_qp {
888 struct ib_device *device;
889 struct ib_pd *pd;
890 struct ib_cq *send_cq;
891 struct ib_cq *recv_cq;
892 struct ib_srq *srq;
893 struct ib_uobject *uobject;
894 void (*event_handler)(struct ib_event *, void *);
895 void *qp_context;
896 u32 qp_num;
897 enum ib_qp_type qp_type;
898};
899
900struct ib_mr {
901 struct ib_device *device;
902 struct ib_pd *pd;
903 struct ib_uobject *uobject;
904 u32 lkey;
905 u32 rkey;
906 atomic_t usecnt;
907};
908
909struct ib_mw {
910 struct ib_device *device;
911 struct ib_pd *pd;
912 struct ib_uobject *uobject;
913 u32 rkey;
914};
915
916struct ib_fmr {
917 struct ib_device *device;
918 struct ib_pd *pd;
919 struct list_head list;
920 u32 lkey;
921 u32 rkey;
922};
923
924struct ib_mad;
925struct ib_grh;
926
927enum ib_process_mad_flags {
928 IB_MAD_IGNORE_MKEY = 1,
929 IB_MAD_IGNORE_BKEY = 2,
930 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
931};
932
933enum ib_mad_result {
934 IB_MAD_RESULT_FAILURE = 0,
935 IB_MAD_RESULT_SUCCESS = 1 << 0,
936 IB_MAD_RESULT_REPLY = 1 << 1,
937 IB_MAD_RESULT_CONSUMED = 1 << 2
938};
939
940#define IB_DEVICE_NAME_MAX 64
941
942struct ib_cache {
943 rwlock_t lock;
944 struct ib_event_handler event_handler;
945 struct ib_pkey_cache **pkey_cache;
946 struct ib_gid_cache **gid_cache;
947 u8 *lmc_cache;
948};
949
950struct ib_dma_mapping_ops {
951 int (*mapping_error)(struct ib_device *dev,
952 u64 dma_addr);
953 u64 (*map_single)(struct ib_device *dev,
954 void *ptr, size_t size,
955 enum dma_data_direction direction);
956 void (*unmap_single)(struct ib_device *dev,
957 u64 addr, size_t size,
958 enum dma_data_direction direction);
959 u64 (*map_page)(struct ib_device *dev,
960 struct page *page, unsigned long offset,
961 size_t size,
962 enum dma_data_direction direction);
963 void (*unmap_page)(struct ib_device *dev,
964 u64 addr, size_t size,
965 enum dma_data_direction direction);
966 int (*map_sg)(struct ib_device *dev,
967 struct scatterlist *sg, int nents,
968 enum dma_data_direction direction);
969 void (*unmap_sg)(struct ib_device *dev,
970 struct scatterlist *sg, int nents,
971 enum dma_data_direction direction);
972 u64 (*dma_address)(struct ib_device *dev,
973 struct scatterlist *sg);
974 unsigned int (*dma_len)(struct ib_device *dev,
975 struct scatterlist *sg);
976 void (*sync_single_for_cpu)(struct ib_device *dev,
977 u64 dma_handle,
978 size_t size,
979 enum dma_data_direction dir);
980 void (*sync_single_for_device)(struct ib_device *dev,
981 u64 dma_handle,
982 size_t size,
983 enum dma_data_direction dir);
984 void *(*alloc_coherent)(struct ib_device *dev,
985 size_t size,
986 u64 *dma_handle,
987 gfp_t flag);
988 void (*free_coherent)(struct ib_device *dev,
989 size_t size, void *cpu_addr,
990 u64 dma_handle);
991};
992
993struct iw_cm_verbs;
994
995struct ib_device {
996 struct device *dma_device;
997
998 char name[IB_DEVICE_NAME_MAX];
999
1000 struct list_head event_handler_list;
1001 spinlock_t event_handler_lock;
1002
1003 spinlock_t client_data_lock;
1004 struct list_head core_list;
1005 struct list_head client_data_list;
1006
1007 struct ib_cache cache;
1008 int *pkey_tbl_len;
1009 int *gid_tbl_len;
1010
1011 int num_comp_vectors;
1012
1013 struct iw_cm_verbs *iwcm;
1014
1015 int (*get_protocol_stats)(struct ib_device *device,
1016 union rdma_protocol_stats *stats);
1017 int (*query_device)(struct ib_device *device,
1018 struct ib_device_attr *device_attr);
1019 int (*query_port)(struct ib_device *device,
1020 u8 port_num,
1021 struct ib_port_attr *port_attr);
1022 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1023 u8 port_num);
1024 int (*query_gid)(struct ib_device *device,
1025 u8 port_num, int index,
1026 union ib_gid *gid);
1027 int (*query_pkey)(struct ib_device *device,
1028 u8 port_num, u16 index, u16 *pkey);
1029 int (*modify_device)(struct ib_device *device,
1030 int device_modify_mask,
1031 struct ib_device_modify *device_modify);
1032 int (*modify_port)(struct ib_device *device,
1033 u8 port_num, int port_modify_mask,
1034 struct ib_port_modify *port_modify);
1035 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1036 struct ib_udata *udata);
1037 int (*dealloc_ucontext)(struct ib_ucontext *context);
1038 int (*mmap)(struct ib_ucontext *context,
1039 struct vm_area_struct *vma);
1040 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1041 struct ib_ucontext *context,
1042 struct ib_udata *udata);
1043 int (*dealloc_pd)(struct ib_pd *pd);
1044 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1045 struct ib_ah_attr *ah_attr);
1046 int (*modify_ah)(struct ib_ah *ah,
1047 struct ib_ah_attr *ah_attr);
1048 int (*query_ah)(struct ib_ah *ah,
1049 struct ib_ah_attr *ah_attr);
1050 int (*destroy_ah)(struct ib_ah *ah);
1051 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1052 struct ib_srq_init_attr *srq_init_attr,
1053 struct ib_udata *udata);
1054 int (*modify_srq)(struct ib_srq *srq,
1055 struct ib_srq_attr *srq_attr,
1056 enum ib_srq_attr_mask srq_attr_mask,
1057 struct ib_udata *udata);
1058 int (*query_srq)(struct ib_srq *srq,
1059 struct ib_srq_attr *srq_attr);
1060 int (*destroy_srq)(struct ib_srq *srq);
1061 int (*post_srq_recv)(struct ib_srq *srq,
1062 struct ib_recv_wr *recv_wr,
1063 struct ib_recv_wr **bad_recv_wr);
1064 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1065 struct ib_qp_init_attr *qp_init_attr,
1066 struct ib_udata *udata);
1067 int (*modify_qp)(struct ib_qp *qp,
1068 struct ib_qp_attr *qp_attr,
1069 int qp_attr_mask,
1070 struct ib_udata *udata);
1071 int (*query_qp)(struct ib_qp *qp,
1072 struct ib_qp_attr *qp_attr,
1073 int qp_attr_mask,
1074 struct ib_qp_init_attr *qp_init_attr);
1075 int (*destroy_qp)(struct ib_qp *qp);
1076 int (*post_send)(struct ib_qp *qp,
1077 struct ib_send_wr *send_wr,
1078 struct ib_send_wr **bad_send_wr);
1079 int (*post_recv)(struct ib_qp *qp,
1080 struct ib_recv_wr *recv_wr,
1081 struct ib_recv_wr **bad_recv_wr);
1082 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
1083 int comp_vector,
1084 struct ib_ucontext *context,
1085 struct ib_udata *udata);
1086 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1087 u16 cq_period);
1088 int (*destroy_cq)(struct ib_cq *cq);
1089 int (*resize_cq)(struct ib_cq *cq, int cqe,
1090 struct ib_udata *udata);
1091 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1092 struct ib_wc *wc);
1093 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1094 int (*req_notify_cq)(struct ib_cq *cq,
1095 enum ib_cq_notify_flags flags);
1096 int (*req_ncomp_notif)(struct ib_cq *cq,
1097 int wc_cnt);
1098 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1099 int mr_access_flags);
1100 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1101 struct ib_phys_buf *phys_buf_array,
1102 int num_phys_buf,
1103 int mr_access_flags,
1104 u64 *iova_start);
1105 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1106 u64 start, u64 length,
1107 u64 virt_addr,
1108 int mr_access_flags,
1109 struct ib_udata *udata);
1110 int (*query_mr)(struct ib_mr *mr,
1111 struct ib_mr_attr *mr_attr);
1112 int (*dereg_mr)(struct ib_mr *mr);
1113 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1114 int max_page_list_len);
1115 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1116 int page_list_len);
1117 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1118 int (*rereg_phys_mr)(struct ib_mr *mr,
1119 int mr_rereg_mask,
1120 struct ib_pd *pd,
1121 struct ib_phys_buf *phys_buf_array,
1122 int num_phys_buf,
1123 int mr_access_flags,
1124 u64 *iova_start);
1125 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
1126 int (*bind_mw)(struct ib_qp *qp,
1127 struct ib_mw *mw,
1128 struct ib_mw_bind *mw_bind);
1129 int (*dealloc_mw)(struct ib_mw *mw);
1130 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1131 int mr_access_flags,
1132 struct ib_fmr_attr *fmr_attr);
1133 int (*map_phys_fmr)(struct ib_fmr *fmr,
1134 u64 *page_list, int list_len,
1135 u64 iova);
1136 int (*unmap_fmr)(struct list_head *fmr_list);
1137 int (*dealloc_fmr)(struct ib_fmr *fmr);
1138 int (*attach_mcast)(struct ib_qp *qp,
1139 union ib_gid *gid,
1140 u16 lid);
1141 int (*detach_mcast)(struct ib_qp *qp,
1142 union ib_gid *gid,
1143 u16 lid);
1144 int (*process_mad)(struct ib_device *device,
1145 int process_mad_flags,
1146 u8 port_num,
1147 struct ib_wc *in_wc,
1148 struct ib_grh *in_grh,
1149 struct ib_mad *in_mad,
1150 struct ib_mad *out_mad);
1151
1152 struct ib_dma_mapping_ops *dma_ops;
1153
1154 struct module *owner;
1155 struct device dev;
1156 struct kobject *ports_parent;
1157 struct list_head port_list;
1158
1159 enum {
1160 IB_DEV_UNINITIALIZED,
1161 IB_DEV_REGISTERED,
1162 IB_DEV_UNREGISTERED
1163 } reg_state;
1164
1165 int uverbs_abi_ver;
1166 u64 uverbs_cmd_mask;
1167
1168 char node_desc[64];
1169 __be64 node_guid;
1170 u32 local_dma_lkey;
1171 u8 node_type;
1172 u8 phys_port_cnt;
1173};
1174
1175struct ib_client {
1176 char *name;
1177 void (*add) (struct ib_device *);
1178 void (*remove)(struct ib_device *);
1179
1180 struct list_head list;
1181};
1182
1183struct ib_device *ib_alloc_device(size_t size);
1184void ib_dealloc_device(struct ib_device *device);
1185
1186int ib_register_device(struct ib_device *device,
1187 int (*port_callback)(struct ib_device *,
1188 u8, struct kobject *));
1189void ib_unregister_device(struct ib_device *device);
1190
1191int ib_register_client (struct ib_client *client);
1192void ib_unregister_client(struct ib_client *client);
1193
1194void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1195void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1196 void *data);
1197
/*
 * Copy @len bytes of driver-private command data from the user-space
 * input buffer in @udata into @dest.  Returns 0 on success or -EFAULT
 * if the user memory could not be read.
 */
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}
1202
/*
 * Copy @len bytes of driver-private response data from @src to the
 * user-space output buffer in @udata.  Returns 0 on success or
 * -EFAULT if the user memory could not be written.
 */
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1224 enum ib_qp_type type, enum ib_qp_attr_mask mask);
1225
1226int ib_register_event_handler (struct ib_event_handler *event_handler);
1227int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1228void ib_dispatch_event(struct ib_event *event);
1229
1230int ib_query_device(struct ib_device *device,
1231 struct ib_device_attr *device_attr);
1232
1233int ib_query_port(struct ib_device *device,
1234 u8 port_num, struct ib_port_attr *port_attr);
1235
1236enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1237 u8 port_num);
1238
1239int ib_query_gid(struct ib_device *device,
1240 u8 port_num, int index, union ib_gid *gid);
1241
1242int ib_query_pkey(struct ib_device *device,
1243 u8 port_num, u16 index, u16 *pkey);
1244
1245int ib_modify_device(struct ib_device *device,
1246 int device_modify_mask,
1247 struct ib_device_modify *device_modify);
1248
1249int ib_modify_port(struct ib_device *device,
1250 u8 port_num, int port_modify_mask,
1251 struct ib_port_modify *port_modify);
1252
1253int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1254 u8 *port_num, u16 *index);
1255
1256int ib_find_pkey(struct ib_device *device,
1257 u8 port_num, u16 pkey, u16 *index);
1258
1259
1260
1261
1262
1263
1264
1265
1266struct ib_pd *ib_alloc_pd(struct ib_device *device);
1267
1268
1269
1270
1271
1272int ib_dealloc_pd(struct ib_pd *pd);
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1296 struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1311 struct ib_grh *grh, u8 port_num);
1312
1313
1314
1315
1316
1317
1318
1319
1320int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1321
1322
1323
1324
1325
1326
1327
1328
1329int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1330
1331
1332
1333
1334
1335int ib_destroy_ah(struct ib_ah *ah);
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350struct ib_srq *ib_create_srq(struct ib_pd *pd,
1351 struct ib_srq_init_attr *srq_init_attr);
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365int ib_modify_srq(struct ib_srq *srq,
1366 struct ib_srq_attr *srq_attr,
1367 enum ib_srq_attr_mask srq_attr_mask);
1368
1369
1370
1371
1372
1373
1374
1375int ib_query_srq(struct ib_srq *srq,
1376 struct ib_srq_attr *srq_attr);
1377
1378
1379
1380
1381
1382int ib_destroy_srq(struct ib_srq *srq);
1383
1384
1385
1386
1387
1388
1389
1390
/*
 * ib_post_srq_recv - post a list of receive work requests to a shared
 * receive queue.
 * @srq: the SRQ to post to
 * @recv_wr: head of the NULL-terminated list of work requests
 * @bad_recv_wr: on failure, the driver stores here a pointer to the
 *	first work request that could not be posted
 *
 * Thin wrapper dispatching to the device driver's post_srq_recv method.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406struct ib_qp *ib_create_qp(struct ib_pd *pd,
1407 struct ib_qp_init_attr *qp_init_attr);
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418int ib_modify_qp(struct ib_qp *qp,
1419 struct ib_qp_attr *qp_attr,
1420 int qp_attr_mask);
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433int ib_query_qp(struct ib_qp *qp,
1434 struct ib_qp_attr *qp_attr,
1435 int qp_attr_mask,
1436 struct ib_qp_init_attr *qp_init_attr);
1437
1438
1439
1440
1441
1442int ib_destroy_qp(struct ib_qp *qp);
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
/*
 * ib_post_send - post a list of send work requests to a QP's send queue.
 * @qp: the QP to post to
 * @send_wr: head of the NULL-terminated list of work requests
 * @bad_send_wr: on failure, the driver stores here a pointer to the
 *	first work request that could not be posted
 *
 * Thin wrapper dispatching to the device driver's post_send method.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472static inline int ib_post_recv(struct ib_qp *qp,
1473 struct ib_recv_wr *recv_wr,
1474 struct ib_recv_wr **bad_recv_wr)
1475{
1476 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1477}
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: User context associated with the CQ, passed back through
 *   the completion and event handlers.
 * @cqe: The minimum number of entries the CQ should support.
 * @comp_vector: The completion vector to use for completion events.
 *   NOTE(review): presumably must be in [0, device->num_comp_vectors) —
 *   confirm against the provider.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum number of entries the CQ should support after the
 *   resize.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: Completion count before an event is generated.
 * @cq_period: Time (presumably in microseconds — confirm against the
 *   provider) before an event is generated.
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1536 struct ib_wc *wc)
1537{
1538 return cq->device->poll_cq(cq, num_entries, wc);
1539}
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
/**
 * ib_peek_cq - Returns the number of unreaped completions currently on
 *   the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: Caller-supplied count threshold.  NOTE(review): presumably
 *   the return value is capped relative to wc_cnt — confirm against the
 *   core implementation before relying on an exact contract.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580static inline int ib_req_notify_cq(struct ib_cq *cq,
1581 enum ib_cq_notify_flags flags)
1582{
1583 return cq->device->req_notify_cq(cq, flags);
1584}
1585
1586
1587
1588
1589
1590
1591
1592
1593static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1594{
1595 return cq->device->req_ncomp_notif ?
1596 cq->device->req_ncomp_notif(cq, wc_cnt) :
1597 -ENOSYS;
1598}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
/**
 * ib_get_dma_mr - Returns a memory region that can be used for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * NOTE(review): addresses used with the returned lkey/rkey are
 * presumably expected to come from the ib_dma_*() mapping helpers
 * defined below — confirm against driver usage.
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1611
1612
1613
1614
1615
1616
1617static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1618{
1619 if (dev->dma_ops)
1620 return dev->dma_ops->mapping_error(dev, dma_addr);
1621 return dma_mapping_error(dev->dma_device, dma_addr);
1622}
1623
1624
1625
1626
1627
1628
1629
1630
1631static inline u64 ib_dma_map_single(struct ib_device *dev,
1632 void *cpu_addr, size_t size,
1633 enum dma_data_direction direction)
1634{
1635 if (dev->dma_ops)
1636 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1637 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647static inline void ib_dma_unmap_single(struct ib_device *dev,
1648 u64 addr, size_t size,
1649 enum dma_data_direction direction)
1650{
1651 if (dev->dma_ops)
1652 dev->dma_ops->unmap_single(dev, addr, size, direction);
1653 else
1654 dma_unmap_single(dev->dma_device, addr, size, direction);
1655}
1656
/**
 * ib_dma_map_single_attrs - Map a kernel virtual address for DMA, with
 *   extra DMA attributes.
 * @dev: The device for which the mapping is created.
 * @cpu_addr: The kernel virtual address to map.
 * @size: The size of the region in bytes.
 * @direction: The direction of the DMA transfer.
 * @attrs: Optional DMA attributes for the mapping.
 *
 * NOTE(review): unlike the non-_attrs wrappers above, this does not
 * consult dev->dma_ops and always calls the core DMA API directly —
 * confirm no software-DMA provider needs an _attrs hook.
 */
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}
1665
1666static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1667 u64 addr, size_t size,
1668 enum dma_data_direction direction,
1669 struct dma_attrs *attrs)
1670{
1671 return dma_unmap_single_attrs(dev->dma_device, addr, size,
1672 direction, attrs);
1673}
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683static inline u64 ib_dma_map_page(struct ib_device *dev,
1684 struct page *page,
1685 unsigned long offset,
1686 size_t size,
1687 enum dma_data_direction direction)
1688{
1689 if (dev->dma_ops)
1690 return dev->dma_ops->map_page(dev, page, offset, size, direction);
1691 return dma_map_page(dev->dma_device, page, offset, size, direction);
1692}
1693
1694
1695
1696
1697
1698
1699
1700
1701static inline void ib_dma_unmap_page(struct ib_device *dev,
1702 u64 addr, size_t size,
1703 enum dma_data_direction direction)
1704{
1705 if (dev->dma_ops)
1706 dev->dma_ops->unmap_page(dev, addr, size, direction);
1707 else
1708 dma_unmap_page(dev->dma_device, addr, size, direction);
1709}
1710
1711
1712
1713
1714
1715
1716
1717
1718static inline int ib_dma_map_sg(struct ib_device *dev,
1719 struct scatterlist *sg, int nents,
1720 enum dma_data_direction direction)
1721{
1722 if (dev->dma_ops)
1723 return dev->dma_ops->map_sg(dev, sg, nents, direction);
1724 return dma_map_sg(dev->dma_device, sg, nents, direction);
1725}
1726
1727
1728
1729
1730
1731
1732
1733
1734static inline void ib_dma_unmap_sg(struct ib_device *dev,
1735 struct scatterlist *sg, int nents,
1736 enum dma_data_direction direction)
1737{
1738 if (dev->dma_ops)
1739 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1740 else
1741 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1742}
1743
/**
 * ib_dma_map_sg_attrs - Map a scatter/gather list for DMA, with extra
 *   DMA attributes.
 * @dev: The device for which the mapping is created.
 * @sg: The scatter/gather list to map.
 * @nents: The number of entries in @sg.
 * @direction: The direction of the DMA transfer.
 * @attrs: Optional DMA attributes for the mapping.
 *
 * NOTE(review): bypasses dev->dma_ops and calls the core DMA API
 * directly, unlike ib_dma_map_sg() above.
 */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}
1751
/**
 * ib_dma_unmap_sg_attrs - Destroy a mapping created by
 *   ib_dma_map_sg_attrs().
 * @dev: The device for which the mapping was created.
 * @sg: The scatter/gather list to unmap.
 * @nents: The number of entries in @sg.
 * @direction: The direction of the DMA transfer.
 * @attrs: The DMA attributes the mapping was created with.
 *
 * NOTE(review): bypasses dev->dma_ops and calls the core DMA API
 * directly, unlike ib_dma_unmap_sg() above.
 */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}
1759
1760
1761
1762
1763
1764static inline u64 ib_sg_dma_address(struct ib_device *dev,
1765 struct scatterlist *sg)
1766{
1767 if (dev->dma_ops)
1768 return dev->dma_ops->dma_address(dev, sg);
1769 return sg_dma_address(sg);
1770}
1771
1772
1773
1774
1775
1776
1777static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1778 struct scatterlist *sg)
1779{
1780 if (dev->dma_ops)
1781 return dev->dma_ops->dma_len(dev, sg);
1782 return sg_dma_len(sg);
1783}
1784
1785
1786
1787
1788
1789
1790
1791
1792static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1793 u64 addr,
1794 size_t size,
1795 enum dma_data_direction dir)
1796{
1797 if (dev->dma_ops)
1798 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1799 else
1800 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1801}
1802
1803
1804
1805
1806
1807
1808
1809
1810static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1811 u64 addr,
1812 size_t size,
1813 enum dma_data_direction dir)
1814{
1815 if (dev->dma_ops)
1816 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1817 else
1818 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1819}
1820
1821
1822
1823
1824
1825
1826
1827
1828static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1829 size_t size,
1830 u64 *dma_handle,
1831 gfp_t flag)
1832{
1833 if (dev->dma_ops)
1834 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1835 else {
1836 dma_addr_t handle;
1837 void *ret;
1838
1839 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1840 *dma_handle = handle;
1841 return ret;
1842 }
1843}
1844
1845
1846
1847
1848
1849
1850
1851
1852static inline void ib_dma_free_coherent(struct ib_device *dev,
1853 size_t size, void *cpu_addr,
1854 u64 dma_handle)
1855{
1856 if (dev->dma_ops)
1857 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1858 else
1859 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1860}
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
/**
 * ib_reg_phys_mr - Registers a list of physical buffers as a memory
 *   region for use by the HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: The number of entries in @phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The requested starting I/O virtual address of the region.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory
 *   region.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: New protection domain for the region, consulted only when the
 *   corresponding bit is set in @mr_rereg_mask.
 * @phys_buf_array: New list of physical buffers, consulted only when
 *   the corresponding bit is set in @mr_rereg_mask.
 * @num_phys_buf: The number of entries in @phys_buf_array.
 * @mr_access_flags: New memory access rights, consulted only when the
 *   corresponding bit is set in @mr_rereg_mask.
 * @iova_start: New starting I/O virtual address for the region.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: Returns the attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with fast
 *   registration work requests.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: The requested maximum physical buffer list length
 *   to be used with fast-register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
/**
 * ib_alloc_fast_reg_page_list - Allocates a page-list array for use
 *   with fast-register work requests.
 * @device: The device for which the page list is allocated.
 * @page_list_len: The minimum size of the page-list array.
 *
 * NOTE(review): the actual allocated length is presumably reported via
 * a field of the returned structure — confirm against the core code.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
				struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page-list array.
 * @page_list: The page list to deallocate.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1957
1958
1959
1960
1961
1962
1963
1964static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1965{
1966 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1967 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1968}
1969
1970
1971
1972
1973
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985static inline int ib_bind_mw(struct ib_qp *qp,
1986 struct ib_mw *mw,
1987 struct ib_mw_bind *mw_bind)
1988{
1989
1990 return mw->device->bind_mw ?
1991 mw->device->bind_mw(qp, mw, mw_bind) :
1992 -ENOSYS;
1993}
1994
1995
1996
1997
1998
/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region (FMR).
 * @pd: The protection domain associated with the region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * An FMR must be mapped (see ib_map_phys_fmr()) before it can be used
 * as part of a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);
2013
2014
2015
2016
2017
2018
2019
2020
2021static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2022 u64 *page_list, int list_len,
2023 u64 iova)
2024{
2025 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2026}
2027
2028
2029
2030
2031
/**
 * ib_unmap_fmr - Removes the mappings from a list of fast memory
 *   regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: The QP to attach to the multicast group.  NOTE(review):
 *   presumably must be a UD QP — confirm against the core
 *   implementation.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: The QP to detach from the multicast group.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2061
2062#endif
2063