#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
        u8 raw[16];
        struct {
                __be64 subnet_prefix;
                __be64 interface_id;
        } global;
};

enum rdma_node_type {
        RDMA_NODE_IB_CA = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR = 1,
        IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
        IB_DEVICE_RAW_MULTI = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT = (1<<8),
        IB_DEVICE_INIT_TYPE = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
        IB_DEVICE_SRQ_RESIZE = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
        IB_DEVICE_RESERVED = (1<<16),
        IB_DEVICE_MEM_WINDOW = (1<<17),
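        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksums on outgoing UD IPoIB
         * messages and can verify the validity of the checksum for
         * incoming messages.
         */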
        IB_DEVICE_UD_IP_CSUM = (1<<18),
        IB_DEVICE_UD_TSO = (1<<19),
        IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

struct ib_device_attr {
        u64 fw_ver;
        __be64 sys_image_guid;
        u64 max_mr_size;
        u64 page_size_cap;
        u32 vendor_id;
        u32 vendor_part_id;
        u32 hw_ver;
        int max_qp;
        int max_qp_wr;
        int device_cap_flags;
        int max_sge;
        int max_sge_rd;
        int max_cq;
        int max_cqe;
        int max_mr;
        int max_pd;
        int max_qp_rd_atom;
        int max_ee_rd_atom;
        int max_res_rd_atom;
        int max_qp_init_rd_atom;
        int max_ee_init_rd_atom;
        enum ib_atomic_cap atomic_cap;
        int max_ee;
        int max_rdd;
        int max_mw;
        int max_raw_ipv6_qp;
        int max_raw_ethy_qp;
        int max_mcast_grp;
        int max_mcast_qp_attach;
        int max_total_mcast_qp_attach;
        int max_ah;
        int max_fmr;
        int max_map_per_fmr;
        int max_srq;
        int max_srq_wr;
        int max_srq_sge;
        unsigned int max_fast_reg_page_list_len;
        u16 max_pkeys;
        u8 local_ca_ack_delay;
};

enum ib_mtu {
        IB_MTU_256 = 1,
        IB_MTU_512 = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256: return 256;
        case IB_MTU_512: return 512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default: return -1;
        }
}

enum ib_port_state {
        IB_PORT_NOP = 0,
        IB_PORT_DOWN = 1,
        IB_PORT_INIT = 2,
        IB_PORT_ARMED = 3,
        IB_PORT_ACTIVE = 4,
        IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM = 1 << 1,
        IB_PORT_NOTICE_SUP = 1 << 2,
        IB_PORT_TRAP_SUP = 1 << 3,
        IB_PORT_OPT_IPD_SUP = 1 << 4,
        IB_PORT_AUTO_MIGR_SUP = 1 << 5,
        IB_PORT_SL_MAP_SUP = 1 << 6,
        IB_PORT_MKEY_NVRAM = 1 << 7,
        IB_PORT_PKEY_NVRAM = 1 << 8,
        IB_PORT_LED_INFO_SUP = 1 << 9,
        IB_PORT_SM_DISABLED = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
        IB_PORT_CM_SUP = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
        IB_PORT_REINIT_SUP = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
        IB_PORT_DR_NOTICE_SUP = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP = 1 << 24,
        IB_PORT_CLIENT_REG_SUP = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X = 1,
        IB_WIDTH_4X = 2,
        IB_WIDTH_8X = 4,
        IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X: return 1;
        case IB_WIDTH_4X: return 4;
        case IB_WIDTH_8X: return 8;
        case IB_WIDTH_12X: return 12;
        default: return -1;
        }
}

struct ib_protocol_stats {

};

struct iw_protocol_stats {
        u64 ipInReceives;
        u64 ipInHdrErrors;
        u64 ipInTooBigErrors;
        u64 ipInNoRoutes;
        u64 ipInAddrErrors;
        u64 ipInUnknownProtos;
        u64 ipInTruncatedPkts;
        u64 ipInDiscards;
        u64 ipInDelivers;
        u64 ipOutForwDatagrams;
        u64 ipOutRequests;
        u64 ipOutDiscards;
        u64 ipOutNoRoutes;
        u64 ipReasmTimeout;
        u64 ipReasmReqds;
        u64 ipReasmOKs;
        u64 ipReasmFails;
        u64 ipFragOKs;
        u64 ipFragFails;
        u64 ipFragCreates;
        u64 ipInMcastPkts;
        u64 ipOutMcastPkts;
        u64 ipInBcastPkts;
        u64 ipOutBcastPkts;

        u64 tcpRtoAlgorithm;
        u64 tcpRtoMin;
        u64 tcpRtoMax;
        u64 tcpMaxConn;
        u64 tcpActiveOpens;
        u64 tcpPassiveOpens;
        u64 tcpAttemptFails;
        u64 tcpEstabResets;
        u64 tcpCurrEstab;
        u64 tcpInSegs;
        u64 tcpOutSegs;
        u64 tcpRetransSegs;
        u64 tcpInErrs;
        u64 tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats ib;
        struct iw_protocol_stats iw;
};

struct ib_port_attr {
        enum ib_port_state state;
        enum ib_mtu max_mtu;
        enum ib_mtu active_mtu;
        int gid_tbl_len;
        u32 port_cap_flags;
        u32 max_msg_sz;
        u32 bad_pkey_cntr;
        u32 qkey_viol_cntr;
        u16 pkey_tbl_len;
        u16 lid;
        u16 sm_lid;
        u8 lmc;
        u8 max_vl_num;
        u8 sm_sl;
        u8 subnet_timeout;
        u8 init_type_reply;
        u8 active_width;
        u8 active_speed;
        u8 phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

struct ib_device_modify {
        u64 sys_image_guid;
        char node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN = 1,
        IB_PORT_INIT_TYPE = (1<<2),
        IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
        u32 set_port_cap_mask;
        u32 clr_port_cap_mask;
        u8 init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
        struct ib_device *device;
        union {
                struct ib_cq *cq;
                struct ib_qp *qp;
                struct ib_srq *srq;
                u8 port_num;
        } element;
        enum ib_event_type event;
};

struct ib_event_handler {
        struct ib_device *device;
        void (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)

struct ib_global_route {
        union ib_gid dgid;
        u32 flow_label;
        u8 sgid_index;
        u8 hop_limit;
        u8 traffic_class;
};

struct ib_grh {
        __be32 version_tclass_flow;
        __be16 paylen;
        u8 next_hdr;
        u8 hop_limit;
        union ib_gid sgid;
        union ib_gid dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS = 5,
        IB_RATE_10_GBPS = 3,
        IB_RATE_20_GBPS = 6,
        IB_RATE_30_GBPS = 4,
        IB_RATE_40_GBPS = 7,
        IB_RATE_60_GBPS = 8,
        IB_RATE_80_GBPS = 9,
        IB_RATE_120_GBPS = 10
};

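/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */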
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

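/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */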
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
        struct ib_global_route grh;
        u16 dlid;
        u8 sl;
        u8 src_path_bits;
        u8 static_rate;
        u8 ah_flags;
        u8 port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
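        /*
         * Set value of IB_WC_RECV so consumers can test if a
         * completion is a receive by testing (opcode & IB_WC_RECV).
         */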
        IB_WC_RECV = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH = 1,
        IB_WC_WITH_IMM = (1<<1),
        IB_WC_WITH_INVALIDATE = (1<<2),
};

struct ib_wc {
        u64 wr_id;
        enum ib_wc_status status;
        enum ib_wc_opcode opcode;
        u32 vendor_err;
        u32 byte_len;
        struct ib_qp *qp;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        u32 src_qp;
        int wc_flags;
        u16 pkey_index;
        u16 slid;
        u8 sl;
        u8 dlid_path_bits;
        u8 port_num;
        int csum_ok;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED = 1 << 0,
        IB_CQ_NEXT_COMP = 1 << 1,
        IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR = 1 << 0,
        IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
        u32 max_wr;
        u32 max_sge;
        u32 srq_limit;
};

struct ib_srq_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        struct ib_srq_attr attr;
};

struct ib_qp_cap {
        u32 max_send_wr;
        u32 max_recv_wr;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
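        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */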
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETY
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};

struct ib_qp_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_qp_cap cap;
        enum ib_sig_type sq_sig_type;
        enum ib_qp_type qp_type;
        enum ib_qp_create_flags create_flags;
        u8 port_num;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 = 0,
        IB_RNR_TIMER_000_01 = 1,
        IB_RNR_TIMER_000_02 = 2,
        IB_RNR_TIMER_000_03 = 3,
        IB_RNR_TIMER_000_04 = 4,
        IB_RNR_TIMER_000_06 = 5,
        IB_RNR_TIMER_000_08 = 6,
        IB_RNR_TIMER_000_12 = 7,
        IB_RNR_TIMER_000_16 = 8,
        IB_RNR_TIMER_000_24 = 9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE = 1,
        IB_QP_CUR_STATE = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
        IB_QP_ACCESS_FLAGS = (1<<3),
        IB_QP_PKEY_INDEX = (1<<4),
        IB_QP_PORT = (1<<5),
        IB_QP_QKEY = (1<<6),
        IB_QP_AV = (1<<7),
        IB_QP_PATH_MTU = (1<<8),
        IB_QP_TIMEOUT = (1<<9),
        IB_QP_RETRY_CNT = (1<<10),
        IB_QP_RNR_RETRY = (1<<11),
        IB_QP_RQ_PSN = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
        IB_QP_ALT_PATH = (1<<14),
        IB_QP_MIN_RNR_TIMER = (1<<15),
        IB_QP_SQ_PSN = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
        IB_QP_PATH_MIG_STATE = (1<<18),
        IB_QP_CAP = (1<<19),
        IB_QP_DEST_QPN = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state qp_state;
        enum ib_qp_state cur_qp_state;
        enum ib_mtu path_mtu;
        enum ib_mig_state path_mig_state;
        u32 qkey;
        u32 rq_psn;
        u32 sq_psn;
        u32 dest_qp_num;
        int qp_access_flags;
        struct ib_qp_cap cap;
        struct ib_ah_attr ah_attr;
        struct ib_ah_attr alt_ah_attr;
        u16 pkey_index;
        u16 alt_pkey_index;
        u8 en_sqd_async_notify;
        u8 sq_draining;
        u8 max_rd_atomic;
        u8 max_dest_rd_atomic;
        u8 min_rnr_timer;
        u8 port_num;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u8 alt_port_num;
        u8 alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
};

enum ib_send_flags {
        IB_SEND_FENCE = 1,
        IB_SEND_SIGNALED = (1<<1),
        IB_SEND_SOLICITED = (1<<2),
        IB_SEND_INLINE = (1<<3),
        IB_SEND_IP_CSUM = (1<<4)
};

struct ib_sge {
        u64 addr;
        u32 length;
        u32 lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device *device;
        u64 *page_list;
        unsigned int max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
        enum ib_wr_opcode opcode;
        int send_flags;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        union {
                struct {
                        u64 remote_addr;
                        u32 rkey;
                } rdma;
                struct {
                        u64 remote_addr;
                        u64 compare_add;
                        u64 swap;
                        u32 rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void *header;
                        int hlen;
                        int mss;
                        u32 remote_qpn;
                        u32 remote_qkey;
                        u16 pkey_index;
                        u8 port_num;
                } ud;
                struct {
                        u64 iova_start;
                        struct ib_fast_reg_page_list *page_list;
                        unsigned int page_shift;
                        unsigned int page_list_len;
                        u32 length;
                        int access_flags;
                        u32 rkey;
                } fast_reg;
        } wr;
};

struct ib_recv_wr {
        struct ib_recv_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE = 1,
        IB_ACCESS_REMOTE_WRITE = (1<<1),
        IB_ACCESS_REMOTE_READ = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND = (1<<4)
};

struct ib_phys_buf {
        u64 addr;
        u64 size;
};

struct ib_mr_attr {
        struct ib_pd *pd;
        u64 device_virt_addr;
        u64 size;
        int mr_access_flags;
        u32 lkey;
        u32 rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS = 1,
        IB_MR_REREG_PD = (1<<1),
        IB_MR_REREG_ACCESS = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr *mr;
        u64 wr_id;
        u64 addr;
        u32 length;
        int send_flags;
        int mw_access_flags;
};

struct ib_fmr_attr {
        int max_pages;
        int max_maps;
        u8 page_shift;
};

struct ib_ucontext {
        struct ib_device *device;
        struct list_head pd_list;
        struct list_head mr_list;
        struct list_head mw_list;
        struct list_head cq_list;
        struct list_head qp_list;
        struct list_head srq_list;
        struct list_head ah_list;
        int closing;
};

struct ib_uobject {
        u64 user_handle;
        struct ib_ucontext *context;
        void *object;
        struct list_head list;
        int id;
        struct kref ref;
        struct rw_semaphore mutex;
        int live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t inlen;
        size_t outlen;
};

struct ib_pd {
        struct ib_device *device;
        struct ib_uobject *uobject;
        atomic_t usecnt;
};

struct ib_ah {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device *device;
        struct ib_uobject *uobject;
        ib_comp_handler comp_handler;
        void (*event_handler)(struct ib_event *, void *);
        void *cq_context;
        int cqe;
        atomic_t usecnt;
};

struct ib_srq {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        atomic_t usecnt;
};

struct ib_qp {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        u32 qp_num;
        enum ib_qp_type qp_type;
};

struct ib_mr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 lkey;
        u32 rkey;
        atomic_t usecnt;
};

struct ib_mw {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 rkey;
};

struct ib_fmr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct list_head list;
        u32 lkey;
        u32 rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY = 1,
        IB_MAD_IGNORE_BKEY = 2,
        IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE = 0,
        IB_MAD_RESULT_SUCCESS = 1 << 0,
        IB_MAD_RESULT_REPLY = 1 << 1,
        IB_MAD_RESULT_CONSUMED = 1 << 2
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache **pkey_cache;
        struct ib_gid_cache **gid_cache;
        u8 *lmc_cache;
};

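/*
 * Optional per-device overrides for the DMA mapping operations.  When
 * a driver supplies these callbacks, the ib_dma_*() wrappers defined
 * below dispatch to them instead of the generic DMA API.
 */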
struct ib_dma_mapping_ops {
        int (*mapping_error)(struct ib_device *dev, u64 dma_addr);
        u64 (*map_single)(struct ib_device *dev,
                          void *ptr, size_t size,
                          enum dma_data_direction direction);
        void (*unmap_single)(struct ib_device *dev,
                             u64 addr, size_t size,
                             enum dma_data_direction direction);
        u64 (*map_page)(struct ib_device *dev,
                        struct page *page, unsigned long offset,
                        size_t size,
                        enum dma_data_direction direction);
        void (*unmap_page)(struct ib_device *dev,
                           u64 addr, size_t size,
                           enum dma_data_direction direction);
        int (*map_sg)(struct ib_device *dev,
                      struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);
        void (*unmap_sg)(struct ib_device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction direction);
        u64 (*dma_address)(struct ib_device *dev,
                           struct scatterlist *sg);
        unsigned int (*dma_len)(struct ib_device *dev,
                                struct scatterlist *sg);
        void (*sync_single_for_cpu)(struct ib_device *dev,
                                    u64 dma_handle,
                                    size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct ib_device *dev,
                                       u64 dma_handle,
                                       size_t size,
                                       enum dma_data_direction dir);
        void *(*alloc_coherent)(struct ib_device *dev,
                                size_t size,
                                u64 *dma_handle,
                                gfp_t flag);
        void (*free_coherent)(struct ib_device *dev,
                              size_t size, void *cpu_addr,
                              u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
        struct device *dma_device;

        char name[IB_DEVICE_NAME_MAX];

        struct list_head event_handler_list;
        spinlock_t event_handler_lock;

        struct list_head core_list;
        struct list_head client_data_list;
        spinlock_t client_data_lock;

        struct ib_cache cache;
        int *pkey_tbl_len;
        int *gid_tbl_len;

        int num_comp_vectors;

        struct iw_cm_verbs *iwcm;

        int (*get_protocol_stats)(struct ib_device *device,
                                  union rdma_protocol_stats *stats);
        int (*query_device)(struct ib_device *device,
                            struct ib_device_attr *device_attr);
        int (*query_port)(struct ib_device *device,
                          u8 port_num,
                          struct ib_port_attr *port_attr);
        int (*query_gid)(struct ib_device *device,
                         u8 port_num, int index,
                         union ib_gid *gid);
        int (*query_pkey)(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey);
        int (*modify_device)(struct ib_device *device,
                             int device_modify_mask,
                             struct ib_device_modify *device_modify);
        int (*modify_port)(struct ib_device *device,
                           u8 port_num, int port_modify_mask,
                           struct ib_port_modify *port_modify);
        struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
                                              struct ib_udata *udata);
        int (*dealloc_ucontext)(struct ib_ucontext *context);
        int (*mmap)(struct ib_ucontext *context,
                    struct vm_area_struct *vma);
        struct ib_pd *(*alloc_pd)(struct ib_device *device,
                                  struct ib_ucontext *context,
                                  struct ib_udata *udata);
        int (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *(*create_ah)(struct ib_pd *pd,
                                   struct ib_ah_attr *ah_attr);
        int (*modify_ah)(struct ib_ah *ah,
                         struct ib_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah,
                        struct ib_ah_attr *ah_attr);
        int (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *(*create_srq)(struct ib_pd *pd,
                                     struct ib_srq_init_attr *srq_init_attr,
                                     struct ib_udata *udata);
        int (*modify_srq)(struct ib_srq *srq,
                          struct ib_srq_attr *srq_attr,
                          enum ib_srq_attr_mask srq_attr_mask,
                          struct ib_udata *udata);
        int (*query_srq)(struct ib_srq *srq,
                         struct ib_srq_attr *srq_attr);
        int (*destroy_srq)(struct ib_srq *srq);
        int (*post_srq_recv)(struct ib_srq *srq,
                             struct ib_recv_wr *recv_wr,
                             struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *(*create_qp)(struct ib_pd *pd,
                                   struct ib_qp_init_attr *qp_init_attr,
                                   struct ib_udata *udata);
        int (*modify_qp)(struct ib_qp *qp,
                         struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_udata *udata);
        int (*query_qp)(struct ib_qp *qp,
                        struct ib_qp_attr *qp_attr,
                        int qp_attr_mask,
                        struct ib_qp_init_attr *qp_init_attr);
        int (*destroy_qp)(struct ib_qp *qp);
        int (*post_send)(struct ib_qp *qp,
                         struct ib_send_wr *send_wr,
                         struct ib_send_wr **bad_send_wr);
        int (*post_recv)(struct ib_qp *qp,
                         struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
                                   int comp_vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata);
        int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                         u16 cq_period);
        int (*destroy_cq)(struct ib_cq *cq);
        int (*resize_cq)(struct ib_cq *cq, int cqe,
                         struct ib_udata *udata);
        int (*poll_cq)(struct ib_cq *cq, int num_entries,
                       struct ib_wc *wc);
        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int (*req_notify_cq)(struct ib_cq *cq,
                             enum ib_cq_notify_flags flags);
        int (*req_ncomp_notif)(struct ib_cq *cq,
                               int wc_cnt);
        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
                                    int mr_access_flags);
        struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
                                     struct ib_phys_buf *phys_buf_array,
                                     int num_phys_buf,
                                     int mr_access_flags,
                                     u64 *iova_start);
        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
                                     u64 start, u64 length,
                                     u64 virt_addr,
                                     int mr_access_flags,
                                     struct ib_udata *udata);
        int (*query_mr)(struct ib_mr *mr,
                        struct ib_mr_attr *mr_attr);
        int (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *(*alloc_fast_reg_mr)(struct ib_pd *pd,
                                           int max_page_list_len);
        struct ib_fast_reg_page_list *(*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                  int page_list_len);
        void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int (*rereg_phys_mr)(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
        struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
        int (*bind_mw)(struct ib_qp *qp,
                       struct ib_mw *mw,
                       struct ib_mw_bind *mw_bind);
        int (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
                                    int mr_access_flags,
                                    struct ib_fmr_attr *fmr_attr);
        int (*map_phys_fmr)(struct ib_fmr *fmr,
                            u64 *page_list, int list_len,
                            u64 iova);
        int (*unmap_fmr)(struct list_head *fmr_list);
        int (*dealloc_fmr)(struct ib_fmr *fmr);
        int (*attach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*detach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*process_mad)(struct ib_device *device,
                           int process_mad_flags,
                           u8 port_num,
                           struct ib_wc *in_wc,
                           struct ib_grh *in_grh,
                           struct ib_mad *in_mad,
                           struct ib_mad *out_mad);

        struct ib_dma_mapping_ops *dma_ops;

        struct module *owner;
        struct device dev;
        struct kobject *ports_parent;
        struct list_head port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        } reg_state;

        u64 uverbs_cmd_mask;
        int uverbs_abi_ver;

        char node_desc[64];
        __be64 node_guid;
        u32 local_dma_lkey;
        u8 node_type;
        u8 phys_port_cnt;
};

struct ib_client {
        char *name;
        void (*add)(struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

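/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */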
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);
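
/*
 * A minimal sketch of registering for asynchronous events; the handler
 * and variable names here are illustrative, not part of this header:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              printk(KERN_INFO "async event %d\n", event->event);
 *      }
 *
 *      struct ib_event_handler eh;
 *
 *      INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *      ib_register_event_handler(&eh);
 *      ...
 *      ib_unregister_event_handler(&eh);
 */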

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

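/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain groups resources (QPs, SRQs, MRs, MWs, AHs)
 * that are allowed to work with one another.
 */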
struct ib_pd *ib_alloc_pd(struct ib_device *device);

int ib_dealloc_pd(struct ib_pd *pd);

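/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */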
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_destroy_ah(struct ib_ah *ah);

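/**
 * ib_create_srq - Creates a SRQ associated with the specified
 *   protection domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 */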
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

int ib_destroy_srq(struct ib_srq *srq);

static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

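/**
 * ib_create_qp - Creates a QP associated with the specified
 *   protection domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */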
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

int ib_destroy_qp(struct ib_qp *qp);

static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}
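
/*
 * A minimal sketch of posting a single signaled send; QP setup, memory
 * registration, and the MY_SEND_COOKIE tag are illustrative, not part
 * of this header:
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ib_send_wr wr = {
 *              .wr_id      = MY_SEND_COOKIE,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IB_WR_SEND,
 *              .send_flags = IB_SEND_SIGNALED,
 *      };
 *      struct ib_send_wr *bad_wr;
 *
 *      if (ib_post_send(qp, &wr, &bad_wr))
 *              ...     (on failure, bad_wr points at the WR that failed)
 */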

static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

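/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events;
 *   must be >= 0 and < device->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */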
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);

int ib_resize_cq(struct ib_cq *cq, int cqe);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

int ib_destroy_cq(struct ib_cq *cq);

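/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries of &struct ib_wc where
 *   completions will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */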
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

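/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events.
 *
 * Return value: < 0 means an error occurred.  0 means notification
 * was requested successfully, and if IB_CQ_REPORT_MISSED_EVENTS was
 * passed in, no events were missed and it is safe to wait for another
 * event.  > 0 is returned only if IB_CQ_REPORT_MISSED_EVENTS was
 * passed in; it means the consumer must poll the CQ again to make
 * sure it is empty, to avoid missing an event because of a race
 * between requesting notification and an entry being added to the CQ.
 */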
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
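
/*
 * The canonical re-arm pattern: drain the CQ, request notification,
 * and re-poll if ib_req_notify_cq() hints that completions may have
 * been missed (sketch; wc handling omitted):
 *
 *      do {
 *              while (ib_poll_cq(cq, 1, &wc) > 0)
 *                      ...     (process wc here)
 *      } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */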

static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
               cq->device->req_ncomp_notif(cq, wc_cnt) :
               -ENOSYS;
}

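/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */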
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
                                      direction, attrs);
}

static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}

static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}

static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_address(dev, sg);
        return sg_dma_address(sg);
}

static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_len(dev, sg);
        return sg_dma_len(sg);
}

static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
        struct ib_device *device, int page_list_len);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

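/**
 * ib_update_fast_reg_key - Updates the key portion of the protection
 *   keys of a fast-register MR.
 * @mr: The memory region to update.
 * @newkey: The new key (the low 8 bits of lkey/rkey) to use.
 */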
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        return mw->device->bind_mw ?
               mw->device->bind_mw(qp, mw, mw_bind) :
               -ENOSYS;
}

int ib_dealloc_mw(struct ib_mw *mw);

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

int ib_unmap_fmr(struct list_head *fmr_list);

int ib_dealloc_fmr(struct ib_fmr *fmr);

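/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */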
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */