1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#if !defined(IB_VERBS_H)
42#define IB_VERBS_H
43
44#include <linux/types.h>
45#include <linux/device.h>
46#include <linux/mm.h>
47#include <linux/dma-mapping.h>
48#include <linux/kref.h>
49#include <linux/list.h>
50#include <linux/rwsem.h>
51#include <linux/scatterlist.h>
52
53#include <asm/atomic.h>
54#include <asm/uaccess.h>
55
56union ib_gid {
57 u8 raw[16];
58 struct {
59 __be64 subnet_prefix;
60 __be64 interface_id;
61 } global;
62};
63
64enum rdma_node_type {
65
66 RDMA_NODE_IB_CA = 1,
67 RDMA_NODE_IB_SWITCH,
68 RDMA_NODE_IB_ROUTER,
69 RDMA_NODE_RNIC
70};
71
72enum rdma_transport_type {
73 RDMA_TRANSPORT_IB,
74 RDMA_TRANSPORT_IWARP
75};
76
77enum rdma_transport_type
78rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
79
/* Optional device capabilities advertised via
 * ib_device_attr.device_cap_flags. */
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17)
};

/* Atomic-operation support level: none, atomic within this HCA only,
 * or IB_ATOMIC_GLOB — presumably atomic system-wide; confirm against
 * the IB spec before relying on the distinction. */
enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};
106
107struct ib_device_attr {
108 u64 fw_ver;
109 __be64 sys_image_guid;
110 u64 max_mr_size;
111 u64 page_size_cap;
112 u32 vendor_id;
113 u32 vendor_part_id;
114 u32 hw_ver;
115 int max_qp;
116 int max_qp_wr;
117 int device_cap_flags;
118 int max_sge;
119 int max_sge_rd;
120 int max_cq;
121 int max_cqe;
122 int max_mr;
123 int max_pd;
124 int max_qp_rd_atom;
125 int max_ee_rd_atom;
126 int max_res_rd_atom;
127 int max_qp_init_rd_atom;
128 int max_ee_init_rd_atom;
129 enum ib_atomic_cap atomic_cap;
130 int max_ee;
131 int max_rdd;
132 int max_mw;
133 int max_raw_ipv6_qp;
134 int max_raw_ethy_qp;
135 int max_mcast_grp;
136 int max_mcast_qp_attach;
137 int max_total_mcast_qp_attach;
138 int max_ah;
139 int max_fmr;
140 int max_map_per_fmr;
141 int max_srq;
142 int max_srq_wr;
143 int max_srq_sge;
144 u16 max_pkeys;
145 u8 local_ca_ack_delay;
146};
147
/* MTU values use the IB encoding: 1 = 256 bytes, ..., 5 = 4096 bytes. */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/* Convert an encoded MTU to its size in bytes; -1 for invalid values. */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}
167
/* Logical port states as reported in ib_port_attr.state. */
enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

/* PortInfo capability-mask bits.  Bit 0 is unused and bits 13-15 are
 * not defined here (values jump from 1<<12 to 1<<16). */
enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};
201
/* Link widths use the IB bit-field encoding (not the lane count). */
enum ib_port_width {
	IB_WIDTH_1X  = 1,
	IB_WIDTH_4X  = 2,
	IB_WIDTH_8X  = 4,
	IB_WIDTH_12X = 8
};

/* Convert an encoded link width to its lane count; -1 if invalid. */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}
219
220struct ib_port_attr {
221 enum ib_port_state state;
222 enum ib_mtu max_mtu;
223 enum ib_mtu active_mtu;
224 int gid_tbl_len;
225 u32 port_cap_flags;
226 u32 max_msg_sz;
227 u32 bad_pkey_cntr;
228 u32 qkey_viol_cntr;
229 u16 pkey_tbl_len;
230 u16 lid;
231 u16 sm_lid;
232 u8 lmc;
233 u8 max_vl_num;
234 u8 sm_sl;
235 u8 subnet_timeout;
236 u8 init_type_reply;
237 u8 active_width;
238 u8 active_speed;
239 u8 phys_state;
240};
241
242enum ib_device_modify_flags {
243 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
244 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
245};
246
247struct ib_device_modify {
248 u64 sys_image_guid;
249 char node_desc[64];
250};
251
252enum ib_port_modify_flags {
253 IB_PORT_SHUTDOWN = 1,
254 IB_PORT_INIT_TYPE = (1<<2),
255 IB_PORT_RESET_QKEY_CNTR = (1<<3)
256};
257
258struct ib_port_modify {
259 u32 set_port_cap_mask;
260 u32 clr_port_cap_mask;
261 u8 init_type;
262};
263
264enum ib_event_type {
265 IB_EVENT_CQ_ERR,
266 IB_EVENT_QP_FATAL,
267 IB_EVENT_QP_REQ_ERR,
268 IB_EVENT_QP_ACCESS_ERR,
269 IB_EVENT_COMM_EST,
270 IB_EVENT_SQ_DRAINED,
271 IB_EVENT_PATH_MIG,
272 IB_EVENT_PATH_MIG_ERR,
273 IB_EVENT_DEVICE_FATAL,
274 IB_EVENT_PORT_ACTIVE,
275 IB_EVENT_PORT_ERR,
276 IB_EVENT_LID_CHANGE,
277 IB_EVENT_PKEY_CHANGE,
278 IB_EVENT_SM_CHANGE,
279 IB_EVENT_SRQ_ERR,
280 IB_EVENT_SRQ_LIMIT_REACHED,
281 IB_EVENT_QP_LAST_WQE_REACHED,
282 IB_EVENT_CLIENT_REREGISTER
283};
284
285struct ib_event {
286 struct ib_device *device;
287 union {
288 struct ib_cq *cq;
289 struct ib_qp *qp;
290 struct ib_srq *srq;
291 u8 port_num;
292 } element;
293 enum ib_event_type event;
294};
295
296struct ib_event_handler {
297 struct ib_device *device;
298 void (*handler)(struct ib_event_handler *, struct ib_event *);
299 struct list_head list;
300};
301
/* Initialize an ib_event_handler: bind it to a device and a callback
 * and reset its list linkage.  Wrapped in do/while(0) so the macro
 * behaves as a single statement. */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
308
309struct ib_global_route {
310 union ib_gid dgid;
311 u32 flow_label;
312 u8 sgid_index;
313 u8 hop_limit;
314 u8 traffic_class;
315};
316
317struct ib_grh {
318 __be32 version_tclass_flow;
319 __be16 paylen;
320 u8 next_hdr;
321 u8 hop_limit;
322 union ib_gid sgid;
323 union ib_gid dgid;
324};
325
326enum {
327 IB_MULTICAST_QPN = 0xffffff
328};
329
330#define IB_LID_PERMISSIVE __constant_htons(0xFFFF)
331
332enum ib_ah_flags {
333 IB_AH_GRH = 1
334};
335
336enum ib_rate {
337 IB_RATE_PORT_CURRENT = 0,
338 IB_RATE_2_5_GBPS = 2,
339 IB_RATE_5_GBPS = 5,
340 IB_RATE_10_GBPS = 3,
341 IB_RATE_20_GBPS = 6,
342 IB_RATE_30_GBPS = 4,
343 IB_RATE_40_GBPS = 7,
344 IB_RATE_60_GBPS = 8,
345 IB_RATE_80_GBPS = 9,
346 IB_RATE_120_GBPS = 10
347};
348
349
350
351
352
353
354
355int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
356
357
358
359
360
361
362enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
363
364struct ib_ah_attr {
365 struct ib_global_route grh;
366 u16 dlid;
367 u8 sl;
368 u8 src_path_bits;
369 u8 static_rate;
370 u8 ah_flags;
371 u8 port_num;
372};
373
374enum ib_wc_status {
375 IB_WC_SUCCESS,
376 IB_WC_LOC_LEN_ERR,
377 IB_WC_LOC_QP_OP_ERR,
378 IB_WC_LOC_EEC_OP_ERR,
379 IB_WC_LOC_PROT_ERR,
380 IB_WC_WR_FLUSH_ERR,
381 IB_WC_MW_BIND_ERR,
382 IB_WC_BAD_RESP_ERR,
383 IB_WC_LOC_ACCESS_ERR,
384 IB_WC_REM_INV_REQ_ERR,
385 IB_WC_REM_ACCESS_ERR,
386 IB_WC_REM_OP_ERR,
387 IB_WC_RETRY_EXC_ERR,
388 IB_WC_RNR_RETRY_EXC_ERR,
389 IB_WC_LOC_RDD_VIOL_ERR,
390 IB_WC_REM_INV_RD_REQ_ERR,
391 IB_WC_REM_ABORT_ERR,
392 IB_WC_INV_EECN_ERR,
393 IB_WC_INV_EEC_STATE_ERR,
394 IB_WC_FATAL_ERR,
395 IB_WC_RESP_TIMEOUT_ERR,
396 IB_WC_GENERAL_ERR
397};
398
399enum ib_wc_opcode {
400 IB_WC_SEND,
401 IB_WC_RDMA_WRITE,
402 IB_WC_RDMA_READ,
403 IB_WC_COMP_SWAP,
404 IB_WC_FETCH_ADD,
405 IB_WC_BIND_MW,
406
407
408
409
410 IB_WC_RECV = 1 << 7,
411 IB_WC_RECV_RDMA_WITH_IMM
412};
413
414enum ib_wc_flags {
415 IB_WC_GRH = 1,
416 IB_WC_WITH_IMM = (1<<1)
417};
418
419struct ib_wc {
420 u64 wr_id;
421 enum ib_wc_status status;
422 enum ib_wc_opcode opcode;
423 u32 vendor_err;
424 u32 byte_len;
425 struct ib_qp *qp;
426 __be32 imm_data;
427 u32 src_qp;
428 int wc_flags;
429 u16 pkey_index;
430 u16 slid;
431 u8 sl;
432 u8 dlid_path_bits;
433 u8 port_num;
434};
435
436enum ib_cq_notify_flags {
437 IB_CQ_SOLICITED = 1 << 0,
438 IB_CQ_NEXT_COMP = 1 << 1,
439 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
440 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
441};
442
443enum ib_srq_attr_mask {
444 IB_SRQ_MAX_WR = 1 << 0,
445 IB_SRQ_LIMIT = 1 << 1,
446};
447
448struct ib_srq_attr {
449 u32 max_wr;
450 u32 max_sge;
451 u32 srq_limit;
452};
453
454struct ib_srq_init_attr {
455 void (*event_handler)(struct ib_event *, void *);
456 void *srq_context;
457 struct ib_srq_attr attr;
458};
459
460struct ib_qp_cap {
461 u32 max_send_wr;
462 u32 max_recv_wr;
463 u32 max_send_sge;
464 u32 max_recv_sge;
465 u32 max_inline_data;
466};
467
468enum ib_sig_type {
469 IB_SIGNAL_ALL_WR,
470 IB_SIGNAL_REQ_WR
471};
472
473enum ib_qp_type {
474
475
476
477
478
479 IB_QPT_SMI,
480 IB_QPT_GSI,
481
482 IB_QPT_RC,
483 IB_QPT_UC,
484 IB_QPT_UD,
485 IB_QPT_RAW_IPV6,
486 IB_QPT_RAW_ETY
487};
488
489struct ib_qp_init_attr {
490 void (*event_handler)(struct ib_event *, void *);
491 void *qp_context;
492 struct ib_cq *send_cq;
493 struct ib_cq *recv_cq;
494 struct ib_srq *srq;
495 struct ib_qp_cap cap;
496 enum ib_sig_type sq_sig_type;
497 enum ib_qp_type qp_type;
498 u8 port_num;
499};
500
/* Encoded RNR NAK timer values.  The name encodes the delay in
 * milliseconds, e.g. IB_RNR_TIMER_655_36 is ~655.36 ms and
 * IB_RNR_TIMER_000_01 is ~0.01 ms. */
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
535
/* Selects which fields of struct ib_qp_attr an ib_modify_qp() call
 * reads; validity per transition is checked by ib_modify_qp_is_ok(). */
enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

/* QP state machine: RESET -> INIT -> RTR -> RTS, plus drained (SQD),
 * send-queue error (SQE) and full error (ERR) states. */
enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

/* Automatic path migration state. */
enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};
575
576struct ib_qp_attr {
577 enum ib_qp_state qp_state;
578 enum ib_qp_state cur_qp_state;
579 enum ib_mtu path_mtu;
580 enum ib_mig_state path_mig_state;
581 u32 qkey;
582 u32 rq_psn;
583 u32 sq_psn;
584 u32 dest_qp_num;
585 int qp_access_flags;
586 struct ib_qp_cap cap;
587 struct ib_ah_attr ah_attr;
588 struct ib_ah_attr alt_ah_attr;
589 u16 pkey_index;
590 u16 alt_pkey_index;
591 u8 en_sqd_async_notify;
592 u8 sq_draining;
593 u8 max_rd_atomic;
594 u8 max_dest_rd_atomic;
595 u8 min_rnr_timer;
596 u8 port_num;
597 u8 timeout;
598 u8 retry_cnt;
599 u8 rnr_retry;
600 u8 alt_port_num;
601 u8 alt_timeout;
602};
603
604enum ib_wr_opcode {
605 IB_WR_RDMA_WRITE,
606 IB_WR_RDMA_WRITE_WITH_IMM,
607 IB_WR_SEND,
608 IB_WR_SEND_WITH_IMM,
609 IB_WR_RDMA_READ,
610 IB_WR_ATOMIC_CMP_AND_SWP,
611 IB_WR_ATOMIC_FETCH_AND_ADD
612};
613
614enum ib_send_flags {
615 IB_SEND_FENCE = 1,
616 IB_SEND_SIGNALED = (1<<1),
617 IB_SEND_SOLICITED = (1<<2),
618 IB_SEND_INLINE = (1<<3)
619};
620
621struct ib_sge {
622 u64 addr;
623 u32 length;
624 u32 lkey;
625};
626
627struct ib_send_wr {
628 struct ib_send_wr *next;
629 u64 wr_id;
630 struct ib_sge *sg_list;
631 int num_sge;
632 enum ib_wr_opcode opcode;
633 int send_flags;
634 __be32 imm_data;
635 union {
636 struct {
637 u64 remote_addr;
638 u32 rkey;
639 } rdma;
640 struct {
641 u64 remote_addr;
642 u64 compare_add;
643 u64 swap;
644 u32 rkey;
645 } atomic;
646 struct {
647 struct ib_ah *ah;
648 u32 remote_qpn;
649 u32 remote_qkey;
650 u16 pkey_index;
651 u8 port_num;
652 } ud;
653 } wr;
654};
655
656struct ib_recv_wr {
657 struct ib_recv_wr *next;
658 u64 wr_id;
659 struct ib_sge *sg_list;
660 int num_sge;
661};
662
663enum ib_access_flags {
664 IB_ACCESS_LOCAL_WRITE = 1,
665 IB_ACCESS_REMOTE_WRITE = (1<<1),
666 IB_ACCESS_REMOTE_READ = (1<<2),
667 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
668 IB_ACCESS_MW_BIND = (1<<4)
669};
670
671struct ib_phys_buf {
672 u64 addr;
673 u64 size;
674};
675
676struct ib_mr_attr {
677 struct ib_pd *pd;
678 u64 device_virt_addr;
679 u64 size;
680 int mr_access_flags;
681 u32 lkey;
682 u32 rkey;
683};
684
685enum ib_mr_rereg_flags {
686 IB_MR_REREG_TRANS = 1,
687 IB_MR_REREG_PD = (1<<1),
688 IB_MR_REREG_ACCESS = (1<<2)
689};
690
691struct ib_mw_bind {
692 struct ib_mr *mr;
693 u64 wr_id;
694 u64 addr;
695 u32 length;
696 int send_flags;
697 int mw_access_flags;
698};
699
700struct ib_fmr_attr {
701 int max_pages;
702 int max_maps;
703 u8 page_shift;
704};
705
706struct ib_ucontext {
707 struct ib_device *device;
708 struct list_head pd_list;
709 struct list_head mr_list;
710 struct list_head mw_list;
711 struct list_head cq_list;
712 struct list_head qp_list;
713 struct list_head srq_list;
714 struct list_head ah_list;
715 int closing;
716};
717
718struct ib_uobject {
719 u64 user_handle;
720 struct ib_ucontext *context;
721 void *object;
722 struct list_head list;
723 u32 id;
724 struct kref ref;
725 struct rw_semaphore mutex;
726 int live;
727};
728
729struct ib_udata {
730 void __user *inbuf;
731 void __user *outbuf;
732 size_t inlen;
733 size_t outlen;
734};
735
736struct ib_pd {
737 struct ib_device *device;
738 struct ib_uobject *uobject;
739 atomic_t usecnt;
740};
741
742struct ib_ah {
743 struct ib_device *device;
744 struct ib_pd *pd;
745 struct ib_uobject *uobject;
746};
747
748typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
749
750struct ib_cq {
751 struct ib_device *device;
752 struct ib_uobject *uobject;
753 ib_comp_handler comp_handler;
754 void (*event_handler)(struct ib_event *, void *);
755 void * cq_context;
756 int cqe;
757 atomic_t usecnt;
758};
759
760struct ib_srq {
761 struct ib_device *device;
762 struct ib_pd *pd;
763 struct ib_uobject *uobject;
764 void (*event_handler)(struct ib_event *, void *);
765 void *srq_context;
766 atomic_t usecnt;
767};
768
769struct ib_qp {
770 struct ib_device *device;
771 struct ib_pd *pd;
772 struct ib_cq *send_cq;
773 struct ib_cq *recv_cq;
774 struct ib_srq *srq;
775 struct ib_uobject *uobject;
776 void (*event_handler)(struct ib_event *, void *);
777 void *qp_context;
778 u32 qp_num;
779 enum ib_qp_type qp_type;
780};
781
782struct ib_mr {
783 struct ib_device *device;
784 struct ib_pd *pd;
785 struct ib_uobject *uobject;
786 u32 lkey;
787 u32 rkey;
788 atomic_t usecnt;
789};
790
791struct ib_mw {
792 struct ib_device *device;
793 struct ib_pd *pd;
794 struct ib_uobject *uobject;
795 u32 rkey;
796};
797
798struct ib_fmr {
799 struct ib_device *device;
800 struct ib_pd *pd;
801 struct list_head list;
802 u32 lkey;
803 u32 rkey;
804};
805
806struct ib_mad;
807struct ib_grh;
808
809enum ib_process_mad_flags {
810 IB_MAD_IGNORE_MKEY = 1,
811 IB_MAD_IGNORE_BKEY = 2,
812 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
813};
814
815enum ib_mad_result {
816 IB_MAD_RESULT_FAILURE = 0,
817 IB_MAD_RESULT_SUCCESS = 1 << 0,
818 IB_MAD_RESULT_REPLY = 1 << 1,
819 IB_MAD_RESULT_CONSUMED = 1 << 2
820};
821
822#define IB_DEVICE_NAME_MAX 64
823
824struct ib_cache {
825 rwlock_t lock;
826 struct ib_event_handler event_handler;
827 struct ib_pkey_cache **pkey_cache;
828 struct ib_gid_cache **gid_cache;
829 u8 *lmc_cache;
830};
831
832struct ib_dma_mapping_ops {
833 int (*mapping_error)(struct ib_device *dev,
834 u64 dma_addr);
835 u64 (*map_single)(struct ib_device *dev,
836 void *ptr, size_t size,
837 enum dma_data_direction direction);
838 void (*unmap_single)(struct ib_device *dev,
839 u64 addr, size_t size,
840 enum dma_data_direction direction);
841 u64 (*map_page)(struct ib_device *dev,
842 struct page *page, unsigned long offset,
843 size_t size,
844 enum dma_data_direction direction);
845 void (*unmap_page)(struct ib_device *dev,
846 u64 addr, size_t size,
847 enum dma_data_direction direction);
848 int (*map_sg)(struct ib_device *dev,
849 struct scatterlist *sg, int nents,
850 enum dma_data_direction direction);
851 void (*unmap_sg)(struct ib_device *dev,
852 struct scatterlist *sg, int nents,
853 enum dma_data_direction direction);
854 u64 (*dma_address)(struct ib_device *dev,
855 struct scatterlist *sg);
856 unsigned int (*dma_len)(struct ib_device *dev,
857 struct scatterlist *sg);
858 void (*sync_single_for_cpu)(struct ib_device *dev,
859 u64 dma_handle,
860 size_t size,
861 enum dma_data_direction dir);
862 void (*sync_single_for_device)(struct ib_device *dev,
863 u64 dma_handle,
864 size_t size,
865 enum dma_data_direction dir);
866 void *(*alloc_coherent)(struct ib_device *dev,
867 size_t size,
868 u64 *dma_handle,
869 gfp_t flag);
870 void (*free_coherent)(struct ib_device *dev,
871 size_t size, void *cpu_addr,
872 u64 dma_handle);
873};
874
875struct iw_cm_verbs;
876
877struct ib_device {
878 struct device *dma_device;
879
880 char name[IB_DEVICE_NAME_MAX];
881
882 struct list_head event_handler_list;
883 spinlock_t event_handler_lock;
884
885 struct list_head core_list;
886 struct list_head client_data_list;
887 spinlock_t client_data_lock;
888
889 struct ib_cache cache;
890 int *pkey_tbl_len;
891 int *gid_tbl_len;
892
893 u32 flags;
894
895 int num_comp_vectors;
896
897 struct iw_cm_verbs *iwcm;
898
899 int (*query_device)(struct ib_device *device,
900 struct ib_device_attr *device_attr);
901 int (*query_port)(struct ib_device *device,
902 u8 port_num,
903 struct ib_port_attr *port_attr);
904 int (*query_gid)(struct ib_device *device,
905 u8 port_num, int index,
906 union ib_gid *gid);
907 int (*query_pkey)(struct ib_device *device,
908 u8 port_num, u16 index, u16 *pkey);
909 int (*modify_device)(struct ib_device *device,
910 int device_modify_mask,
911 struct ib_device_modify *device_modify);
912 int (*modify_port)(struct ib_device *device,
913 u8 port_num, int port_modify_mask,
914 struct ib_port_modify *port_modify);
915 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
916 struct ib_udata *udata);
917 int (*dealloc_ucontext)(struct ib_ucontext *context);
918 int (*mmap)(struct ib_ucontext *context,
919 struct vm_area_struct *vma);
920 struct ib_pd * (*alloc_pd)(struct ib_device *device,
921 struct ib_ucontext *context,
922 struct ib_udata *udata);
923 int (*dealloc_pd)(struct ib_pd *pd);
924 struct ib_ah * (*create_ah)(struct ib_pd *pd,
925 struct ib_ah_attr *ah_attr);
926 int (*modify_ah)(struct ib_ah *ah,
927 struct ib_ah_attr *ah_attr);
928 int (*query_ah)(struct ib_ah *ah,
929 struct ib_ah_attr *ah_attr);
930 int (*destroy_ah)(struct ib_ah *ah);
931 struct ib_srq * (*create_srq)(struct ib_pd *pd,
932 struct ib_srq_init_attr *srq_init_attr,
933 struct ib_udata *udata);
934 int (*modify_srq)(struct ib_srq *srq,
935 struct ib_srq_attr *srq_attr,
936 enum ib_srq_attr_mask srq_attr_mask,
937 struct ib_udata *udata);
938 int (*query_srq)(struct ib_srq *srq,
939 struct ib_srq_attr *srq_attr);
940 int (*destroy_srq)(struct ib_srq *srq);
941 int (*post_srq_recv)(struct ib_srq *srq,
942 struct ib_recv_wr *recv_wr,
943 struct ib_recv_wr **bad_recv_wr);
944 struct ib_qp * (*create_qp)(struct ib_pd *pd,
945 struct ib_qp_init_attr *qp_init_attr,
946 struct ib_udata *udata);
947 int (*modify_qp)(struct ib_qp *qp,
948 struct ib_qp_attr *qp_attr,
949 int qp_attr_mask,
950 struct ib_udata *udata);
951 int (*query_qp)(struct ib_qp *qp,
952 struct ib_qp_attr *qp_attr,
953 int qp_attr_mask,
954 struct ib_qp_init_attr *qp_init_attr);
955 int (*destroy_qp)(struct ib_qp *qp);
956 int (*post_send)(struct ib_qp *qp,
957 struct ib_send_wr *send_wr,
958 struct ib_send_wr **bad_send_wr);
959 int (*post_recv)(struct ib_qp *qp,
960 struct ib_recv_wr *recv_wr,
961 struct ib_recv_wr **bad_recv_wr);
962 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
963 int comp_vector,
964 struct ib_ucontext *context,
965 struct ib_udata *udata);
966 int (*destroy_cq)(struct ib_cq *cq);
967 int (*resize_cq)(struct ib_cq *cq, int cqe,
968 struct ib_udata *udata);
969 int (*poll_cq)(struct ib_cq *cq, int num_entries,
970 struct ib_wc *wc);
971 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
972 int (*req_notify_cq)(struct ib_cq *cq,
973 enum ib_cq_notify_flags flags);
974 int (*req_ncomp_notif)(struct ib_cq *cq,
975 int wc_cnt);
976 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
977 int mr_access_flags);
978 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
979 struct ib_phys_buf *phys_buf_array,
980 int num_phys_buf,
981 int mr_access_flags,
982 u64 *iova_start);
983 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
984 u64 start, u64 length,
985 u64 virt_addr,
986 int mr_access_flags,
987 struct ib_udata *udata);
988 int (*query_mr)(struct ib_mr *mr,
989 struct ib_mr_attr *mr_attr);
990 int (*dereg_mr)(struct ib_mr *mr);
991 int (*rereg_phys_mr)(struct ib_mr *mr,
992 int mr_rereg_mask,
993 struct ib_pd *pd,
994 struct ib_phys_buf *phys_buf_array,
995 int num_phys_buf,
996 int mr_access_flags,
997 u64 *iova_start);
998 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
999 int (*bind_mw)(struct ib_qp *qp,
1000 struct ib_mw *mw,
1001 struct ib_mw_bind *mw_bind);
1002 int (*dealloc_mw)(struct ib_mw *mw);
1003 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1004 int mr_access_flags,
1005 struct ib_fmr_attr *fmr_attr);
1006 int (*map_phys_fmr)(struct ib_fmr *fmr,
1007 u64 *page_list, int list_len,
1008 u64 iova);
1009 int (*unmap_fmr)(struct list_head *fmr_list);
1010 int (*dealloc_fmr)(struct ib_fmr *fmr);
1011 int (*attach_mcast)(struct ib_qp *qp,
1012 union ib_gid *gid,
1013 u16 lid);
1014 int (*detach_mcast)(struct ib_qp *qp,
1015 union ib_gid *gid,
1016 u16 lid);
1017 int (*process_mad)(struct ib_device *device,
1018 int process_mad_flags,
1019 u8 port_num,
1020 struct ib_wc *in_wc,
1021 struct ib_grh *in_grh,
1022 struct ib_mad *in_mad,
1023 struct ib_mad *out_mad);
1024
1025 struct ib_dma_mapping_ops *dma_ops;
1026
1027 struct module *owner;
1028 struct class_device class_dev;
1029 struct kobject ports_parent;
1030 struct list_head port_list;
1031
1032 enum {
1033 IB_DEV_UNINITIALIZED,
1034 IB_DEV_REGISTERED,
1035 IB_DEV_UNREGISTERED
1036 } reg_state;
1037
1038 u64 uverbs_cmd_mask;
1039 int uverbs_abi_ver;
1040
1041 char node_desc[64];
1042 __be64 node_guid;
1043 u8 node_type;
1044 u8 phys_port_cnt;
1045};
1046
1047struct ib_client {
1048 char *name;
1049 void (*add) (struct ib_device *);
1050 void (*remove)(struct ib_device *);
1051
1052 struct list_head list;
1053};
1054
1055struct ib_device *ib_alloc_device(size_t size);
1056void ib_dealloc_device(struct ib_device *device);
1057
1058int ib_register_device (struct ib_device *device);
1059void ib_unregister_device(struct ib_device *device);
1060
1061int ib_register_client (struct ib_client *client);
1062void ib_unregister_client(struct ib_client *client);
1063
1064void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1065void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1066 void *data);
1067
1068static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1069{
1070 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1071}
1072
1073static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1074{
1075 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1076}
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1094 enum ib_qp_type type, enum ib_qp_attr_mask mask);
1095
1096int ib_register_event_handler (struct ib_event_handler *event_handler);
1097int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1098void ib_dispatch_event(struct ib_event *event);
1099
1100int ib_query_device(struct ib_device *device,
1101 struct ib_device_attr *device_attr);
1102
1103int ib_query_port(struct ib_device *device,
1104 u8 port_num, struct ib_port_attr *port_attr);
1105
1106int ib_query_gid(struct ib_device *device,
1107 u8 port_num, int index, union ib_gid *gid);
1108
1109int ib_query_pkey(struct ib_device *device,
1110 u8 port_num, u16 index, u16 *pkey);
1111
1112int ib_modify_device(struct ib_device *device,
1113 int device_modify_mask,
1114 struct ib_device_modify *device_modify);
1115
1116int ib_modify_port(struct ib_device *device,
1117 u8 port_num, int port_modify_mask,
1118 struct ib_port_modify *port_modify);
1119
1120int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1121 u8 *port_num, u16 *index);
1122
1123int ib_find_pkey(struct ib_device *device,
1124 u8 port_num, u16 pkey, u16 *index);
1125
1126
1127
1128
1129
1130
1131
1132
1133struct ib_pd *ib_alloc_pd(struct ib_device *device);
1134
1135
1136
1137
1138
1139int ib_dealloc_pd(struct ib_pd *pd);
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1163 struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1178 struct ib_grh *grh, u8 port_num);
1179
1180
1181
1182
1183
1184
1185
1186
1187int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1188
1189
1190
1191
1192
1193
1194
1195
1196int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1197
1198
1199
1200
1201
1202int ib_destroy_ah(struct ib_ah *ah);
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
/*
 * ib_create_srq - Create a shared receive queue on @pd with the
 * requested attributes.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/*
 * ib_modify_srq - Modify the attributes of an SRQ; @srq_attr_mask
 * selects which fields of @srq_attr are applied.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/*
 * ib_query_srq - Read an SRQ's current attributes into @srq_attr.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/*
 * ib_destroy_srq - Destroy a shared receive queue.
 */
int ib_destroy_srq(struct ib_srq *srq);
1250
1251
1252
1253
1254
1255
1256
1257
1258static inline int ib_post_srq_recv(struct ib_srq *srq,
1259 struct ib_recv_wr *recv_wr,
1260 struct ib_recv_wr **bad_recv_wr)
1261{
1262 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1263}
1264
1265
1266
1267
1268
1269
1270
1271
1272
/*
 * ib_create_qp - Create a queue pair on @pd with the requested
 * attributes and capabilities.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/*
 * ib_modify_qp - Modify a QP (including state transitions);
 * @qp_attr_mask selects which fields of @qp_attr are applied.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/*
 * ib_query_qp - Read a QP's attributes.  @qp_attr_mask hints which
 * fields the caller needs; @qp_init_attr receives the creation-time
 * attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/*
 * ib_destroy_qp - Destroy a queue pair.
 */
int ib_destroy_qp(struct ib_qp *qp);
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319static inline int ib_post_send(struct ib_qp *qp,
1320 struct ib_send_wr *send_wr,
1321 struct ib_send_wr **bad_send_wr)
1322{
1323 return qp->device->post_send(qp, send_wr, bad_send_wr);
1324}
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334static inline int ib_post_recv(struct ib_qp *qp,
1335 struct ib_recv_wr *recv_wr,
1336 struct ib_recv_wr **bad_recv_wr)
1337{
1338 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1339}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356struct ib_cq *ib_create_cq(struct ib_device *device,
1357 ib_comp_handler comp_handler,
1358 void (*event_handler)(struct ib_event *, void *),
1359 void *cq_context, int cqe, int comp_vector);
1360
1361
1362
1363
1364
1365
1366
1367
1368int ib_resize_cq(struct ib_cq *cq, int cqe);
1369
1370
1371
1372
1373
1374int ib_destroy_cq(struct ib_cq *cq);
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1389 struct ib_wc *wc)
1390{
1391 return cq->device->poll_cq(cq, num_entries, wc);
1392}
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
/* ib_peek_cq - Inspect the CQ without consuming completions; compare
 * the number available against @wc_cnt (driver-defined semantics). */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433static inline int ib_req_notify_cq(struct ib_cq *cq,
1434 enum ib_cq_notify_flags flags)
1435{
1436 return cq->device->req_notify_cq(cq, flags);
1437}
1438
1439
1440
1441
1442
1443
1444
1445
1446static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1447{
1448 return cq->device->req_ncomp_notif ?
1449 cq->device->req_ncomp_notif(cq, wc_cnt) :
1450 -ENOSYS;
1451}
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
/* ib_get_dma_mr - Return an MR usable for DMA with the given access
 * flags (enum ib_access_flags bits). */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1464
1465
1466
1467
1468
1469
1470static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1471{
1472 if (dev->dma_ops)
1473 return dev->dma_ops->mapping_error(dev, dma_addr);
1474 return dma_mapping_error(dma_addr);
1475}
1476
1477
1478
1479
1480
1481
1482
1483
1484static inline u64 ib_dma_map_single(struct ib_device *dev,
1485 void *cpu_addr, size_t size,
1486 enum dma_data_direction direction)
1487{
1488 if (dev->dma_ops)
1489 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1490 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1491}
1492
1493
1494
1495
1496
1497
1498
1499
1500static inline void ib_dma_unmap_single(struct ib_device *dev,
1501 u64 addr, size_t size,
1502 enum dma_data_direction direction)
1503{
1504 if (dev->dma_ops)
1505 dev->dma_ops->unmap_single(dev, addr, size, direction);
1506 else
1507 dma_unmap_single(dev->dma_device, addr, size, direction);
1508}
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518static inline u64 ib_dma_map_page(struct ib_device *dev,
1519 struct page *page,
1520 unsigned long offset,
1521 size_t size,
1522 enum dma_data_direction direction)
1523{
1524 if (dev->dma_ops)
1525 return dev->dma_ops->map_page(dev, page, offset, size, direction);
1526 return dma_map_page(dev->dma_device, page, offset, size, direction);
1527}
1528
1529
1530
1531
1532
1533
1534
1535
1536static inline void ib_dma_unmap_page(struct ib_device *dev,
1537 u64 addr, size_t size,
1538 enum dma_data_direction direction)
1539{
1540 if (dev->dma_ops)
1541 dev->dma_ops->unmap_page(dev, addr, size, direction);
1542 else
1543 dma_unmap_page(dev->dma_device, addr, size, direction);
1544}
1545
1546
1547
1548
1549
1550
1551
1552
1553static inline int ib_dma_map_sg(struct ib_device *dev,
1554 struct scatterlist *sg, int nents,
1555 enum dma_data_direction direction)
1556{
1557 if (dev->dma_ops)
1558 return dev->dma_ops->map_sg(dev, sg, nents, direction);
1559 return dma_map_sg(dev->dma_device, sg, nents, direction);
1560}
1561
1562
1563
1564
1565
1566
1567
1568
1569static inline void ib_dma_unmap_sg(struct ib_device *dev,
1570 struct scatterlist *sg, int nents,
1571 enum dma_data_direction direction)
1572{
1573 if (dev->dma_ops)
1574 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1575 else
1576 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1577}
1578
1579
1580
1581
1582
1583
1584static inline u64 ib_sg_dma_address(struct ib_device *dev,
1585 struct scatterlist *sg)
1586{
1587 if (dev->dma_ops)
1588 return dev->dma_ops->dma_address(dev, sg);
1589 return sg_dma_address(sg);
1590}
1591
1592
1593
1594
1595
1596
1597static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1598 struct scatterlist *sg)
1599{
1600 if (dev->dma_ops)
1601 return dev->dma_ops->dma_len(dev, sg);
1602 return sg_dma_len(sg);
1603}
1604
1605
1606
1607
1608
1609
1610
1611
1612static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1613 u64 addr,
1614 size_t size,
1615 enum dma_data_direction dir)
1616{
1617 if (dev->dma_ops)
1618 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1619 else
1620 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1621}
1622
1623
1624
1625
1626
1627
1628
1629
1630static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1631 u64 addr,
1632 size_t size,
1633 enum dma_data_direction dir)
1634{
1635 if (dev->dma_ops)
1636 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1637 else
1638 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1639}
1640
1641
1642
1643
1644
1645
1646
1647
1648static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1649 size_t size,
1650 u64 *dma_handle,
1651 gfp_t flag)
1652{
1653 if (dev->dma_ops)
1654 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1655 else {
1656 dma_addr_t handle;
1657 void *ret;
1658
1659 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1660 *dma_handle = handle;
1661 return ret;
1662 }
1663}
1664
1665
1666
1667
1668
1669
1670
1671
1672static inline void ib_dma_free_coherent(struct ib_device *dev,
1673 size_t size, void *cpu_addr,
1674 u64 dma_handle)
1675{
1676 if (dev->dma_ops)
1677 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1678 else
1679 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1680}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
/**
 * ib_reg_phys_mr - Register a memory region described by a set of
 *   physical buffers.
 * @pd: The protection domain associated with the memory region.
 * @phys_buf_array: Array of physical buffers making up the region.
 * @num_phys_buf: Number of entries in @phys_buf_array.
 * @mr_access_flags: Access rights for the region (IB_ACCESS_* flags).
 * @iova_start: In/out: the requested/assigned I/O virtual address of
 *   the region — NOTE(review): in/out semantics inferred from the
 *   pointer type; confirm against the implementation.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
/**
 * ib_rereg_phys_mr - Modify the attributes of an existing physical
 *   memory region.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: Bitmask selecting which attributes to change
 *   (translation, PD, access flags) — NOTE(review): exact mask values
 *   are defined elsewhere in this header; confirm there.
 * @pd: New protection domain, if selected by @mr_rereg_mask.
 * @phys_buf_array: New physical buffer list, if selected.
 * @num_phys_buf: Number of entries in @phys_buf_array.
 * @mr_access_flags: New access rights, if selected.
 * @iova_start: New I/O virtual address, if selected.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);
1727
1728
1729
1730
1731
1732
/**
 * ib_query_mr - Retrieve the attributes of a memory region.
 * @mr: The memory region to query.
 * @mr_attr: Out parameter filled with the region's attributes.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1734
1735
1736
1737
1738
1739
/**
 * ib_dereg_mr - Deregister a memory region and free its resources.
 * @mr: The memory region to deregister.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_dereg_mr(struct ib_mr *mr);
1741
1742
1743
1744
1745
/**
 * ib_alloc_mw - Allocate a memory window.
 * @pd: The protection domain associated with the memory window.
 *
 * Returns the new memory window, or an ERR_PTR on failure —
 * NOTE(review): ERR_PTR return inferred from kernel convention;
 * confirm against the implementation.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757static inline int ib_bind_mw(struct ib_qp *qp,
1758 struct ib_mw *mw,
1759 struct ib_mw_bind *mw_bind)
1760{
1761
1762 return mw->device->bind_mw ?
1763 mw->device->bind_mw(qp, mw, mw_bind) :
1764 -ENOSYS;
1765}
1766
1767
1768
1769
1770
/**
 * ib_dealloc_mw - Deallocate a memory window.
 * @mw: The memory window to deallocate.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_dealloc_mw(struct ib_mw *mw);
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
/**
 * ib_alloc_fmr - Allocate a fast memory region (FMR).
 * @pd: The protection domain associated with the FMR.
 * @mr_access_flags: Access rights for the FMR (IB_ACCESS_* flags).
 * @fmr_attr: Attributes of the FMR (e.g. page size, maximum pages).
 *
 * Returns the new FMR, or an ERR_PTR on failure — NOTE(review):
 * ERR_PTR return inferred from kernel convention; confirm.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);
1785
1786
1787
1788
1789
1790
1791
1792
/**
 * ib_map_phys_fmr - Map a list of physical pages into a fast memory region.
 * @fmr: The fast memory region to map the pages into.
 * @page_list: Array of physical page addresses to map.
 * @list_len: Number of entries in @page_list.
 * @iova: I/O virtual address at which to map the pages.
 *
 * Delegates unconditionally to the device's map_phys_fmr operation
 * (no NULL check — drivers supporting FMRs must provide it).
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
1799
1800
1801
1802
1803
/**
 * ib_unmap_fmr - Remove the mappings from a list of fast memory regions.
 * @fmr_list: List of FMRs (linked via their list heads) to unmap.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_unmap_fmr(struct list_head *fmr_list);
1805
1806
1807
1808
1809
/**
 * ib_dealloc_fmr - Deallocate a fast memory region.
 * @fmr: The fast memory region to deallocate.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
/**
 * ib_attach_mcast - Attach a QP to a multicast group.
 * @qp: The QP to attach.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID — NOTE(review): presumably in host
 *   byte order (u16, not __be16); confirm against callers.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1825
1826
1827
1828
1829
1830
1831
/**
 * ib_detach_mcast - Detach a QP from a multicast group.
 * @qp: The QP to detach.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID — NOTE(review): presumably in host
 *   byte order (u16, not __be16); confirm against callers.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1833
1834#endif
1835