#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

enum rdma_node_type {
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = 1,
	IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
	IB_DEVICE_RAW_MULTI = (1<<3),
	IB_DEVICE_AUTO_PATH_MIG = (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
	IB_DEVICE_SHUTDOWN_PORT = (1<<8),
	IB_DEVICE_INIT_TYPE = (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
	IB_DEVICE_SRQ_RESIZE = (1<<13),
	IB_DEVICE_N_NOTIFY_CQ = (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
	IB_DEVICE_RESERVED = (1<<16),
	IB_DEVICE_MEM_WINDOW = (1<<17),

	IB_DEVICE_UD_IP_CSUM = (1<<18),
	IB_DEVICE_UD_TSO = (1<<19),
	IB_DEVICE_XRC = (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
	IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC = 1,
	IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
};

struct ib_cq_init_attr {
	unsigned int cqe;
	int comp_vector;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	int device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_IP_BASED_GIDS = 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32
};

struct ib_protocol_stats {
};

struct iw_protocol_stats {
	u64 ipInReceives;
	u64 ipInHdrErrors;
	u64 ipInTooBigErrors;
	u64 ipInNoRoutes;
	u64 ipInAddrErrors;
	u64 ipInUnknownProtos;
	u64 ipInTruncatedPkts;
	u64 ipInDiscards;
	u64 ipInDelivers;
	u64 ipOutForwDatagrams;
	u64 ipOutRequests;
	u64 ipOutDiscards;
	u64 ipOutNoRoutes;
	u64 ipReasmTimeout;
	u64 ipReasmReqds;
	u64 ipReasmOKs;
	u64 ipReasmFails;
	u64 ipFragOKs;
	u64 ipFragFails;
	u64 ipFragCreates;
	u64 ipInMcastPkts;
	u64 ipOutMcastPkts;
	u64 ipInBcastPkts;
	u64 ipOutBcastPkts;

	u64 tcpRtoAlgorithm;
	u64 tcpRtoMin;
	u64 tcpRtoMax;
	u64 tcpMaxConn;
	u64 tcpActiveOpens;
	u64 tcpPassiveOpens;
	u64 tcpAttemptFails;
	u64 tcpEstabResets;
	u64 tcpCurrEstab;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
	u64 tcpInErrs;
	u64 tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats ib;
	struct iw_protocol_stats iw;
};

#define RDMA_CORE_CAP_IB_MAD	0x00000001
#define RDMA_CORE_CAP_IB_SMI	0x00000002
#define RDMA_CORE_CAP_IB_CM	0x00000004
#define RDMA_CORE_CAP_IW_CM	0x00000008
#define RDMA_CORE_CAP_IB_SA	0x00000010
#define RDMA_CORE_CAP_OPA_MAD	0x00000020

#define RDMA_CORE_CAP_AF_IB	0x00001000
#define RDMA_CORE_CAP_ETH_AH	0x00002000

#define RDMA_CORE_CAP_PROT_IB	0x00100000
#define RDMA_CORE_CAP_PROT_ROCE	0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB \
				| RDMA_CORE_CAP_IB_MAD \
				| RDMA_CORE_CAP_IB_SMI \
				| RDMA_CORE_CAP_IB_CM \
				| RDMA_CORE_CAP_IB_SA \
				| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	(RDMA_CORE_CAP_PROT_ROCE \
				| RDMA_CORE_CAP_IB_MAD \
				| RDMA_CORE_CAP_IB_CM \
				| RDMA_CORE_CAP_AF_IB \
				| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	(RDMA_CORE_CAP_PROT_IWARP \
				| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB \
				| RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u16 lid;
	u16 sm_lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

__attribute_const__ const char *ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)
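/*
 * Usage sketch (illustrative, not part of this header): a client interested
 * in asynchronous events typically initializes a handler with
 * INIT_IB_EVENT_HANDLER() and registers it with ib_register_event_handler(),
 * declared later in this file.  The names my_handler, my_ev_handler and
 * my_dev below are hypothetical.
 *
 *	static void my_handler(struct ib_event_handler *h, struct ib_event *ev)
 *	{
 *		pr_info("%s: async event %d\n", ev->device->name, ev->event);
 *	}
 *
 *	static struct ib_event_handler my_ev_handler;
 *
 *	static int my_setup(struct ib_device *my_dev)
 *	{
 *		INIT_IB_EVENT_HANDLER(&my_ev_handler, my_dev, my_handler);
 *		return ib_register_event_handler(&my_ev_handler);
 *	}
 */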

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

550__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
551
557__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
558
559enum ib_mr_create_flags {
560 IB_MR_SIGNATURE_EN = 1,
561};
562
570struct ib_mr_init_attr {
571 int max_reg_descriptors;
572 u32 flags;
573};
574
580enum ib_signature_type {
581 IB_SIG_TYPE_NONE,
582 IB_SIG_TYPE_T10_DIF,
583};
584
590enum ib_t10_dif_bg_type {
591 IB_T10DIF_CRC,
592 IB_T10DIF_CSUM
593};
594
608struct ib_t10_dif_domain {
609 enum ib_t10_dif_bg_type bg_type;
610 u16 pi_interval;
611 u16 bg;
612 u16 app_tag;
613 u32 ref_tag;
614 bool ref_remap;
615 bool app_escape;
616 bool ref_escape;
617 u16 apptag_check_mask;
618};
619
626struct ib_sig_domain {
627 enum ib_signature_type sig_type;
628 union {
629 struct ib_t10_dif_domain dif;
630 } sig;
631};
632
639struct ib_sig_attrs {
640 u8 check_mask;
641 struct ib_sig_domain mem;
642 struct ib_sig_domain wire;
643};
644
645enum ib_sig_err_type {
646 IB_SIG_BAD_GUARD,
647 IB_SIG_BAD_REFTAG,
648 IB_SIG_BAD_APPTAG,
649};
650
654struct ib_sig_err {
655 enum ib_sig_err_type err_type;
656 u32 expected;
657 u32 actual;
658 u64 sig_err_offset;
659 u32 key;
660};
661
662enum ib_mr_status_check {
663 IB_MR_CHECK_SIG_STATUS = 1,
664};
665
674struct ib_mr_status {
675 u32 fail_status;
676 struct ib_sig_err sig_err;
677};
678
684__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
685
686struct ib_ah_attr {
687 struct ib_global_route grh;
688 u16 dlid;
689 u8 sl;
690 u8 src_path_bits;
691 u8 static_rate;
692 u8 ah_flags;
693 u8 port_num;
694 u8 dmac[ETH_ALEN];
695 u16 vlan_id;
696};
697
698enum ib_wc_status {
699 IB_WC_SUCCESS,
700 IB_WC_LOC_LEN_ERR,
701 IB_WC_LOC_QP_OP_ERR,
702 IB_WC_LOC_EEC_OP_ERR,
703 IB_WC_LOC_PROT_ERR,
704 IB_WC_WR_FLUSH_ERR,
705 IB_WC_MW_BIND_ERR,
706 IB_WC_BAD_RESP_ERR,
707 IB_WC_LOC_ACCESS_ERR,
708 IB_WC_REM_INV_REQ_ERR,
709 IB_WC_REM_ACCESS_ERR,
710 IB_WC_REM_OP_ERR,
711 IB_WC_RETRY_EXC_ERR,
712 IB_WC_RNR_RETRY_EXC_ERR,
713 IB_WC_LOC_RDD_VIOL_ERR,
714 IB_WC_REM_INV_RD_REQ_ERR,
715 IB_WC_REM_ABORT_ERR,
716 IB_WC_INV_EECN_ERR,
717 IB_WC_INV_EEC_STATE_ERR,
718 IB_WC_FATAL_ERR,
719 IB_WC_RESP_TIMEOUT_ERR,
720 IB_WC_GENERAL_ERR
721};
722
723__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
724
725enum ib_wc_opcode {
726 IB_WC_SEND,
727 IB_WC_RDMA_WRITE,
728 IB_WC_RDMA_READ,
729 IB_WC_COMP_SWAP,
730 IB_WC_FETCH_ADD,
731 IB_WC_BIND_MW,
732 IB_WC_LSO,
733 IB_WC_LOCAL_INV,
734 IB_WC_FAST_REG_MR,
735 IB_WC_MASKED_COMP_SWAP,
736 IB_WC_MASKED_FETCH_ADD,
741 IB_WC_RECV = 1 << 7,
742 IB_WC_RECV_RDMA_WITH_IMM
743};
744
745enum ib_wc_flags {
746 IB_WC_GRH = 1,
747 IB_WC_WITH_IMM = (1<<1),
748 IB_WC_WITH_INVALIDATE = (1<<2),
749 IB_WC_IP_CSUM_OK = (1<<3),
750 IB_WC_WITH_SMAC = (1<<4),
751 IB_WC_WITH_VLAN = (1<<5),
752};
753
754struct ib_wc {
755 u64 wr_id;
756 enum ib_wc_status status;
757 enum ib_wc_opcode opcode;
758 u32 vendor_err;
759 u32 byte_len;
760 struct ib_qp *qp;
761 union {
762 __be32 imm_data;
763 u32 invalidate_rkey;
764 } ex;
765 u32 src_qp;
766 int wc_flags;
767 u16 pkey_index;
768 u16 slid;
769 u8 sl;
770 u8 dlid_path_bits;
771 u8 port_num;
772 u8 smac[ETH_ALEN];
773 u16 vlan_id;
774};
775
776enum ib_cq_notify_flags {
777 IB_CQ_SOLICITED = 1 << 0,
778 IB_CQ_NEXT_COMP = 1 << 1,
779 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
780 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
781};
782
783enum ib_srq_type {
784 IB_SRQT_BASIC,
785 IB_SRQT_XRC
786};
787
788enum ib_srq_attr_mask {
789 IB_SRQ_MAX_WR = 1 << 0,
790 IB_SRQ_LIMIT = 1 << 1,
791};
792
793struct ib_srq_attr {
794 u32 max_wr;
795 u32 max_sge;
796 u32 srq_limit;
797};
798
799struct ib_srq_init_attr {
800 void (*event_handler)(struct ib_event *, void *);
801 void *srq_context;
802 struct ib_srq_attr attr;
803 enum ib_srq_type srq_type;
804
805 union {
806 struct {
807 struct ib_xrcd *xrcd;
808 struct ib_cq *cq;
809 } xrc;
810 } ext;
811};
812
813struct ib_qp_cap {
814 u32 max_send_wr;
815 u32 max_recv_wr;
816 u32 max_send_sge;
817 u32 max_recv_sge;
818 u32 max_inline_data;
819};
820
821enum ib_sig_type {
822 IB_SIGNAL_ALL_WR,
823 IB_SIGNAL_REQ_WR
824};
825
826enum ib_qp_type {
832 IB_QPT_SMI,
833 IB_QPT_GSI,
834
835 IB_QPT_RC,
836 IB_QPT_UC,
837 IB_QPT_UD,
838 IB_QPT_RAW_IPV6,
839 IB_QPT_RAW_ETHERTYPE,
840 IB_QPT_RAW_PACKET = 8,
841 IB_QPT_XRC_INI = 9,
842 IB_QPT_XRC_TGT,
843 IB_QPT_MAX,
848 IB_QPT_RESERVED1 = 0x1000,
849 IB_QPT_RESERVED2,
850 IB_QPT_RESERVED3,
851 IB_QPT_RESERVED4,
852 IB_QPT_RESERVED5,
853 IB_QPT_RESERVED6,
854 IB_QPT_RESERVED7,
855 IB_QPT_RESERVED8,
856 IB_QPT_RESERVED9,
857 IB_QPT_RESERVED10,
858};
859
860enum ib_qp_create_flags {
861 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
862 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
863 IB_QP_CREATE_NETIF_QP = 1 << 5,
864 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
865 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
866
867 IB_QP_CREATE_RESERVED_START = 1 << 26,
868 IB_QP_CREATE_RESERVED_END = 1 << 31,
869};
870
877struct ib_qp_init_attr {
878 void (*event_handler)(struct ib_event *, void *);
879 void *qp_context;
880 struct ib_cq *send_cq;
881 struct ib_cq *recv_cq;
882 struct ib_srq *srq;
883 struct ib_xrcd *xrcd;
884 struct ib_qp_cap cap;
885 enum ib_sig_type sq_sig_type;
886 enum ib_qp_type qp_type;
887 enum ib_qp_create_flags create_flags;
888 u8 port_num;
889};
890
891struct ib_qp_open_attr {
892 void (*event_handler)(struct ib_event *, void *);
893 void *qp_context;
894 u32 qp_num;
895 enum ib_qp_type qp_type;
896};
897
898enum ib_rnr_timeout {
899 IB_RNR_TIMER_655_36 = 0,
900 IB_RNR_TIMER_000_01 = 1,
901 IB_RNR_TIMER_000_02 = 2,
902 IB_RNR_TIMER_000_03 = 3,
903 IB_RNR_TIMER_000_04 = 4,
904 IB_RNR_TIMER_000_06 = 5,
905 IB_RNR_TIMER_000_08 = 6,
906 IB_RNR_TIMER_000_12 = 7,
907 IB_RNR_TIMER_000_16 = 8,
908 IB_RNR_TIMER_000_24 = 9,
909 IB_RNR_TIMER_000_32 = 10,
910 IB_RNR_TIMER_000_48 = 11,
911 IB_RNR_TIMER_000_64 = 12,
912 IB_RNR_TIMER_000_96 = 13,
913 IB_RNR_TIMER_001_28 = 14,
914 IB_RNR_TIMER_001_92 = 15,
915 IB_RNR_TIMER_002_56 = 16,
916 IB_RNR_TIMER_003_84 = 17,
917 IB_RNR_TIMER_005_12 = 18,
918 IB_RNR_TIMER_007_68 = 19,
919 IB_RNR_TIMER_010_24 = 20,
920 IB_RNR_TIMER_015_36 = 21,
921 IB_RNR_TIMER_020_48 = 22,
922 IB_RNR_TIMER_030_72 = 23,
923 IB_RNR_TIMER_040_96 = 24,
924 IB_RNR_TIMER_061_44 = 25,
925 IB_RNR_TIMER_081_92 = 26,
926 IB_RNR_TIMER_122_88 = 27,
927 IB_RNR_TIMER_163_84 = 28,
928 IB_RNR_TIMER_245_76 = 29,
929 IB_RNR_TIMER_327_68 = 30,
930 IB_RNR_TIMER_491_52 = 31
931};
932
933enum ib_qp_attr_mask {
934 IB_QP_STATE = 1,
935 IB_QP_CUR_STATE = (1<<1),
936 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
937 IB_QP_ACCESS_FLAGS = (1<<3),
938 IB_QP_PKEY_INDEX = (1<<4),
939 IB_QP_PORT = (1<<5),
940 IB_QP_QKEY = (1<<6),
941 IB_QP_AV = (1<<7),
942 IB_QP_PATH_MTU = (1<<8),
943 IB_QP_TIMEOUT = (1<<9),
944 IB_QP_RETRY_CNT = (1<<10),
945 IB_QP_RNR_RETRY = (1<<11),
946 IB_QP_RQ_PSN = (1<<12),
947 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
948 IB_QP_ALT_PATH = (1<<14),
949 IB_QP_MIN_RNR_TIMER = (1<<15),
950 IB_QP_SQ_PSN = (1<<16),
951 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
952 IB_QP_PATH_MIG_STATE = (1<<18),
953 IB_QP_CAP = (1<<19),
954 IB_QP_DEST_QPN = (1<<20),
955 IB_QP_SMAC = (1<<21),
956 IB_QP_ALT_SMAC = (1<<22),
957 IB_QP_VID = (1<<23),
958 IB_QP_ALT_VID = (1<<24),
959};
960
961enum ib_qp_state {
962 IB_QPS_RESET,
963 IB_QPS_INIT,
964 IB_QPS_RTR,
965 IB_QPS_RTS,
966 IB_QPS_SQD,
967 IB_QPS_SQE,
968 IB_QPS_ERR
969};
970
971enum ib_mig_state {
972 IB_MIG_MIGRATED,
973 IB_MIG_REARM,
974 IB_MIG_ARMED
975};
976
977enum ib_mw_type {
978 IB_MW_TYPE_1 = 1,
979 IB_MW_TYPE_2 = 2
980};
981
982struct ib_qp_attr {
983 enum ib_qp_state qp_state;
984 enum ib_qp_state cur_qp_state;
985 enum ib_mtu path_mtu;
986 enum ib_mig_state path_mig_state;
987 u32 qkey;
988 u32 rq_psn;
989 u32 sq_psn;
990 u32 dest_qp_num;
991 int qp_access_flags;
992 struct ib_qp_cap cap;
993 struct ib_ah_attr ah_attr;
994 struct ib_ah_attr alt_ah_attr;
995 u16 pkey_index;
996 u16 alt_pkey_index;
997 u8 en_sqd_async_notify;
998 u8 sq_draining;
999 u8 max_rd_atomic;
1000 u8 max_dest_rd_atomic;
1001 u8 min_rnr_timer;
1002 u8 port_num;
1003 u8 timeout;
1004 u8 retry_cnt;
1005 u8 rnr_retry;
1006 u8 alt_port_num;
1007 u8 alt_timeout;
1008 u8 smac[ETH_ALEN];
1009 u8 alt_smac[ETH_ALEN];
1010 u16 vlan_id;
1011 u16 alt_vlan_id;
1012};
1013
1014enum ib_wr_opcode {
1015 IB_WR_RDMA_WRITE,
1016 IB_WR_RDMA_WRITE_WITH_IMM,
1017 IB_WR_SEND,
1018 IB_WR_SEND_WITH_IMM,
1019 IB_WR_RDMA_READ,
1020 IB_WR_ATOMIC_CMP_AND_SWP,
1021 IB_WR_ATOMIC_FETCH_AND_ADD,
1022 IB_WR_LSO,
1023 IB_WR_SEND_WITH_INV,
1024 IB_WR_RDMA_READ_WITH_INV,
1025 IB_WR_LOCAL_INV,
1026 IB_WR_FAST_REG_MR,
1027 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1028 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1029 IB_WR_BIND_MW,
1030 IB_WR_REG_SIG_MR,
1034 IB_WR_RESERVED1 = 0xf0,
1035 IB_WR_RESERVED2,
1036 IB_WR_RESERVED3,
1037 IB_WR_RESERVED4,
1038 IB_WR_RESERVED5,
1039 IB_WR_RESERVED6,
1040 IB_WR_RESERVED7,
1041 IB_WR_RESERVED8,
1042 IB_WR_RESERVED9,
1043 IB_WR_RESERVED10,
1044};
1045
1046enum ib_send_flags {
1047 IB_SEND_FENCE = 1,
1048 IB_SEND_SIGNALED = (1<<1),
1049 IB_SEND_SOLICITED = (1<<2),
1050 IB_SEND_INLINE = (1<<3),
1051 IB_SEND_IP_CSUM = (1<<4),
1054 IB_SEND_RESERVED_START = (1 << 26),
1055 IB_SEND_RESERVED_END = (1 << 31),
1056};
1057
1058struct ib_sge {
1059 u64 addr;
1060 u32 length;
1061 u32 lkey;
1062};
1063
1064struct ib_fast_reg_page_list {
1065 struct ib_device *device;
1066 u64 *page_list;
1067 unsigned int max_page_list_len;
1068};
1069
1080struct ib_mw_bind_info {
1081 struct ib_mr *mr;
1082 u64 addr;
1083 u64 length;
1084 int mw_access_flags;
1085};
1086
1087struct ib_send_wr {
1088 struct ib_send_wr *next;
1089 u64 wr_id;
1090 struct ib_sge *sg_list;
1091 int num_sge;
1092 enum ib_wr_opcode opcode;
1093 int send_flags;
1094 union {
1095 __be32 imm_data;
1096 u32 invalidate_rkey;
1097 } ex;
1098 union {
1099 struct {
1100 u64 remote_addr;
1101 u32 rkey;
1102 } rdma;
1103 struct {
1104 u64 remote_addr;
1105 u64 compare_add;
1106 u64 swap;
1107 u64 compare_add_mask;
1108 u64 swap_mask;
1109 u32 rkey;
1110 } atomic;
1111 struct {
1112 struct ib_ah *ah;
1113 void *header;
1114 int hlen;
1115 int mss;
1116 u32 remote_qpn;
1117 u32 remote_qkey;
1118 u16 pkey_index;
1119 u8 port_num;
1120 } ud;
1121 struct {
1122 u64 iova_start;
1123 struct ib_fast_reg_page_list *page_list;
1124 unsigned int page_shift;
1125 unsigned int page_list_len;
1126 u32 length;
1127 int access_flags;
1128 u32 rkey;
1129 } fast_reg;
1130 struct {
1131 struct ib_mw *mw;
1132
1133 u32 rkey;
1134 struct ib_mw_bind_info bind_info;
1135 } bind_mw;
1136 struct {
1137 struct ib_sig_attrs *sig_attrs;
1138 struct ib_mr *sig_mr;
1139 int access_flags;
1140 struct ib_sge *prot;
1141 } sig_handover;
1142 } wr;
1143 u32 xrc_remote_srq_num;
1144};
1145
1146struct ib_recv_wr {
1147 struct ib_recv_wr *next;
1148 u64 wr_id;
1149 struct ib_sge *sg_list;
1150 int num_sge;
1151};
1152
1153enum ib_access_flags {
1154 IB_ACCESS_LOCAL_WRITE = 1,
1155 IB_ACCESS_REMOTE_WRITE = (1<<1),
1156 IB_ACCESS_REMOTE_READ = (1<<2),
1157 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1158 IB_ACCESS_MW_BIND = (1<<4),
1159 IB_ZERO_BASED = (1<<5),
1160 IB_ACCESS_ON_DEMAND = (1<<6),
1161};
1162
1163struct ib_phys_buf {
1164 u64 addr;
1165 u64 size;
1166};
1167
1168struct ib_mr_attr {
1169 struct ib_pd *pd;
1170 u64 device_virt_addr;
1171 u64 size;
1172 int mr_access_flags;
1173 u32 lkey;
1174 u32 rkey;
1175};
1176
1177enum ib_mr_rereg_flags {
1178 IB_MR_REREG_TRANS = 1,
1179 IB_MR_REREG_PD = (1<<1),
1180 IB_MR_REREG_ACCESS = (1<<2),
1181 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1182};
1183
1190struct ib_mw_bind {
1191 u64 wr_id;
1192 int send_flags;
1193 struct ib_mw_bind_info bind_info;
1194};
1195
1196struct ib_fmr_attr {
1197 int max_pages;
1198 int max_maps;
1199 u8 page_shift;
1200};
1201
1202struct ib_umem;
1203
1204struct ib_ucontext {
1205 struct ib_device *device;
1206 struct list_head pd_list;
1207 struct list_head mr_list;
1208 struct list_head mw_list;
1209 struct list_head cq_list;
1210 struct list_head qp_list;
1211 struct list_head srq_list;
1212 struct list_head ah_list;
1213 struct list_head xrcd_list;
1214 struct list_head rule_list;
1215 int closing;
1216
1217 struct pid *tgid;
1218#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1219 struct rb_root umem_tree;
1224 struct rw_semaphore umem_rwsem;
1225 void (*invalidate_range)(struct ib_umem *umem,
1226 unsigned long start, unsigned long end);
1227
1228 struct mmu_notifier mn;
1229 atomic_t notifier_count;
1230
1231 struct list_head no_private_counters;
1232 int odp_mrs_count;
1233#endif
1234};
1235
1236struct ib_uobject {
1237 u64 user_handle;
1238 struct ib_ucontext *context;
1239 void *object;
1240 struct list_head list;
1241 int id;
1242 struct kref ref;
1243 struct rw_semaphore mutex;
1244 int live;
1245};
1246
1247struct ib_udata {
1248 const void __user *inbuf;
1249 void __user *outbuf;
1250 size_t inlen;
1251 size_t outlen;
1252};
1253
1254struct ib_pd {
1255 struct ib_device *device;
1256 struct ib_uobject *uobject;
1257 atomic_t usecnt;
1258};
1259
1260struct ib_xrcd {
1261 struct ib_device *device;
1262 atomic_t usecnt;
1263 struct inode *inode;
1264
1265 struct mutex tgt_qp_mutex;
1266 struct list_head tgt_qp_list;
1267};
1268
1269struct ib_ah {
1270 struct ib_device *device;
1271 struct ib_pd *pd;
1272 struct ib_uobject *uobject;
1273};
1274
1275typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1276
1277struct ib_cq {
1278 struct ib_device *device;
1279 struct ib_uobject *uobject;
1280 ib_comp_handler comp_handler;
1281 void (*event_handler)(struct ib_event *, void *);
1282 void *cq_context;
1283 int cqe;
1284 atomic_t usecnt;
1285};
1286
1287struct ib_srq {
1288 struct ib_device *device;
1289 struct ib_pd *pd;
1290 struct ib_uobject *uobject;
1291 void (*event_handler)(struct ib_event *, void *);
1292 void *srq_context;
1293 enum ib_srq_type srq_type;
1294 atomic_t usecnt;
1295
1296 union {
1297 struct {
1298 struct ib_xrcd *xrcd;
1299 struct ib_cq *cq;
1300 u32 srq_num;
1301 } xrc;
1302 } ext;
1303};
1304
1305struct ib_qp {
1306 struct ib_device *device;
1307 struct ib_pd *pd;
1308 struct ib_cq *send_cq;
1309 struct ib_cq *recv_cq;
1310 struct ib_srq *srq;
1311 struct ib_xrcd *xrcd;
1312 struct list_head xrcd_list;
1313
1314 atomic_t usecnt;
1315 struct list_head open_list;
1316 struct ib_qp *real_qp;
1317 struct ib_uobject *uobject;
1318 void (*event_handler)(struct ib_event *, void *);
1319 void *qp_context;
1320 u32 qp_num;
1321 enum ib_qp_type qp_type;
1322};
1323
1324struct ib_mr {
1325 struct ib_device *device;
1326 struct ib_pd *pd;
1327 struct ib_uobject *uobject;
1328 u32 lkey;
1329 u32 rkey;
1330 atomic_t usecnt;
1331};
1332
1333struct ib_mw {
1334 struct ib_device *device;
1335 struct ib_pd *pd;
1336 struct ib_uobject *uobject;
1337 u32 rkey;
1338 enum ib_mw_type type;
1339};
1340
1341struct ib_fmr {
1342 struct ib_device *device;
1343 struct ib_pd *pd;
1344 struct list_head list;
1345 u32 lkey;
1346 u32 rkey;
1347};
1348
1349
1350enum ib_flow_attr_type {
1351
1352 IB_FLOW_ATTR_NORMAL = 0x0,
1356 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1360 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1361
1362 IB_FLOW_ATTR_SNIFFER = 0x3
1363};
1364
1365
1366enum ib_flow_spec_type {
1367
1368 IB_FLOW_SPEC_ETH = 0x20,
1369 IB_FLOW_SPEC_IB = 0x22,
1370
1371 IB_FLOW_SPEC_IPV4 = 0x30,
1372
1373 IB_FLOW_SPEC_TCP = 0x40,
1374 IB_FLOW_SPEC_UDP = 0x41
1375};
1376#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1377#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1382enum ib_flow_domain {
1383 IB_FLOW_DOMAIN_USER,
1384 IB_FLOW_DOMAIN_ETHTOOL,
1385 IB_FLOW_DOMAIN_RFS,
1386 IB_FLOW_DOMAIN_NIC,
1387 IB_FLOW_DOMAIN_NUM
1388};
1389
1390struct ib_flow_eth_filter {
1391 u8 dst_mac[6];
1392 u8 src_mac[6];
1393 __be16 ether_type;
1394 __be16 vlan_tag;
1395};
1396
1397struct ib_flow_spec_eth {
1398 enum ib_flow_spec_type type;
1399 u16 size;
1400 struct ib_flow_eth_filter val;
1401 struct ib_flow_eth_filter mask;
1402};
1403
1404struct ib_flow_ib_filter {
1405 __be16 dlid;
1406 __u8 sl;
1407};
1408
1409struct ib_flow_spec_ib {
1410 enum ib_flow_spec_type type;
1411 u16 size;
1412 struct ib_flow_ib_filter val;
1413 struct ib_flow_ib_filter mask;
1414};
1415
1416struct ib_flow_ipv4_filter {
1417 __be32 src_ip;
1418 __be32 dst_ip;
1419};
1420
1421struct ib_flow_spec_ipv4 {
1422 enum ib_flow_spec_type type;
1423 u16 size;
1424 struct ib_flow_ipv4_filter val;
1425 struct ib_flow_ipv4_filter mask;
1426};
1427
1428struct ib_flow_tcp_udp_filter {
1429 __be16 dst_port;
1430 __be16 src_port;
1431};
1432
1433struct ib_flow_spec_tcp_udp {
1434 enum ib_flow_spec_type type;
1435 u16 size;
1436 struct ib_flow_tcp_udp_filter val;
1437 struct ib_flow_tcp_udp_filter mask;
1438};
1439
1440union ib_flow_spec {
1441 struct {
1442 enum ib_flow_spec_type type;
1443 u16 size;
1444 };
1445 struct ib_flow_spec_eth eth;
1446 struct ib_flow_spec_ib ib;
1447 struct ib_flow_spec_ipv4 ipv4;
1448 struct ib_flow_spec_tcp_udp tcp_udp;
1449};
1450
1451struct ib_flow_attr {
1452 enum ib_flow_attr_type type;
1453 u16 size;
1454 u16 priority;
1455 u32 flags;
1456 u8 num_of_specs;
1457 u8 port;
1462};
1463
1464struct ib_flow {
1465 struct ib_qp *qp;
1466 struct ib_uobject *uobject;
1467};
1468
1469struct ib_mad_hdr;
1470struct ib_grh;
1471
1472enum ib_process_mad_flags {
1473 IB_MAD_IGNORE_MKEY = 1,
1474 IB_MAD_IGNORE_BKEY = 2,
1475 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1476};
1477
1478enum ib_mad_result {
1479 IB_MAD_RESULT_FAILURE = 0,
1480 IB_MAD_RESULT_SUCCESS = 1 << 0,
1481 IB_MAD_RESULT_REPLY = 1 << 1,
1482 IB_MAD_RESULT_CONSUMED = 1 << 2
1483};
1484
1485#define IB_DEVICE_NAME_MAX 64
1486
1487struct ib_cache {
1488 rwlock_t lock;
1489 struct ib_event_handler event_handler;
1490 struct ib_pkey_cache **pkey_cache;
1491 struct ib_gid_cache **gid_cache;
1492 u8 *lmc_cache;
1493};
1494
1495struct ib_dma_mapping_ops {
1496 int (*mapping_error)(struct ib_device *dev,
1497 u64 dma_addr);
1498 u64 (*map_single)(struct ib_device *dev,
1499 void *ptr, size_t size,
1500 enum dma_data_direction direction);
1501 void (*unmap_single)(struct ib_device *dev,
1502 u64 addr, size_t size,
1503 enum dma_data_direction direction);
1504 u64 (*map_page)(struct ib_device *dev,
1505 struct page *page, unsigned long offset,
1506 size_t size,
1507 enum dma_data_direction direction);
1508 void (*unmap_page)(struct ib_device *dev,
1509 u64 addr, size_t size,
1510 enum dma_data_direction direction);
1511 int (*map_sg)(struct ib_device *dev,
1512 struct scatterlist *sg, int nents,
1513 enum dma_data_direction direction);
1514 void (*unmap_sg)(struct ib_device *dev,
1515 struct scatterlist *sg, int nents,
1516 enum dma_data_direction direction);
1517 void (*sync_single_for_cpu)(struct ib_device *dev,
1518 u64 dma_handle,
1519 size_t size,
1520 enum dma_data_direction dir);
1521 void (*sync_single_for_device)(struct ib_device *dev,
1522 u64 dma_handle,
1523 size_t size,
1524 enum dma_data_direction dir);
1525 void *(*alloc_coherent)(struct ib_device *dev,
1526 size_t size,
1527 u64 *dma_handle,
1528 gfp_t flag);
1529 void (*free_coherent)(struct ib_device *dev,
1530 size_t size, void *cpu_addr,
1531 u64 dma_handle);
1532};
1533
1534struct iw_cm_verbs;
1535
1536struct ib_port_immutable {
1537 int pkey_tbl_len;
1538 int gid_tbl_len;
1539 u32 core_cap_flags;
1540 u32 max_mad_size;
1541};
1542
1543struct ib_device {
1544 struct device *dma_device;
1545
1546 char name[IB_DEVICE_NAME_MAX];
1547
1548 struct list_head event_handler_list;
1549 spinlock_t event_handler_lock;
1550
1551 spinlock_t client_data_lock;
1552 struct list_head core_list;
1553 struct list_head client_data_list;
1554
1555 struct ib_cache cache;
1559 struct ib_port_immutable *port_immutable;
1560
1561 int num_comp_vectors;
1562
1563 struct iw_cm_verbs *iwcm;
1564
1565 int (*get_protocol_stats)(struct ib_device *device,
1566 union rdma_protocol_stats *stats);
1567 int (*query_device)(struct ib_device *device,
1568 struct ib_device_attr *device_attr,
1569 struct ib_udata *udata);
1570 int (*query_port)(struct ib_device *device,
1571 u8 port_num,
1572 struct ib_port_attr *port_attr);
1573 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1574 u8 port_num);
1575 int (*query_gid)(struct ib_device *device,
1576 u8 port_num, int index,
1577 union ib_gid *gid);
1578 int (*query_pkey)(struct ib_device *device,
1579 u8 port_num, u16 index, u16 *pkey);
1580 int (*modify_device)(struct ib_device *device,
1581 int device_modify_mask,
1582 struct ib_device_modify *device_modify);
1583 int (*modify_port)(struct ib_device *device,
1584 u8 port_num, int port_modify_mask,
1585 struct ib_port_modify *port_modify);
1586 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1587 struct ib_udata *udata);
1588 int (*dealloc_ucontext)(struct ib_ucontext *context);
1589 int (*mmap)(struct ib_ucontext *context,
1590 struct vm_area_struct *vma);
1591 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1592 struct ib_ucontext *context,
1593 struct ib_udata *udata);
1594 int (*dealloc_pd)(struct ib_pd *pd);
1595 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1596 struct ib_ah_attr *ah_attr);
1597 int (*modify_ah)(struct ib_ah *ah,
1598 struct ib_ah_attr *ah_attr);
1599 int (*query_ah)(struct ib_ah *ah,
1600 struct ib_ah_attr *ah_attr);
1601 int (*destroy_ah)(struct ib_ah *ah);
1602 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1603 struct ib_srq_init_attr *srq_init_attr,
1604 struct ib_udata *udata);
1605 int (*modify_srq)(struct ib_srq *srq,
1606 struct ib_srq_attr *srq_attr,
1607 enum ib_srq_attr_mask srq_attr_mask,
1608 struct ib_udata *udata);
1609 int (*query_srq)(struct ib_srq *srq,
1610 struct ib_srq_attr *srq_attr);
1611 int (*destroy_srq)(struct ib_srq *srq);
1612 int (*post_srq_recv)(struct ib_srq *srq,
1613 struct ib_recv_wr *recv_wr,
1614 struct ib_recv_wr **bad_recv_wr);
1615 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1616 struct ib_qp_init_attr *qp_init_attr,
1617 struct ib_udata *udata);
1618 int (*modify_qp)(struct ib_qp *qp,
1619 struct ib_qp_attr *qp_attr,
1620 int qp_attr_mask,
1621 struct ib_udata *udata);
1622 int (*query_qp)(struct ib_qp *qp,
1623 struct ib_qp_attr *qp_attr,
1624 int qp_attr_mask,
1625 struct ib_qp_init_attr *qp_init_attr);
1626 int (*destroy_qp)(struct ib_qp *qp);
1627 int (*post_send)(struct ib_qp *qp,
1628 struct ib_send_wr *send_wr,
1629 struct ib_send_wr **bad_send_wr);
1630 int (*post_recv)(struct ib_qp *qp,
1631 struct ib_recv_wr *recv_wr,
1632 struct ib_recv_wr **bad_recv_wr);
1633 struct ib_cq * (*create_cq)(struct ib_device *device,
1634 const struct ib_cq_init_attr *attr,
1635 struct ib_ucontext *context,
1636 struct ib_udata *udata);
1637 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1638 u16 cq_period);
1639 int (*destroy_cq)(struct ib_cq *cq);
1640 int (*resize_cq)(struct ib_cq *cq, int cqe,
1641 struct ib_udata *udata);
1642 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1643 struct ib_wc *wc);
1644 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1645 int (*req_notify_cq)(struct ib_cq *cq,
1646 enum ib_cq_notify_flags flags);
1647 int (*req_ncomp_notif)(struct ib_cq *cq,
1648 int wc_cnt);
1649 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1650 int mr_access_flags);
1651 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1652 struct ib_phys_buf *phys_buf_array,
1653 int num_phys_buf,
1654 int mr_access_flags,
1655 u64 *iova_start);
1656 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1657 u64 start, u64 length,
1658 u64 virt_addr,
1659 int mr_access_flags,
1660 struct ib_udata *udata);
1661 int (*rereg_user_mr)(struct ib_mr *mr,
1662 int flags,
1663 u64 start, u64 length,
1664 u64 virt_addr,
1665 int mr_access_flags,
1666 struct ib_pd *pd,
1667 struct ib_udata *udata);
1668 int (*query_mr)(struct ib_mr *mr,
1669 struct ib_mr_attr *mr_attr);
1670 int (*dereg_mr)(struct ib_mr *mr);
1671 int (*destroy_mr)(struct ib_mr *mr);
1672 struct ib_mr * (*create_mr)(struct ib_pd *pd,
1673 struct ib_mr_init_attr *mr_init_attr);
1674 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1675 int max_page_list_len);
1676 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1677 int page_list_len);
1678 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1679 int (*rereg_phys_mr)(struct ib_mr *mr,
1680 int mr_rereg_mask,
1681 struct ib_pd *pd,
1682 struct ib_phys_buf *phys_buf_array,
1683 int num_phys_buf,
1684 int mr_access_flags,
1685 u64 *iova_start);
1686 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1687 enum ib_mw_type type);
1688 int (*bind_mw)(struct ib_qp *qp,
1689 struct ib_mw *mw,
1690 struct ib_mw_bind *mw_bind);
1691 int (*dealloc_mw)(struct ib_mw *mw);
1692 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1693 int mr_access_flags,
1694 struct ib_fmr_attr *fmr_attr);
1695 int (*map_phys_fmr)(struct ib_fmr *fmr,
1696 u64 *page_list, int list_len,
1697 u64 iova);
1698 int (*unmap_fmr)(struct list_head *fmr_list);
1699 int (*dealloc_fmr)(struct ib_fmr *fmr);
1700 int (*attach_mcast)(struct ib_qp *qp,
1701 union ib_gid *gid,
1702 u16 lid);
1703 int (*detach_mcast)(struct ib_qp *qp,
1704 union ib_gid *gid,
1705 u16 lid);
1706 int (*process_mad)(struct ib_device *device,
1707 int process_mad_flags,
1708 u8 port_num,
1709 const struct ib_wc *in_wc,
1710 const struct ib_grh *in_grh,
1711 const struct ib_mad_hdr *in_mad,
1712 size_t in_mad_size,
1713 struct ib_mad_hdr *out_mad,
1714 size_t *out_mad_size,
1715 u16 *out_mad_pkey_index);
1716 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1717 struct ib_ucontext *ucontext,
1718 struct ib_udata *udata);
1719 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1720 struct ib_flow * (*create_flow)(struct ib_qp *qp,
1721 struct ib_flow_attr
1722 *flow_attr,
1723 int domain);
1724 int (*destroy_flow)(struct ib_flow *flow_id);
1725 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1726 struct ib_mr_status *mr_status);
1727
1728 struct ib_dma_mapping_ops *dma_ops;
1729
1730 struct module *owner;
1731 struct device dev;
1732 struct kobject *ports_parent;
1733 struct list_head port_list;
1734
1735 enum {
1736 IB_DEV_UNINITIALIZED,
1737 IB_DEV_REGISTERED,
1738 IB_DEV_UNREGISTERED
1739 } reg_state;
1740
1741 int uverbs_abi_ver;
1742 u64 uverbs_cmd_mask;
1743 u64 uverbs_ex_cmd_mask;
1744
1745 char node_desc[64];
1746 __be64 node_guid;
1747 u32 local_dma_lkey;
1748 u16 is_switch:1;
1749 u8 node_type;
1750 u8 phys_port_cnt;
1751
1758 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
1759};

struct ib_client {
	char *name;
	void (*add)(struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
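/*
 * Sketch of how a driver's verb handler might use the udata helpers above
 * (illustrative; struct my_resp and the surrounding handler are
 * hypothetical): a response structure is copied back to userspace with
 * ib_copy_to_udata(), and a failed copy is reported as -EFAULT.
 *
 *	struct my_resp resp = { .cqe = cq->cqe };
 *
 *	if (udata && ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return -EFAULT;
 */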

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
}

static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
					   u8 port_num)
{
	return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
}

static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

struct ib_pd *ib_alloc_pd(struct ib_device *device);

int ib_dealloc_pd(struct ib_pd *pd);

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

int ib_destroy_srq(struct ib_srq *srq);

static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

int ib_destroy_qp(struct ib_qp *qp);

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

int ib_close_qp(struct ib_qp *qp);

static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
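/*
 * Usage sketch (illustrative): posting a single signaled SEND on an
 * established QP.  The variables qp, dma_addr, len, lkey and MY_WRID are
 * hypothetical and assume a buffer that is already registered and
 * DMA-mapped.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = MY_WRID,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		goto err;
 */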

static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

int ib_resize_cq(struct ib_cq *cq, int cqe);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

int ib_destroy_cq(struct ib_cq *cq);

static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
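/*
 * Typical completion-handler pattern (a sketch, not a requirement): drain
 * the CQ with ib_poll_cq(), re-arm it, and drain again when
 * IB_CQ_REPORT_MISSED_EVENTS reports (return value > 0) that completions
 * may have arrived between the last poll and the re-arm.
 * handle_completion() is a hypothetical consumer helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */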

static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
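/*
 * Usage sketch for the ib_dma_* wrappers (illustrative; dev, buf and len
 * are hypothetical): map through the ib_device so that providers supplying
 * their own dma_ops are honored, and check the result with
 * ib_dma_mapping_error() before placing the address in an ib_sge.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */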

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr);

int ib_destroy_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
			struct ib_device *device, int page_list_len);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
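/*
 * Sketch of the key-refresh idiom used with fast registration (illustrative;
 * mr is hypothetical): before re-registering an MR, consumers commonly bump
 * the low eight "key" bits so that stale remote references cannot match the
 * new registration.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
 */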

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

int ib_dealloc_mw(struct ib_mw *mw);

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

int ib_unmap_fmr(struct list_head *fmr_list);

int ib_dealloc_fmr(struct ib_fmr *fmr);

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}
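/*
 * Example (illustrative): remote write or remote atomic access without
 * local write access fails the check above with -EINVAL, so callers
 * validate the flags before building a registration request.
 *
 *	if (ib_check_mr_access(IB_ACCESS_REMOTE_WRITE))
 *		return -EINVAL;		(rejected: LOCAL_WRITE missing)
 *
 *	if (ib_check_mr_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE))
 *		...			(accepted: returns 0)
 */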

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

#endif