39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51#include <linux/socket.h>
52#include <uapi/linux/if_ether.h>
53
54#include <linux/atomic.h>
55#include <linux/mmu_notifier.h>
56#include <asm/uaccess.h>
57
58extern struct workqueue_struct *ib_wq;
59
60union ib_gid {
61 u8 raw[16];
62 struct {
63 __be64 subnet_prefix;
64 __be64 interface_id;
65 } global;
66};
67
68extern union ib_gid zgid;
69
70struct ib_gid_attr {
71 struct net_device *ndev;
72};
73
74enum rdma_node_type {
75
76 RDMA_NODE_IB_CA = 1,
77 RDMA_NODE_IB_SWITCH,
78 RDMA_NODE_IB_ROUTER,
79 RDMA_NODE_RNIC,
80 RDMA_NODE_USNIC,
81 RDMA_NODE_USNIC_UDP,
82};
83
84enum rdma_transport_type {
85 RDMA_TRANSPORT_IB,
86 RDMA_TRANSPORT_IWARP,
87 RDMA_TRANSPORT_USNIC,
88 RDMA_TRANSPORT_USNIC_UDP
89};
90
91enum rdma_protocol_type {
92 RDMA_PROTOCOL_IB,
93 RDMA_PROTOCOL_IBOE,
94 RDMA_PROTOCOL_IWARP,
95 RDMA_PROTOCOL_USNIC_UDP
96};
97
98__attribute_const__ enum rdma_transport_type
99rdma_node_get_transport(enum rdma_node_type node_type);
100
101enum rdma_link_layer {
102 IB_LINK_LAYER_UNSPECIFIED,
103 IB_LINK_LAYER_INFINIBAND,
104 IB_LINK_LAYER_ETHERNET,
105};
106
107enum ib_device_cap_flags {
108 IB_DEVICE_RESIZE_MAX_WR = 1,
109 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
110 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
111 IB_DEVICE_RAW_MULTI = (1<<3),
112 IB_DEVICE_AUTO_PATH_MIG = (1<<4),
113 IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
114 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
115 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
116 IB_DEVICE_SHUTDOWN_PORT = (1<<8),
117 IB_DEVICE_INIT_TYPE = (1<<9),
118 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
119 IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
120 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
121 IB_DEVICE_SRQ_RESIZE = (1<<13),
122 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
123 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
124 IB_DEVICE_RESERVED = (1<<16),
125 IB_DEVICE_MEM_WINDOW = (1<<17),
 /*
  * Devices should set IB_DEVICE_UD_IP_CSUM if they support insertion
  * of UDP and TCP checksums on outgoing UD IPoIB messages and can
  * verify the validity of the checksums on incoming messages.
  */
133 IB_DEVICE_UD_IP_CSUM = (1<<18),
134 IB_DEVICE_UD_TSO = (1<<19),
135 IB_DEVICE_XRC = (1<<20),
136 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
137 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
138 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
139 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
140 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
141 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
142 IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
143};
144
145enum ib_signature_prot_cap {
146 IB_PROT_T10DIF_TYPE_1 = 1,
147 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
148 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
149};
150
151enum ib_signature_guard_cap {
152 IB_GUARD_T10DIF_CRC = 1,
153 IB_GUARD_T10DIF_CSUM = 1 << 1,
154};
155
156enum ib_atomic_cap {
157 IB_ATOMIC_NONE,
158 IB_ATOMIC_HCA,
159 IB_ATOMIC_GLOB
160};
161
162enum ib_odp_general_cap_bits {
163 IB_ODP_SUPPORT = 1 << 0,
164};
165
166enum ib_odp_transport_cap_bits {
167 IB_ODP_SUPPORT_SEND = 1 << 0,
168 IB_ODP_SUPPORT_RECV = 1 << 1,
169 IB_ODP_SUPPORT_WRITE = 1 << 2,
170 IB_ODP_SUPPORT_READ = 1 << 3,
171 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
172};
173
174struct ib_odp_caps {
175 uint64_t general_caps;
176 struct {
177 uint32_t rc_odp_caps;
178 uint32_t uc_odp_caps;
179 uint32_t ud_odp_caps;
180 } per_transport_caps;
181};
182
183enum ib_cq_creation_flags {
184 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
185};
186
187struct ib_cq_init_attr {
188 unsigned int cqe;
189 int comp_vector;
190 u32 flags;
191};
192
193struct ib_device_attr {
194 u64 fw_ver;
195 __be64 sys_image_guid;
196 u64 max_mr_size;
197 u64 page_size_cap;
198 u32 vendor_id;
199 u32 vendor_part_id;
200 u32 hw_ver;
201 int max_qp;
202 int max_qp_wr;
203 int device_cap_flags;
204 int max_sge;
205 int max_sge_rd;
206 int max_cq;
207 int max_cqe;
208 int max_mr;
209 int max_pd;
210 int max_qp_rd_atom;
211 int max_ee_rd_atom;
212 int max_res_rd_atom;
213 int max_qp_init_rd_atom;
214 int max_ee_init_rd_atom;
215 enum ib_atomic_cap atomic_cap;
216 enum ib_atomic_cap masked_atomic_cap;
217 int max_ee;
218 int max_rdd;
219 int max_mw;
220 int max_raw_ipv6_qp;
221 int max_raw_ethy_qp;
222 int max_mcast_grp;
223 int max_mcast_qp_attach;
224 int max_total_mcast_qp_attach;
225 int max_ah;
226 int max_fmr;
227 int max_map_per_fmr;
228 int max_srq;
229 int max_srq_wr;
230 int max_srq_sge;
231 unsigned int max_fast_reg_page_list_len;
232 u16 max_pkeys;
233 u8 local_ca_ack_delay;
234 int sig_prot_cap;
235 int sig_guard_cap;
236 struct ib_odp_caps odp_caps;
237 uint64_t timestamp_mask;
238 uint64_t hca_core_clock;
239};
240
241enum ib_mtu {
242 IB_MTU_256 = 1,
243 IB_MTU_512 = 2,
244 IB_MTU_1024 = 3,
245 IB_MTU_2048 = 4,
246 IB_MTU_4096 = 5
247};
248
249static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
250{
251 switch (mtu) {
252 case IB_MTU_256: return 256;
253 case IB_MTU_512: return 512;
254 case IB_MTU_1024: return 1024;
255 case IB_MTU_2048: return 2048;
256 case IB_MTU_4096: return 4096;
257 default: return -1;
258 }
259}
260
261enum ib_port_state {
262 IB_PORT_NOP = 0,
263 IB_PORT_DOWN = 1,
264 IB_PORT_INIT = 2,
265 IB_PORT_ARMED = 3,
266 IB_PORT_ACTIVE = 4,
267 IB_PORT_ACTIVE_DEFER = 5
268};
269
270enum ib_port_cap_flags {
271 IB_PORT_SM = 1 << 1,
272 IB_PORT_NOTICE_SUP = 1 << 2,
273 IB_PORT_TRAP_SUP = 1 << 3,
274 IB_PORT_OPT_IPD_SUP = 1 << 4,
275 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
276 IB_PORT_SL_MAP_SUP = 1 << 6,
277 IB_PORT_MKEY_NVRAM = 1 << 7,
278 IB_PORT_PKEY_NVRAM = 1 << 8,
279 IB_PORT_LED_INFO_SUP = 1 << 9,
280 IB_PORT_SM_DISABLED = 1 << 10,
281 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
282 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
283 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
284 IB_PORT_CM_SUP = 1 << 16,
285 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
286 IB_PORT_REINIT_SUP = 1 << 18,
287 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
288 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
289 IB_PORT_DR_NOTICE_SUP = 1 << 21,
290 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
291 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
292 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
293 IB_PORT_CLIENT_REG_SUP = 1 << 25,
294 IB_PORT_IP_BASED_GIDS = 1 << 26,
295};
296
297enum ib_port_width {
298 IB_WIDTH_1X = 1,
299 IB_WIDTH_4X = 2,
300 IB_WIDTH_8X = 4,
301 IB_WIDTH_12X = 8
302};
303
304static inline int ib_width_enum_to_int(enum ib_port_width width)
305{
306 switch (width) {
307 case IB_WIDTH_1X: return 1;
308 case IB_WIDTH_4X: return 4;
309 case IB_WIDTH_8X: return 8;
310 case IB_WIDTH_12X: return 12;
311 default: return -1;
312 }
313}
314
315enum ib_port_speed {
316 IB_SPEED_SDR = 1,
317 IB_SPEED_DDR = 2,
318 IB_SPEED_QDR = 4,
319 IB_SPEED_FDR10 = 8,
320 IB_SPEED_FDR = 16,
321 IB_SPEED_EDR = 32
322};
323
324struct ib_protocol_stats {
325
326};
327
328struct iw_protocol_stats {
329 u64 ipInReceives;
330 u64 ipInHdrErrors;
331 u64 ipInTooBigErrors;
332 u64 ipInNoRoutes;
333 u64 ipInAddrErrors;
334 u64 ipInUnknownProtos;
335 u64 ipInTruncatedPkts;
336 u64 ipInDiscards;
337 u64 ipInDelivers;
338 u64 ipOutForwDatagrams;
339 u64 ipOutRequests;
340 u64 ipOutDiscards;
341 u64 ipOutNoRoutes;
342 u64 ipReasmTimeout;
343 u64 ipReasmReqds;
344 u64 ipReasmOKs;
345 u64 ipReasmFails;
346 u64 ipFragOKs;
347 u64 ipFragFails;
348 u64 ipFragCreates;
349 u64 ipInMcastPkts;
350 u64 ipOutMcastPkts;
351 u64 ipInBcastPkts;
352 u64 ipOutBcastPkts;
353
354 u64 tcpRtoAlgorithm;
355 u64 tcpRtoMin;
356 u64 tcpRtoMax;
357 u64 tcpMaxConn;
358 u64 tcpActiveOpens;
359 u64 tcpPassiveOpens;
360 u64 tcpAttemptFails;
361 u64 tcpEstabResets;
362 u64 tcpCurrEstab;
363 u64 tcpInSegs;
364 u64 tcpOutSegs;
365 u64 tcpRetransSegs;
366 u64 tcpInErrs;
367 u64 tcpOutRsts;
368};
369
370union rdma_protocol_stats {
371 struct ib_protocol_stats ib;
372 struct iw_protocol_stats iw;
373};
374
/*
 * Per-port core capability flags.  They tell the RDMA core which
 * management and addressing services it must provide for a given port.
 */
379#define RDMA_CORE_CAP_IB_MAD 0x00000001
380#define RDMA_CORE_CAP_IB_SMI 0x00000002
381#define RDMA_CORE_CAP_IB_CM 0x00000004
382#define RDMA_CORE_CAP_IW_CM 0x00000008
383#define RDMA_CORE_CAP_IB_SA 0x00000010
384#define RDMA_CORE_CAP_OPA_MAD 0x00000020
385
386
387#define RDMA_CORE_CAP_AF_IB 0x00001000
388#define RDMA_CORE_CAP_ETH_AH 0x00002000
389
390
391#define RDMA_CORE_CAP_PROT_IB 0x00100000
392#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
393#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
394
395#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
396 | RDMA_CORE_CAP_IB_MAD \
397 | RDMA_CORE_CAP_IB_SMI \
398 | RDMA_CORE_CAP_IB_CM \
399 | RDMA_CORE_CAP_IB_SA \
400 | RDMA_CORE_CAP_AF_IB)
401#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
402 | RDMA_CORE_CAP_IB_MAD \
403 | RDMA_CORE_CAP_IB_CM \
404 | RDMA_CORE_CAP_AF_IB \
405 | RDMA_CORE_CAP_ETH_AH)
406#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
407 | RDMA_CORE_CAP_IW_CM)
408#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
409 | RDMA_CORE_CAP_OPA_MAD)
410
411struct ib_port_attr {
412 enum ib_port_state state;
413 enum ib_mtu max_mtu;
414 enum ib_mtu active_mtu;
415 int gid_tbl_len;
416 u32 port_cap_flags;
417 u32 max_msg_sz;
418 u32 bad_pkey_cntr;
419 u32 qkey_viol_cntr;
420 u16 pkey_tbl_len;
421 u16 lid;
422 u16 sm_lid;
423 u8 lmc;
424 u8 max_vl_num;
425 u8 sm_sl;
426 u8 subnet_timeout;
427 u8 init_type_reply;
428 u8 active_width;
429 u8 active_speed;
430 u8 phys_state;
431};
432
433enum ib_device_modify_flags {
434 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
435 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
436};
437
438struct ib_device_modify {
439 u64 sys_image_guid;
440 char node_desc[64];
441};
442
443enum ib_port_modify_flags {
444 IB_PORT_SHUTDOWN = 1,
445 IB_PORT_INIT_TYPE = (1<<2),
446 IB_PORT_RESET_QKEY_CNTR = (1<<3)
447};
448
449struct ib_port_modify {
450 u32 set_port_cap_mask;
451 u32 clr_port_cap_mask;
452 u8 init_type;
453};
454
455enum ib_event_type {
456 IB_EVENT_CQ_ERR,
457 IB_EVENT_QP_FATAL,
458 IB_EVENT_QP_REQ_ERR,
459 IB_EVENT_QP_ACCESS_ERR,
460 IB_EVENT_COMM_EST,
461 IB_EVENT_SQ_DRAINED,
462 IB_EVENT_PATH_MIG,
463 IB_EVENT_PATH_MIG_ERR,
464 IB_EVENT_DEVICE_FATAL,
465 IB_EVENT_PORT_ACTIVE,
466 IB_EVENT_PORT_ERR,
467 IB_EVENT_LID_CHANGE,
468 IB_EVENT_PKEY_CHANGE,
469 IB_EVENT_SM_CHANGE,
470 IB_EVENT_SRQ_ERR,
471 IB_EVENT_SRQ_LIMIT_REACHED,
472 IB_EVENT_QP_LAST_WQE_REACHED,
473 IB_EVENT_CLIENT_REREGISTER,
474 IB_EVENT_GID_CHANGE,
475};
476
477__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
478
479struct ib_event {
480 struct ib_device *device;
481 union {
482 struct ib_cq *cq;
483 struct ib_qp *qp;
484 struct ib_srq *srq;
485 u8 port_num;
486 } element;
487 enum ib_event_type event;
488};
489
490struct ib_event_handler {
491 struct ib_device *device;
492 void (*handler)(struct ib_event_handler *, struct ib_event *);
493 struct list_head list;
494};
495
496#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
497 do { \
498 (_ptr)->device = _device; \
499 (_ptr)->handler = _handler; \
500 INIT_LIST_HEAD(&(_ptr)->list); \
501 } while (0)
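
/*
 * A minimal usage sketch (not part of this header): a client that wants
 * asynchronous events fills in an ib_event_handler and registers it.
 * "my_dev" and "my_event_cb" below are hypothetical names.
 *
 *	static void my_event_cb(struct ib_event_handler *h, struct ib_event *ev)
 *	{
 *		pr_info("async event %s on %s\n",
 *			ib_event_msg(ev->event), ev->device->name);
 *	}
 *
 *	static struct ib_event_handler my_eh;
 *
 *	INIT_IB_EVENT_HANDLER(&my_eh, my_dev, my_event_cb);
 *	ib_register_event_handler(&my_eh);
 */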
502
503struct ib_global_route {
504 union ib_gid dgid;
505 u32 flow_label;
506 u8 sgid_index;
507 u8 hop_limit;
508 u8 traffic_class;
509};
510
511struct ib_grh {
512 __be32 version_tclass_flow;
513 __be16 paylen;
514 u8 next_hdr;
515 u8 hop_limit;
516 union ib_gid sgid;
517 union ib_gid dgid;
518};
519
520enum {
521 IB_MULTICAST_QPN = 0xffffff
522};
523
524#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
525
526enum ib_ah_flags {
527 IB_AH_GRH = 1
528};
529
530enum ib_rate {
531 IB_RATE_PORT_CURRENT = 0,
532 IB_RATE_2_5_GBPS = 2,
533 IB_RATE_5_GBPS = 5,
534 IB_RATE_10_GBPS = 3,
535 IB_RATE_20_GBPS = 6,
536 IB_RATE_30_GBPS = 4,
537 IB_RATE_40_GBPS = 7,
538 IB_RATE_60_GBPS = 8,
539 IB_RATE_80_GBPS = 9,
540 IB_RATE_120_GBPS = 10,
541 IB_RATE_14_GBPS = 11,
542 IB_RATE_56_GBPS = 12,
543 IB_RATE_112_GBPS = 13,
544 IB_RATE_168_GBPS = 14,
545 IB_RATE_25_GBPS = 15,
546 IB_RATE_100_GBPS = 16,
547 IB_RATE_200_GBPS = 17,
548 IB_RATE_300_GBPS = 18
549};
550
/*
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS is
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
557__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
558
/*
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_5_GBPS is converted to 5000.
 * @rate: rate to convert.
 */
564__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
565
/*
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region used for normal registration
 * @IB_MR_TYPE_SIGNATURE: memory region used for signature
 *                        (data-integrity) operations
 */
575enum ib_mr_type {
576 IB_MR_TYPE_MEM_REG,
577 IB_MR_TYPE_SIGNATURE,
578};
579
580
581
582
583
584
585enum ib_signature_type {
586 IB_SIG_TYPE_NONE,
587 IB_SIG_TYPE_T10_DIF,
588};
589
590
591
592
593
594
595enum ib_t10_dif_bg_type {
596 IB_T10DIF_CRC,
597 IB_T10DIF_CSUM
598};
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613struct ib_t10_dif_domain {
614 enum ib_t10_dif_bg_type bg_type;
615 u16 pi_interval;
616 u16 bg;
617 u16 app_tag;
618 u32 ref_tag;
619 bool ref_remap;
620 bool app_escape;
621 bool ref_escape;
622 u16 apptag_check_mask;
623};
624
625
626
627
628
629
630
631struct ib_sig_domain {
632 enum ib_signature_type sig_type;
633 union {
634 struct ib_t10_dif_domain dif;
635 } sig;
636};
637
638
639
640
641
642
643
644struct ib_sig_attrs {
645 u8 check_mask;
646 struct ib_sig_domain mem;
647 struct ib_sig_domain wire;
648};
649
650enum ib_sig_err_type {
651 IB_SIG_BAD_GUARD,
652 IB_SIG_BAD_REFTAG,
653 IB_SIG_BAD_APPTAG,
654};
655
656
657
658
659struct ib_sig_err {
660 enum ib_sig_err_type err_type;
661 u32 expected;
662 u32 actual;
663 u64 sig_err_offset;
664 u32 key;
665};
666
667enum ib_mr_status_check {
668 IB_MR_CHECK_SIG_STATUS = 1,
669};
670
671
672
673
674
675
676
677
678
679struct ib_mr_status {
680 u32 fail_status;
681 struct ib_sig_err sig_err;
682};
683
/*
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
689__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
690
691struct ib_ah_attr {
692 struct ib_global_route grh;
693 u16 dlid;
694 u8 sl;
695 u8 src_path_bits;
696 u8 static_rate;
697 u8 ah_flags;
698 u8 port_num;
699 u8 dmac[ETH_ALEN];
700 u16 vlan_id;
701};
702
703enum ib_wc_status {
704 IB_WC_SUCCESS,
705 IB_WC_LOC_LEN_ERR,
706 IB_WC_LOC_QP_OP_ERR,
707 IB_WC_LOC_EEC_OP_ERR,
708 IB_WC_LOC_PROT_ERR,
709 IB_WC_WR_FLUSH_ERR,
710 IB_WC_MW_BIND_ERR,
711 IB_WC_BAD_RESP_ERR,
712 IB_WC_LOC_ACCESS_ERR,
713 IB_WC_REM_INV_REQ_ERR,
714 IB_WC_REM_ACCESS_ERR,
715 IB_WC_REM_OP_ERR,
716 IB_WC_RETRY_EXC_ERR,
717 IB_WC_RNR_RETRY_EXC_ERR,
718 IB_WC_LOC_RDD_VIOL_ERR,
719 IB_WC_REM_INV_RD_REQ_ERR,
720 IB_WC_REM_ABORT_ERR,
721 IB_WC_INV_EECN_ERR,
722 IB_WC_INV_EEC_STATE_ERR,
723 IB_WC_FATAL_ERR,
724 IB_WC_RESP_TIMEOUT_ERR,
725 IB_WC_GENERAL_ERR
726};
727
728__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
729
730enum ib_wc_opcode {
731 IB_WC_SEND,
732 IB_WC_RDMA_WRITE,
733 IB_WC_RDMA_READ,
734 IB_WC_COMP_SWAP,
735 IB_WC_FETCH_ADD,
736 IB_WC_BIND_MW,
737 IB_WC_LSO,
738 IB_WC_LOCAL_INV,
739 IB_WC_FAST_REG_MR,
740 IB_WC_MASKED_COMP_SWAP,
741 IB_WC_MASKED_FETCH_ADD,
/*
 * The value of IB_WC_RECV is chosen so that consumers can test whether
 * a completion is a receive by testing (opcode & IB_WC_RECV).
 */
746 IB_WC_RECV = 1 << 7,
747 IB_WC_RECV_RDMA_WITH_IMM
748};
749
750enum ib_wc_flags {
751 IB_WC_GRH = 1,
752 IB_WC_WITH_IMM = (1<<1),
753 IB_WC_WITH_INVALIDATE = (1<<2),
754 IB_WC_IP_CSUM_OK = (1<<3),
755 IB_WC_WITH_SMAC = (1<<4),
756 IB_WC_WITH_VLAN = (1<<5),
757};
758
759struct ib_wc {
760 u64 wr_id;
761 enum ib_wc_status status;
762 enum ib_wc_opcode opcode;
763 u32 vendor_err;
764 u32 byte_len;
765 struct ib_qp *qp;
766 union {
767 __be32 imm_data;
768 u32 invalidate_rkey;
769 } ex;
770 u32 src_qp;
771 int wc_flags;
772 u16 pkey_index;
773 u16 slid;
774 u8 sl;
775 u8 dlid_path_bits;
776 u8 port_num;
777 u8 smac[ETH_ALEN];
778 u16 vlan_id;
779};
780
781enum ib_cq_notify_flags {
782 IB_CQ_SOLICITED = 1 << 0,
783 IB_CQ_NEXT_COMP = 1 << 1,
784 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
785 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
786};
787
788enum ib_srq_type {
789 IB_SRQT_BASIC,
790 IB_SRQT_XRC
791};
792
793enum ib_srq_attr_mask {
794 IB_SRQ_MAX_WR = 1 << 0,
795 IB_SRQ_LIMIT = 1 << 1,
796};
797
798struct ib_srq_attr {
799 u32 max_wr;
800 u32 max_sge;
801 u32 srq_limit;
802};
803
804struct ib_srq_init_attr {
805 void (*event_handler)(struct ib_event *, void *);
806 void *srq_context;
807 struct ib_srq_attr attr;
808 enum ib_srq_type srq_type;
809
810 union {
811 struct {
812 struct ib_xrcd *xrcd;
813 struct ib_cq *cq;
814 } xrc;
815 } ext;
816};
817
818struct ib_qp_cap {
819 u32 max_send_wr;
820 u32 max_recv_wr;
821 u32 max_send_sge;
822 u32 max_recv_sge;
823 u32 max_inline_data;
824};
825
826enum ib_sig_type {
827 IB_SIGNAL_ALL_WR,
828 IB_SIGNAL_REQ_WR
829};
830
831enum ib_qp_type {
832
833
834
835
836
837 IB_QPT_SMI,
838 IB_QPT_GSI,
839
840 IB_QPT_RC,
841 IB_QPT_UC,
842 IB_QPT_UD,
843 IB_QPT_RAW_IPV6,
844 IB_QPT_RAW_ETHERTYPE,
845 IB_QPT_RAW_PACKET = 8,
846 IB_QPT_XRC_INI = 9,
847 IB_QPT_XRC_TGT,
848 IB_QPT_MAX,
849
850
851
852
853 IB_QPT_RESERVED1 = 0x1000,
854 IB_QPT_RESERVED2,
855 IB_QPT_RESERVED3,
856 IB_QPT_RESERVED4,
857 IB_QPT_RESERVED5,
858 IB_QPT_RESERVED6,
859 IB_QPT_RESERVED7,
860 IB_QPT_RESERVED8,
861 IB_QPT_RESERVED9,
862 IB_QPT_RESERVED10,
863};
864
865enum ib_qp_create_flags {
866 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
867 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
868 IB_QP_CREATE_NETIF_QP = 1 << 5,
869 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
870 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
871
872 IB_QP_CREATE_RESERVED_START = 1 << 26,
873 IB_QP_CREATE_RESERVED_END = 1 << 31,
874};
875
876
877
878
879
880
881
882struct ib_qp_init_attr {
883 void (*event_handler)(struct ib_event *, void *);
884 void *qp_context;
885 struct ib_cq *send_cq;
886 struct ib_cq *recv_cq;
887 struct ib_srq *srq;
888 struct ib_xrcd *xrcd;
889 struct ib_qp_cap cap;
890 enum ib_sig_type sq_sig_type;
891 enum ib_qp_type qp_type;
892 enum ib_qp_create_flags create_flags;
893 u8 port_num;
894};
895
896struct ib_qp_open_attr {
897 void (*event_handler)(struct ib_event *, void *);
898 void *qp_context;
899 u32 qp_num;
900 enum ib_qp_type qp_type;
901};
902
903enum ib_rnr_timeout {
904 IB_RNR_TIMER_655_36 = 0,
905 IB_RNR_TIMER_000_01 = 1,
906 IB_RNR_TIMER_000_02 = 2,
907 IB_RNR_TIMER_000_03 = 3,
908 IB_RNR_TIMER_000_04 = 4,
909 IB_RNR_TIMER_000_06 = 5,
910 IB_RNR_TIMER_000_08 = 6,
911 IB_RNR_TIMER_000_12 = 7,
912 IB_RNR_TIMER_000_16 = 8,
913 IB_RNR_TIMER_000_24 = 9,
914 IB_RNR_TIMER_000_32 = 10,
915 IB_RNR_TIMER_000_48 = 11,
916 IB_RNR_TIMER_000_64 = 12,
917 IB_RNR_TIMER_000_96 = 13,
918 IB_RNR_TIMER_001_28 = 14,
919 IB_RNR_TIMER_001_92 = 15,
920 IB_RNR_TIMER_002_56 = 16,
921 IB_RNR_TIMER_003_84 = 17,
922 IB_RNR_TIMER_005_12 = 18,
923 IB_RNR_TIMER_007_68 = 19,
924 IB_RNR_TIMER_010_24 = 20,
925 IB_RNR_TIMER_015_36 = 21,
926 IB_RNR_TIMER_020_48 = 22,
927 IB_RNR_TIMER_030_72 = 23,
928 IB_RNR_TIMER_040_96 = 24,
929 IB_RNR_TIMER_061_44 = 25,
930 IB_RNR_TIMER_081_92 = 26,
931 IB_RNR_TIMER_122_88 = 27,
932 IB_RNR_TIMER_163_84 = 28,
933 IB_RNR_TIMER_245_76 = 29,
934 IB_RNR_TIMER_327_68 = 30,
935 IB_RNR_TIMER_491_52 = 31
936};
937
938enum ib_qp_attr_mask {
939 IB_QP_STATE = 1,
940 IB_QP_CUR_STATE = (1<<1),
941 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
942 IB_QP_ACCESS_FLAGS = (1<<3),
943 IB_QP_PKEY_INDEX = (1<<4),
944 IB_QP_PORT = (1<<5),
945 IB_QP_QKEY = (1<<6),
946 IB_QP_AV = (1<<7),
947 IB_QP_PATH_MTU = (1<<8),
948 IB_QP_TIMEOUT = (1<<9),
949 IB_QP_RETRY_CNT = (1<<10),
950 IB_QP_RNR_RETRY = (1<<11),
951 IB_QP_RQ_PSN = (1<<12),
952 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
953 IB_QP_ALT_PATH = (1<<14),
954 IB_QP_MIN_RNR_TIMER = (1<<15),
955 IB_QP_SQ_PSN = (1<<16),
956 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
957 IB_QP_PATH_MIG_STATE = (1<<18),
958 IB_QP_CAP = (1<<19),
959 IB_QP_DEST_QPN = (1<<20),
960 IB_QP_SMAC = (1<<21),
961 IB_QP_ALT_SMAC = (1<<22),
962 IB_QP_VID = (1<<23),
963 IB_QP_ALT_VID = (1<<24),
964};
965
966enum ib_qp_state {
967 IB_QPS_RESET,
968 IB_QPS_INIT,
969 IB_QPS_RTR,
970 IB_QPS_RTS,
971 IB_QPS_SQD,
972 IB_QPS_SQE,
973 IB_QPS_ERR
974};
975
976enum ib_mig_state {
977 IB_MIG_MIGRATED,
978 IB_MIG_REARM,
979 IB_MIG_ARMED
980};
981
982enum ib_mw_type {
983 IB_MW_TYPE_1 = 1,
984 IB_MW_TYPE_2 = 2
985};
986
987struct ib_qp_attr {
988 enum ib_qp_state qp_state;
989 enum ib_qp_state cur_qp_state;
990 enum ib_mtu path_mtu;
991 enum ib_mig_state path_mig_state;
992 u32 qkey;
993 u32 rq_psn;
994 u32 sq_psn;
995 u32 dest_qp_num;
996 int qp_access_flags;
997 struct ib_qp_cap cap;
998 struct ib_ah_attr ah_attr;
999 struct ib_ah_attr alt_ah_attr;
1000 u16 pkey_index;
1001 u16 alt_pkey_index;
1002 u8 en_sqd_async_notify;
1003 u8 sq_draining;
1004 u8 max_rd_atomic;
1005 u8 max_dest_rd_atomic;
1006 u8 min_rnr_timer;
1007 u8 port_num;
1008 u8 timeout;
1009 u8 retry_cnt;
1010 u8 rnr_retry;
1011 u8 alt_port_num;
1012 u8 alt_timeout;
1013 u8 smac[ETH_ALEN];
1014 u8 alt_smac[ETH_ALEN];
1015 u16 vlan_id;
1016 u16 alt_vlan_id;
1017};
1018
1019enum ib_wr_opcode {
1020 IB_WR_RDMA_WRITE,
1021 IB_WR_RDMA_WRITE_WITH_IMM,
1022 IB_WR_SEND,
1023 IB_WR_SEND_WITH_IMM,
1024 IB_WR_RDMA_READ,
1025 IB_WR_ATOMIC_CMP_AND_SWP,
1026 IB_WR_ATOMIC_FETCH_AND_ADD,
1027 IB_WR_LSO,
1028 IB_WR_SEND_WITH_INV,
1029 IB_WR_RDMA_READ_WITH_INV,
1030 IB_WR_LOCAL_INV,
1031 IB_WR_FAST_REG_MR,
1032 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1033 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1034 IB_WR_BIND_MW,
1035 IB_WR_REG_SIG_MR,
1036
1037
1038
1039 IB_WR_RESERVED1 = 0xf0,
1040 IB_WR_RESERVED2,
1041 IB_WR_RESERVED3,
1042 IB_WR_RESERVED4,
1043 IB_WR_RESERVED5,
1044 IB_WR_RESERVED6,
1045 IB_WR_RESERVED7,
1046 IB_WR_RESERVED8,
1047 IB_WR_RESERVED9,
1048 IB_WR_RESERVED10,
1049};
1050
1051enum ib_send_flags {
1052 IB_SEND_FENCE = 1,
1053 IB_SEND_SIGNALED = (1<<1),
1054 IB_SEND_SOLICITED = (1<<2),
1055 IB_SEND_INLINE = (1<<3),
1056 IB_SEND_IP_CSUM = (1<<4),
1057
1058
1059 IB_SEND_RESERVED_START = (1 << 26),
1060 IB_SEND_RESERVED_END = (1 << 31),
1061};
1062
1063struct ib_sge {
1064 u64 addr;
1065 u32 length;
1066 u32 lkey;
1067};
1068
1069struct ib_fast_reg_page_list {
1070 struct ib_device *device;
1071 u64 *page_list;
1072 unsigned int max_page_list_len;
1073};
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085struct ib_mw_bind_info {
1086 struct ib_mr *mr;
1087 u64 addr;
1088 u64 length;
1089 int mw_access_flags;
1090};
1091
1092struct ib_send_wr {
1093 struct ib_send_wr *next;
1094 u64 wr_id;
1095 struct ib_sge *sg_list;
1096 int num_sge;
1097 enum ib_wr_opcode opcode;
1098 int send_flags;
1099 union {
1100 __be32 imm_data;
1101 u32 invalidate_rkey;
1102 } ex;
1103 union {
1104 struct {
1105 u64 remote_addr;
1106 u32 rkey;
1107 } rdma;
1108 struct {
1109 u64 remote_addr;
1110 u64 compare_add;
1111 u64 swap;
1112 u64 compare_add_mask;
1113 u64 swap_mask;
1114 u32 rkey;
1115 } atomic;
1116 struct {
1117 struct ib_ah *ah;
1118 void *header;
1119 int hlen;
1120 int mss;
1121 u32 remote_qpn;
1122 u32 remote_qkey;
1123 u16 pkey_index;
1124 u8 port_num;
1125 } ud;
1126 struct {
1127 u64 iova_start;
1128 struct ib_fast_reg_page_list *page_list;
1129 unsigned int page_shift;
1130 unsigned int page_list_len;
1131 u32 length;
1132 int access_flags;
1133 u32 rkey;
1134 } fast_reg;
1135 struct {
1136 struct ib_mw *mw;
1137
1138 u32 rkey;
1139 struct ib_mw_bind_info bind_info;
1140 } bind_mw;
1141 struct {
1142 struct ib_sig_attrs *sig_attrs;
1143 struct ib_mr *sig_mr;
1144 int access_flags;
1145 struct ib_sge *prot;
1146 } sig_handover;
1147 } wr;
1148 u32 xrc_remote_srq_num;
1149};
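
/*
 * A minimal sketch (assumptions, not from this file) of how a kernel ULP
 * builds an RDMA WRITE work request with this union-based ib_send_wr and
 * posts it.  "qp", "laddr", "len", "lkey", "raddr" and "rkey" are assumed
 * to come from the caller's own setup.
 *
 *	struct ib_sge sge = {
 *		.addr   = laddr,	// DMA address of the local buffer
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,	// opaque cookie returned in the completion
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.wr.rdma.remote_addr = raddr;
 *	wr.wr.rdma.rkey        = rkey;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("post_send failed\n");	// wrs from bad_wr on were not posted
 */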
1150
1151struct ib_recv_wr {
1152 struct ib_recv_wr *next;
1153 u64 wr_id;
1154 struct ib_sge *sg_list;
1155 int num_sge;
1156};
1157
1158enum ib_access_flags {
1159 IB_ACCESS_LOCAL_WRITE = 1,
1160 IB_ACCESS_REMOTE_WRITE = (1<<1),
1161 IB_ACCESS_REMOTE_READ = (1<<2),
1162 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1163 IB_ACCESS_MW_BIND = (1<<4),
1164 IB_ZERO_BASED = (1<<5),
1165 IB_ACCESS_ON_DEMAND = (1<<6),
1166};
1167
1168struct ib_phys_buf {
1169 u64 addr;
1170 u64 size;
1171};
1172
1173struct ib_mr_attr {
1174 struct ib_pd *pd;
1175 u64 device_virt_addr;
1176 u64 size;
1177 int mr_access_flags;
1178 u32 lkey;
1179 u32 rkey;
1180};
1181
1182enum ib_mr_rereg_flags {
1183 IB_MR_REREG_TRANS = 1,
1184 IB_MR_REREG_PD = (1<<1),
1185 IB_MR_REREG_ACCESS = (1<<2),
1186 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1187};
1188
1189
1190
1191
1192
1193
1194
1195struct ib_mw_bind {
1196 u64 wr_id;
1197 int send_flags;
1198 struct ib_mw_bind_info bind_info;
1199};
1200
1201struct ib_fmr_attr {
1202 int max_pages;
1203 int max_maps;
1204 u8 page_shift;
1205};
1206
1207struct ib_umem;
1208
1209struct ib_ucontext {
1210 struct ib_device *device;
1211 struct list_head pd_list;
1212 struct list_head mr_list;
1213 struct list_head mw_list;
1214 struct list_head cq_list;
1215 struct list_head qp_list;
1216 struct list_head srq_list;
1217 struct list_head ah_list;
1218 struct list_head xrcd_list;
1219 struct list_head rule_list;
1220 int closing;
1221
1222 struct pid *tgid;
1223#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1224 struct rb_root umem_tree;
1225
1226
1227
1228
1229 struct rw_semaphore umem_rwsem;
1230 void (*invalidate_range)(struct ib_umem *umem,
1231 unsigned long start, unsigned long end);
1232
1233 struct mmu_notifier mn;
1234 atomic_t notifier_count;
1235
1236 struct list_head no_private_counters;
1237 int odp_mrs_count;
1238#endif
1239};
1240
1241struct ib_uobject {
1242 u64 user_handle;
1243 struct ib_ucontext *context;
1244 void *object;
1245 struct list_head list;
1246 int id;
1247 struct kref ref;
1248 struct rw_semaphore mutex;
1249 int live;
1250};
1251
1252struct ib_udata {
1253 const void __user *inbuf;
1254 void __user *outbuf;
1255 size_t inlen;
1256 size_t outlen;
1257};
1258
1259struct ib_pd {
1260 u32 local_dma_lkey;
1261 struct ib_device *device;
1262 struct ib_uobject *uobject;
1263 atomic_t usecnt;
1264 struct ib_mr *local_mr;
1265};
1266
1267struct ib_xrcd {
1268 struct ib_device *device;
1269 atomic_t usecnt;
1270 struct inode *inode;
1271
1272 struct mutex tgt_qp_mutex;
1273 struct list_head tgt_qp_list;
1274};
1275
1276struct ib_ah {
1277 struct ib_device *device;
1278 struct ib_pd *pd;
1279 struct ib_uobject *uobject;
1280};
1281
1282typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1283
1284struct ib_cq {
1285 struct ib_device *device;
1286 struct ib_uobject *uobject;
1287 ib_comp_handler comp_handler;
1288 void (*event_handler)(struct ib_event *, void *);
1289 void *cq_context;
1290 int cqe;
1291 atomic_t usecnt;
1292};
1293
1294struct ib_srq {
1295 struct ib_device *device;
1296 struct ib_pd *pd;
1297 struct ib_uobject *uobject;
1298 void (*event_handler)(struct ib_event *, void *);
1299 void *srq_context;
1300 enum ib_srq_type srq_type;
1301 atomic_t usecnt;
1302
1303 union {
1304 struct {
1305 struct ib_xrcd *xrcd;
1306 struct ib_cq *cq;
1307 u32 srq_num;
1308 } xrc;
1309 } ext;
1310};
1311
1312struct ib_qp {
1313 struct ib_device *device;
1314 struct ib_pd *pd;
1315 struct ib_cq *send_cq;
1316 struct ib_cq *recv_cq;
1317 struct ib_srq *srq;
1318 struct ib_xrcd *xrcd;
1319 struct list_head xrcd_list;
1320
1321 atomic_t usecnt;
1322 struct list_head open_list;
1323 struct ib_qp *real_qp;
1324 struct ib_uobject *uobject;
1325 void (*event_handler)(struct ib_event *, void *);
1326 void *qp_context;
1327 u32 qp_num;
1328 enum ib_qp_type qp_type;
1329};
1330
1331struct ib_mr {
1332 struct ib_device *device;
1333 struct ib_pd *pd;
1334 struct ib_uobject *uobject;
1335 u32 lkey;
1336 u32 rkey;
1337 atomic_t usecnt;
1338};
1339
1340struct ib_mw {
1341 struct ib_device *device;
1342 struct ib_pd *pd;
1343 struct ib_uobject *uobject;
1344 u32 rkey;
1345 enum ib_mw_type type;
1346};
1347
1348struct ib_fmr {
1349 struct ib_device *device;
1350 struct ib_pd *pd;
1351 struct list_head list;
1352 u32 lkey;
1353 u32 rkey;
1354};
1355
1356
1357enum ib_flow_attr_type {
1358
1359 IB_FLOW_ATTR_NORMAL = 0x0,
1360
1361
1362
1363 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1364
1365
1366
1367 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1368
1369 IB_FLOW_ATTR_SNIFFER = 0x3
1370};
1371
1372
1373enum ib_flow_spec_type {
1374
1375 IB_FLOW_SPEC_ETH = 0x20,
1376 IB_FLOW_SPEC_IB = 0x22,
1377
1378 IB_FLOW_SPEC_IPV4 = 0x30,
1379
1380 IB_FLOW_SPEC_TCP = 0x40,
1381 IB_FLOW_SPEC_UDP = 0x41
1382};
1383#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1384#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1385
1386
1387
1388
1389enum ib_flow_domain {
1390 IB_FLOW_DOMAIN_USER,
1391 IB_FLOW_DOMAIN_ETHTOOL,
1392 IB_FLOW_DOMAIN_RFS,
1393 IB_FLOW_DOMAIN_NIC,
1394 IB_FLOW_DOMAIN_NUM
1395};
1396
1397struct ib_flow_eth_filter {
1398 u8 dst_mac[6];
1399 u8 src_mac[6];
1400 __be16 ether_type;
1401 __be16 vlan_tag;
1402};
1403
1404struct ib_flow_spec_eth {
1405 enum ib_flow_spec_type type;
1406 u16 size;
1407 struct ib_flow_eth_filter val;
1408 struct ib_flow_eth_filter mask;
1409};
1410
1411struct ib_flow_ib_filter {
1412 __be16 dlid;
1413 __u8 sl;
1414};
1415
1416struct ib_flow_spec_ib {
1417 enum ib_flow_spec_type type;
1418 u16 size;
1419 struct ib_flow_ib_filter val;
1420 struct ib_flow_ib_filter mask;
1421};
1422
1423struct ib_flow_ipv4_filter {
1424 __be32 src_ip;
1425 __be32 dst_ip;
1426};
1427
1428struct ib_flow_spec_ipv4 {
1429 enum ib_flow_spec_type type;
1430 u16 size;
1431 struct ib_flow_ipv4_filter val;
1432 struct ib_flow_ipv4_filter mask;
1433};
1434
1435struct ib_flow_tcp_udp_filter {
1436 __be16 dst_port;
1437 __be16 src_port;
1438};
1439
1440struct ib_flow_spec_tcp_udp {
1441 enum ib_flow_spec_type type;
1442 u16 size;
1443 struct ib_flow_tcp_udp_filter val;
1444 struct ib_flow_tcp_udp_filter mask;
1445};
1446
1447union ib_flow_spec {
1448 struct {
1449 enum ib_flow_spec_type type;
1450 u16 size;
1451 };
1452 struct ib_flow_spec_eth eth;
1453 struct ib_flow_spec_ib ib;
1454 struct ib_flow_spec_ipv4 ipv4;
1455 struct ib_flow_spec_tcp_udp tcp_udp;
1456};
1457
1458struct ib_flow_attr {
1459 enum ib_flow_attr_type type;
1460 u16 size;
1461 u16 priority;
1462 u32 flags;
1463 u8 num_of_specs;
1464 u8 port;
1465
1466
1467
1468
1469};
1470
1471struct ib_flow {
1472 struct ib_qp *qp;
1473 struct ib_uobject *uobject;
1474};
1475
1476struct ib_mad_hdr;
1477struct ib_grh;
1478
1479enum ib_process_mad_flags {
1480 IB_MAD_IGNORE_MKEY = 1,
1481 IB_MAD_IGNORE_BKEY = 2,
1482 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1483};
1484
1485enum ib_mad_result {
1486 IB_MAD_RESULT_FAILURE = 0,
1487 IB_MAD_RESULT_SUCCESS = 1 << 0,
1488 IB_MAD_RESULT_REPLY = 1 << 1,
1489 IB_MAD_RESULT_CONSUMED = 1 << 2
1490};
1491
1492#define IB_DEVICE_NAME_MAX 64
1493
1494struct ib_cache {
1495 rwlock_t lock;
1496 struct ib_event_handler event_handler;
1497 struct ib_pkey_cache **pkey_cache;
1498 struct ib_gid_table **gid_cache;
1499 u8 *lmc_cache;
1500};
1501
1502struct ib_dma_mapping_ops {
1503 int (*mapping_error)(struct ib_device *dev,
1504 u64 dma_addr);
1505 u64 (*map_single)(struct ib_device *dev,
1506 void *ptr, size_t size,
1507 enum dma_data_direction direction);
1508 void (*unmap_single)(struct ib_device *dev,
1509 u64 addr, size_t size,
1510 enum dma_data_direction direction);
1511 u64 (*map_page)(struct ib_device *dev,
1512 struct page *page, unsigned long offset,
1513 size_t size,
1514 enum dma_data_direction direction);
1515 void (*unmap_page)(struct ib_device *dev,
1516 u64 addr, size_t size,
1517 enum dma_data_direction direction);
1518 int (*map_sg)(struct ib_device *dev,
1519 struct scatterlist *sg, int nents,
1520 enum dma_data_direction direction);
1521 void (*unmap_sg)(struct ib_device *dev,
1522 struct scatterlist *sg, int nents,
1523 enum dma_data_direction direction);
1524 void (*sync_single_for_cpu)(struct ib_device *dev,
1525 u64 dma_handle,
1526 size_t size,
1527 enum dma_data_direction dir);
1528 void (*sync_single_for_device)(struct ib_device *dev,
1529 u64 dma_handle,
1530 size_t size,
1531 enum dma_data_direction dir);
1532 void *(*alloc_coherent)(struct ib_device *dev,
1533 size_t size,
1534 u64 *dma_handle,
1535 gfp_t flag);
1536 void (*free_coherent)(struct ib_device *dev,
1537 size_t size, void *cpu_addr,
1538 u64 dma_handle);
1539};
1540
1541struct iw_cm_verbs;
1542
1543struct ib_port_immutable {
1544 int pkey_tbl_len;
1545 int gid_tbl_len;
1546 u32 core_cap_flags;
1547 u32 max_mad_size;
1548};
1549
1550struct ib_device {
1551 struct device *dma_device;
1552
1553 char name[IB_DEVICE_NAME_MAX];
1554
1555 struct list_head event_handler_list;
1556 spinlock_t event_handler_lock;
1557
1558 spinlock_t client_data_lock;
1559 struct list_head core_list;
1560
1561
1562 struct list_head client_data_list;
1563
1564 struct ib_cache cache;
1565
1566
1567
1568 struct ib_port_immutable *port_immutable;
1569
1570 int num_comp_vectors;
1571
1572 struct iw_cm_verbs *iwcm;
1573
1574 int (*get_protocol_stats)(struct ib_device *device,
1575 union rdma_protocol_stats *stats);
1576 int (*query_device)(struct ib_device *device,
1577 struct ib_device_attr *device_attr,
1578 struct ib_udata *udata);
1579 int (*query_port)(struct ib_device *device,
1580 u8 port_num,
1581 struct ib_port_attr *port_attr);
1582 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1583 u8 port_num);
1584
1585
1586
1587
1588
1589
1590
1591 struct net_device *(*get_netdev)(struct ib_device *device,
1592 u8 port_num);
1593 int (*query_gid)(struct ib_device *device,
1594 u8 port_num, int index,
1595 union ib_gid *gid);
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609 int (*add_gid)(struct ib_device *device,
1610 u8 port_num,
1611 unsigned int index,
1612 const union ib_gid *gid,
1613 const struct ib_gid_attr *attr,
1614 void **context);
1615
1616
1617
1618
1619
1620
1621 int (*del_gid)(struct ib_device *device,
1622 u8 port_num,
1623 unsigned int index,
1624 void **context);
1625 int (*query_pkey)(struct ib_device *device,
1626 u8 port_num, u16 index, u16 *pkey);
1627 int (*modify_device)(struct ib_device *device,
1628 int device_modify_mask,
1629 struct ib_device_modify *device_modify);
1630 int (*modify_port)(struct ib_device *device,
1631 u8 port_num, int port_modify_mask,
1632 struct ib_port_modify *port_modify);
1633 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1634 struct ib_udata *udata);
1635 int (*dealloc_ucontext)(struct ib_ucontext *context);
1636 int (*mmap)(struct ib_ucontext *context,
1637 struct vm_area_struct *vma);
1638 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1639 struct ib_ucontext *context,
1640 struct ib_udata *udata);
1641 int (*dealloc_pd)(struct ib_pd *pd);
1642 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1643 struct ib_ah_attr *ah_attr);
1644 int (*modify_ah)(struct ib_ah *ah,
1645 struct ib_ah_attr *ah_attr);
1646 int (*query_ah)(struct ib_ah *ah,
1647 struct ib_ah_attr *ah_attr);
1648 int (*destroy_ah)(struct ib_ah *ah);
1649 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1650 struct ib_srq_init_attr *srq_init_attr,
1651 struct ib_udata *udata);
1652 int (*modify_srq)(struct ib_srq *srq,
1653 struct ib_srq_attr *srq_attr,
1654 enum ib_srq_attr_mask srq_attr_mask,
1655 struct ib_udata *udata);
1656 int (*query_srq)(struct ib_srq *srq,
1657 struct ib_srq_attr *srq_attr);
1658 int (*destroy_srq)(struct ib_srq *srq);
1659 int (*post_srq_recv)(struct ib_srq *srq,
1660 struct ib_recv_wr *recv_wr,
1661 struct ib_recv_wr **bad_recv_wr);
1662 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1663 struct ib_qp_init_attr *qp_init_attr,
1664 struct ib_udata *udata);
1665 int (*modify_qp)(struct ib_qp *qp,
1666 struct ib_qp_attr *qp_attr,
1667 int qp_attr_mask,
1668 struct ib_udata *udata);
1669 int (*query_qp)(struct ib_qp *qp,
1670 struct ib_qp_attr *qp_attr,
1671 int qp_attr_mask,
1672 struct ib_qp_init_attr *qp_init_attr);
1673 int (*destroy_qp)(struct ib_qp *qp);
1674 int (*post_send)(struct ib_qp *qp,
1675 struct ib_send_wr *send_wr,
1676 struct ib_send_wr **bad_send_wr);
1677 int (*post_recv)(struct ib_qp *qp,
1678 struct ib_recv_wr *recv_wr,
1679 struct ib_recv_wr **bad_recv_wr);
1680 struct ib_cq * (*create_cq)(struct ib_device *device,
1681 const struct ib_cq_init_attr *attr,
1682 struct ib_ucontext *context,
1683 struct ib_udata *udata);
1684 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1685 u16 cq_period);
1686 int (*destroy_cq)(struct ib_cq *cq);
1687 int (*resize_cq)(struct ib_cq *cq, int cqe,
1688 struct ib_udata *udata);
1689 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1690 struct ib_wc *wc);
1691 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1692 int (*req_notify_cq)(struct ib_cq *cq,
1693 enum ib_cq_notify_flags flags);
1694 int (*req_ncomp_notif)(struct ib_cq *cq,
1695 int wc_cnt);
1696 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1697 int mr_access_flags);
1698 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1699 struct ib_phys_buf *phys_buf_array,
1700 int num_phys_buf,
1701 int mr_access_flags,
1702 u64 *iova_start);
1703 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1704 u64 start, u64 length,
1705 u64 virt_addr,
1706 int mr_access_flags,
1707 struct ib_udata *udata);
1708 int (*rereg_user_mr)(struct ib_mr *mr,
1709 int flags,
1710 u64 start, u64 length,
1711 u64 virt_addr,
1712 int mr_access_flags,
1713 struct ib_pd *pd,
1714 struct ib_udata *udata);
1715 int (*query_mr)(struct ib_mr *mr,
1716 struct ib_mr_attr *mr_attr);
1717 int (*dereg_mr)(struct ib_mr *mr);
1718 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
1719 enum ib_mr_type mr_type,
1720 u32 max_num_sg);
1721 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1722 int page_list_len);
1723 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1724 int (*rereg_phys_mr)(struct ib_mr *mr,
1725 int mr_rereg_mask,
1726 struct ib_pd *pd,
1727 struct ib_phys_buf *phys_buf_array,
1728 int num_phys_buf,
1729 int mr_access_flags,
1730 u64 *iova_start);
1731 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1732 enum ib_mw_type type);
1733 int (*bind_mw)(struct ib_qp *qp,
1734 struct ib_mw *mw,
1735 struct ib_mw_bind *mw_bind);
1736 int (*dealloc_mw)(struct ib_mw *mw);
1737 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1738 int mr_access_flags,
1739 struct ib_fmr_attr *fmr_attr);
1740 int (*map_phys_fmr)(struct ib_fmr *fmr,
1741 u64 *page_list, int list_len,
1742 u64 iova);
1743 int (*unmap_fmr)(struct list_head *fmr_list);
1744 int (*dealloc_fmr)(struct ib_fmr *fmr);
1745 int (*attach_mcast)(struct ib_qp *qp,
1746 union ib_gid *gid,
1747 u16 lid);
1748 int (*detach_mcast)(struct ib_qp *qp,
1749 union ib_gid *gid,
1750 u16 lid);
1751 int (*process_mad)(struct ib_device *device,
1752 int process_mad_flags,
1753 u8 port_num,
1754 const struct ib_wc *in_wc,
1755 const struct ib_grh *in_grh,
1756 const struct ib_mad_hdr *in_mad,
1757 size_t in_mad_size,
1758 struct ib_mad_hdr *out_mad,
1759 size_t *out_mad_size,
1760 u16 *out_mad_pkey_index);
1761 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1762 struct ib_ucontext *ucontext,
1763 struct ib_udata *udata);
1764 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1765 struct ib_flow * (*create_flow)(struct ib_qp *qp,
1766 struct ib_flow_attr
1767 *flow_attr,
1768 int domain);
1769 int (*destroy_flow)(struct ib_flow *flow_id);
1770 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1771 struct ib_mr_status *mr_status);
1772 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
1773
1774 struct ib_dma_mapping_ops *dma_ops;
1775
1776 struct module *owner;
1777 struct device dev;
1778 struct kobject *ports_parent;
1779 struct list_head port_list;
1780
1781 enum {
1782 IB_DEV_UNINITIALIZED,
1783 IB_DEV_REGISTERED,
1784 IB_DEV_UNREGISTERED
1785 } reg_state;
1786
1787 int uverbs_abi_ver;
1788 u64 uverbs_cmd_mask;
1789 u64 uverbs_ex_cmd_mask;
1790
1791 char node_desc[64];
1792 __be64 node_guid;
1793 u32 local_dma_lkey;
1794 u16 is_switch:1;
1795 u8 node_type;
1796 u8 phys_port_cnt;
1797
1798
1799
1800
1801
1802
1803
1804 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
1805};
1806
1807struct ib_client {
1808 char *name;
1809 void (*add) (struct ib_device *);
1810 void (*remove)(struct ib_device *, void *client_data);
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827 struct net_device *(*get_net_dev_by_params)(
1828 struct ib_device *dev,
1829 u8 port,
1830 u16 pkey,
1831 const union ib_gid *gid,
1832 const struct sockaddr *addr,
1833 void *client_data);
1834 struct list_head list;
1835};
1836
1837struct ib_device *ib_alloc_device(size_t size);
1838void ib_dealloc_device(struct ib_device *device);
1839
1840int ib_register_device(struct ib_device *device,
1841 int (*port_callback)(struct ib_device *,
1842 u8, struct kobject *));
1843void ib_unregister_device(struct ib_device *device);
1844
1845int ib_register_client (struct ib_client *client);
1846void ib_unregister_client(struct ib_client *client);
1847
1848void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1849void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1850 void *data);
1851
1852static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1853{
1854 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1855}
1856
1857static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1858{
1859 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1860}
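
/*
 * Sketch (an assumption about typical provider usage, not from this file)
 * of how a driver verb uses ib_udata to return private data to userspace.
 * "struct my_create_cq_resp" and "cqn" are hypothetical.
 *
 *	struct my_create_cq_resp resp = { .cqn = cqn };
 *
 *	if (udata->outlen >= sizeof(resp) &&
 *	    ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return ERR_PTR(-EFAULT);
 */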
1861
/*
 * ib_modify_qp_is_ok - Check that the supplied attribute mask is valid for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of the port
 *
 * This is a helper that a low-level driver's modify_qp method can use to
 * validate the consumer's input.
 */
1878int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1879 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1880 enum rdma_link_layer ll);
1881
1882int ib_register_event_handler (struct ib_event_handler *event_handler);
1883int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1884void ib_dispatch_event(struct ib_event *event);
1885
1886int ib_query_device(struct ib_device *device,
1887 struct ib_device_attr *device_attr);
1888
1889int ib_query_port(struct ib_device *device,
1890 u8 port_num, struct ib_port_attr *port_attr);
1891
1892enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1893 u8 port_num);
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904static inline bool rdma_cap_ib_switch(const struct ib_device *device)
1905{
1906 return device->is_switch;
1907}
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917static inline u8 rdma_start_port(const struct ib_device *device)
1918{
1919 return rdma_cap_ib_switch(device) ? 0 : 1;
1920}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930static inline u8 rdma_end_port(const struct ib_device *device)
1931{
1932 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
1933}
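
/*
 * Sketch (assumption about typical core/client usage): iterating over the
 * valid ports of a device and testing a per-port capability.  Switches
 * expose only port 0 while HCAs expose ports 1..phys_port_cnt, which is
 * what rdma_start_port()/rdma_end_port() encode.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device); port <= rdma_end_port(device); port++)
 *		if (rdma_cap_ib_mad(device, port))
 *			setup_mad_agent(device, port);	// hypothetical helper
 */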
1934
1935static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
1936{
1937 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
1938}
1939
1940static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
1941{
1942 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
1943}
1944
1945static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
1946{
1947 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
1948}
1949
1950static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
1951{
1952 return device->port_immutable[port_num].core_cap_flags &
1953 (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
1954}
1955
/*
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams (MAD).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports sending/receiving MAD packets.
 */
1968static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
1969{
1970 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
1971}
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
1993{
1994 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
1995 == RDMA_CORE_CAP_OPA_MAD;
1996}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2019{
2020 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2039{
2040 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2041}
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2056{
2057 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2058}
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2076{
2077 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2078}
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2098{
2099 return rdma_cap_ib_sa(device, port_num);
2100}
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2116{
2117 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2118}
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2137{
2138 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2139}
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2154{
2155 return device->port_immutable[port_num].max_mad_size;
2156}
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2172 u8 port_num)
2173{
2174 return rdma_protocol_roce(device, port_num) &&
2175 device->add_gid && device->del_gid;
2176}
2177
2178int ib_query_gid(struct ib_device *device,
2179 u8 port_num, int index, union ib_gid *gid);
2180
2181int ib_query_pkey(struct ib_device *device,
2182 u8 port_num, u16 index, u16 *pkey);
2183
2184int ib_modify_device(struct ib_device *device,
2185 int device_modify_mask,
2186 struct ib_device_modify *device_modify);
2187
2188int ib_modify_port(struct ib_device *device,
2189 u8 port_num, int port_modify_mask,
2190 struct ib_port_modify *port_modify);
2191
2192int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2193 u8 *port_num, u16 *index);
2194
2195int ib_find_pkey(struct ib_device *device,
2196 u8 port_num, u16 pkey, u16 *index);
2197
2198struct ib_pd *ib_alloc_pd(struct ib_device *device);
2199
2200void ib_dealloc_pd(struct ib_pd *pd);
2201
/*
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
2210struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2224 const struct ib_wc *wc, const struct ib_grh *grh,
2225 struct ib_ah_attr *ah_attr);
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2240 const struct ib_grh *grh, u8 port_num);
2241
2242
2243
2244
2245
2246
2247
2248
2249int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2250
2251
2252
2253
2254
2255
2256
2257
2258int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2259
2260
2261
2262
2263
2264int ib_destroy_ah(struct ib_ah *ah);
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279struct ib_srq *ib_create_srq(struct ib_pd *pd,
2280 struct ib_srq_init_attr *srq_init_attr);
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294int ib_modify_srq(struct ib_srq *srq,
2295 struct ib_srq_attr *srq_attr,
2296 enum ib_srq_attr_mask srq_attr_mask);
2297
2298
2299
2300
2301
2302
2303
2304int ib_query_srq(struct ib_srq *srq,
2305 struct ib_srq_attr *srq_attr);
2306
2307
2308
2309
2310
2311int ib_destroy_srq(struct ib_srq *srq);
2312
2313
2314
2315
2316
2317
2318
2319
2320static inline int ib_post_srq_recv(struct ib_srq *srq,
2321 struct ib_recv_wr *recv_wr,
2322 struct ib_recv_wr **bad_recv_wr)
2323{
2324 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2325}
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335struct ib_qp *ib_create_qp(struct ib_pd *pd,
2336 struct ib_qp_init_attr *qp_init_attr);
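
/*
 * Minimal sketch (assumptions, not from this file) of creating an RC QP;
 * "pd", "send_cq" and "recv_cq" are assumed to exist already.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	  = send_cq,
 *		.recv_cq	  = recv_cq,
 *		.cap.max_send_wr  = 16,
 *		.cap.max_recv_wr  = 16,
 *		.cap.max_send_sge = 1,
 *		.cap.max_recv_sge = 1,
 *		.sq_sig_type	  = IB_SIGNAL_REQ_WR,
 *		.qp_type	  = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */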
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347int ib_modify_qp(struct ib_qp *qp,
2348 struct ib_qp_attr *qp_attr,
2349 int qp_attr_mask);
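
/*
 * Sketch (assumption) of the first transition a newly created QP usually
 * makes, RESET -> INIT; the port, pkey and access values are placeholders.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	if (ib_modify_qp(qp, &attr,
 *			 IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			 IB_QP_PORT | IB_QP_ACCESS_FLAGS))
 *		goto err;	// hypothetical error label
 */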
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362int ib_query_qp(struct ib_qp *qp,
2363 struct ib_qp_attr *qp_attr,
2364 int qp_attr_mask,
2365 struct ib_qp_init_attr *qp_init_attr);
2366
2367
2368
2369
2370
2371int ib_destroy_qp(struct ib_qp *qp);
2372
2373
2374
2375
2376
2377
2378
2379
2380struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2381 struct ib_qp_open_attr *qp_open_attr);
2382
2383
2384
2385
2386
2387
2388
2389
2390int ib_close_qp(struct ib_qp *qp);
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405static inline int ib_post_send(struct ib_qp *qp,
2406 struct ib_send_wr *send_wr,
2407 struct ib_send_wr **bad_send_wr)
2408{
2409 return qp->device->post_send(qp, send_wr, bad_send_wr);
2410}
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420static inline int ib_post_recv(struct ib_qp *qp,
2421 struct ib_recv_wr *recv_wr,
2422 struct ib_recv_wr **bad_recv_wr)
2423{
2424 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2425}
2426
/*
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the completion and event handlers.
 * @cq_attr: The attributes of the CQ.
 *
 * Users can examine the returned cq structure to determine the actual
 * CQ size.
 */
2440struct ib_cq *ib_create_cq(struct ib_device *device,
2441 ib_comp_handler comp_handler,
2442 void (*event_handler)(struct ib_event *, void *),
2443 void *cq_context,
2444 const struct ib_cq_init_attr *cq_attr);
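
/*
 * Sketch (assumption about typical usage): the CQ size and completion
 * vector are passed through ib_cq_init_attr; "my_comp_handler" and
 * "my_ctx" are hypothetical.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */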
2445
2446
2447
2448
2449
2450
2451
2452
2453int ib_resize_cq(struct ib_cq *cq, int cqe);
2454
2455
2456
2457
2458
2459
2460
2461
2462int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2463
2464
2465
2466
2467
2468int ib_destroy_cq(struct ib_cq *cq);
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2483 struct ib_wc *wc)
2484{
2485 return cq->device->poll_cq(cq, num_entries, wc);
2486}
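
/*
 * Sketch (assumption about typical completion handling): drain the CQ in
 * a loop and check wc.status for every entry.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS) {
 *			pr_err("wr_id %llu failed: %s\n", wc.wr_id,
 *			       ib_wc_status_msg(wc.status));
 *			continue;
 *		}
 *		// dispatch on wc.opcode / wc.wr_id
 *	}
 */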
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527static inline int ib_req_notify_cq(struct ib_cq *cq,
2528 enum ib_cq_notify_flags flags)
2529{
2530 return cq->device->req_notify_cq(cq, flags);
2531}
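
/*
 * Sketch of the usual "poll until empty, re-arm, poll again" pattern that
 * avoids missing completions which race with the re-arm (an assumption
 * about typical consumer code, not something this header mandates):
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);		// hypothetical helper
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */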
2532
2533
2534
2535
2536
2537
2538
2539
2540static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2541{
2542 return cq->device->req_ncomp_notif ?
2543 cq->device->req_ncomp_notif(cq, wc_cnt) :
2544 -ENOSYS;
2545}
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2558
2559
2560
2561
2562
2563
2564static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2565{
2566 if (dev->dma_ops)
2567 return dev->dma_ops->mapping_error(dev, dma_addr);
2568 return dma_mapping_error(dev->dma_device, dma_addr);
2569}
2570
2571
2572
2573
2574
2575
2576
2577
2578static inline u64 ib_dma_map_single(struct ib_device *dev,
2579 void *cpu_addr, size_t size,
2580 enum dma_data_direction direction)
2581{
2582 if (dev->dma_ops)
2583 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2584 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2585}
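
/*
 * Sketch (assumption about typical usage): map a kernel buffer for the
 * device and check the mapping before handing the address to an ib_sge
 * or work request.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	// ... post work requests that reference dma_addr ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */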
2586
2587
2588
2589
2590
2591
2592
2593
2594static inline void ib_dma_unmap_single(struct ib_device *dev,
2595 u64 addr, size_t size,
2596 enum dma_data_direction direction)
2597{
2598 if (dev->dma_ops)
2599 dev->dma_ops->unmap_single(dev, addr, size, direction);
2600 else
2601 dma_unmap_single(dev->dma_device, addr, size, direction);
2602}
2603
2604static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2605 void *cpu_addr, size_t size,
2606 enum dma_data_direction direction,
2607 struct dma_attrs *attrs)
2608{
2609 return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2610 direction, attrs);
2611}
2612
2613static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2614 u64 addr, size_t size,
2615 enum dma_data_direction direction,
2616 struct dma_attrs *attrs)
2617{
2618 return dma_unmap_single_attrs(dev->dma_device, addr, size,
2619 direction, attrs);
2620}
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630static inline u64 ib_dma_map_page(struct ib_device *dev,
2631 struct page *page,
2632 unsigned long offset,
2633 size_t size,
2634 enum dma_data_direction direction)
2635{
2636 if (dev->dma_ops)
2637 return dev->dma_ops->map_page(dev, page, offset, size, direction);
2638 return dma_map_page(dev->dma_device, page, offset, size, direction);
2639}
2640
2641
2642
2643
2644
2645
2646
2647
2648static inline void ib_dma_unmap_page(struct ib_device *dev,
2649 u64 addr, size_t size,
2650 enum dma_data_direction direction)
2651{
2652 if (dev->dma_ops)
2653 dev->dma_ops->unmap_page(dev, addr, size, direction);
2654 else
2655 dma_unmap_page(dev->dma_device, addr, size, direction);
2656}
2657
2658
2659
2660
2661
2662
2663
2664
2665static inline int ib_dma_map_sg(struct ib_device *dev,
2666 struct scatterlist *sg, int nents,
2667 enum dma_data_direction direction)
2668{
2669 if (dev->dma_ops)
2670 return dev->dma_ops->map_sg(dev, sg, nents, direction);
2671 return dma_map_sg(dev->dma_device, sg, nents, direction);
2672}
2673
2674
2675
2676
2677
2678
2679
2680
2681static inline void ib_dma_unmap_sg(struct ib_device *dev,
2682 struct scatterlist *sg, int nents,
2683 enum dma_data_direction direction)
2684{
2685 if (dev->dma_ops)
2686 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2687 else
2688 dma_unmap_sg(dev->dma_device, sg, nents, direction);
2689}
2690
2691static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2692 struct scatterlist *sg, int nents,
2693 enum dma_data_direction direction,
2694 struct dma_attrs *attrs)
2695{
2696 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2697}
2698
2699static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2700 struct scatterlist *sg, int nents,
2701 enum dma_data_direction direction,
2702 struct dma_attrs *attrs)
2703{
2704 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2705}
2706
2707
2708
2709
2710
2711
2712
2713
2714static inline u64 ib_sg_dma_address(struct ib_device *dev,
2715 struct scatterlist *sg)
2716{
2717 return sg_dma_address(sg);
2718}
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2729 struct scatterlist *sg)
2730{
2731 return sg_dma_len(sg);
2732}
2733
2734
2735
2736
2737
2738
2739
2740
2741static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2742 u64 addr,
2743 size_t size,
2744 enum dma_data_direction dir)
2745{
2746 if (dev->dma_ops)
2747 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2748 else
2749 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2750}
2751
2752
2753
2754
2755
2756
2757
2758
2759static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2760 u64 addr,
2761 size_t size,
2762 enum dma_data_direction dir)
2763{
2764 if (dev->dma_ops)
2765 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2766 else
2767 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2768}
2769
2770
2771
2772
2773
2774
2775
2776
2777static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2778 size_t size,
2779 u64 *dma_handle,
2780 gfp_t flag)
2781{
2782 if (dev->dma_ops)
2783 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2784 else {
2785 dma_addr_t handle;
2786 void *ret;
2787
2788 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2789 *dma_handle = handle;
2790 return ret;
2791 }
2792}
2793
2794
2795
2796
2797
2798
2799
2800
2801static inline void ib_dma_free_coherent(struct ib_device *dev,
2802 size_t size, void *cpu_addr,
2803 u64 dma_handle)
2804{
2805 if (dev->dma_ops)
2806 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2807 else
2808 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2809}
2810
2811
2812
2813
2814
2815
2816int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2817
2818
2819
2820
2821
2822
2823
2824
2825int ib_dereg_mr(struct ib_mr *mr);
2826
2827struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
2828 enum ib_mr_type mr_type,
2829 u32 max_num_sg);
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2849 struct ib_device *device, int page_list_len);
2850
2851
2852
2853
2854
2855
2856void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2857
2858
2859
2860
2861
2862
2863
2864static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2865{
2866 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2867 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2868}
2869
2870
2871
2872
2873
2874
2875static inline u32 ib_inc_rkey(u32 rkey)
2876{
2877 const u32 mask = 0x000000ff;
2878 return ((rkey + 1) & mask) | (rkey & ~mask);
2879}
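
/*
 * Sketch (assumption): before re-using a fast-registration MR, consumers
 * typically advance the 8-bit key portion so that stale remote references
 * to the old rkey fail:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */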
2880
2881
2882
2883
2884
2885
2886struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901static inline int ib_bind_mw(struct ib_qp *qp,
2902 struct ib_mw *mw,
2903 struct ib_mw_bind *mw_bind)
2904{
2905
2906 return mw->device->bind_mw ?
2907 mw->device->bind_mw(qp, mw, mw_bind) :
2908 -ENOSYS;
2909}
2910
2911
2912
2913
2914
2915int ib_dealloc_mw(struct ib_mw *mw);
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2927 int mr_access_flags,
2928 struct ib_fmr_attr *fmr_attr);
2929
2930
2931
2932
2933
2934
2935
2936
2937static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2938 u64 *page_list, int list_len,
2939 u64 iova)
2940{
2941 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2942}
2943
2944
2945
2946
2947
2948int ib_unmap_fmr(struct list_head *fmr_list);
2949
2950
2951
2952
2953
2954int ib_dealloc_fmr(struct ib_fmr *fmr);
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2969
2970
2971
2972
2973
2974
2975
2976int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2977
2978
2979
2980
2981
2982struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2983
2984
2985
2986
2987
2988int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2989
2990struct ib_flow *ib_create_flow(struct ib_qp *qp,
2991 struct ib_flow_attr *flow_attr, int domain);
2992int ib_destroy_flow(struct ib_flow *flow_id);
2993
2994static inline int ib_check_mr_access(int flags)
2995{
2996
2997
2998
2999
3000 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3001 !(flags & IB_ACCESS_LOCAL_WRITE))
3002 return -EINVAL;
3003
3004 return 0;
3005}
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3020 struct ib_mr_status *mr_status);
3021
3022struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3023 u16 pkey, const union ib_gid *gid,
3024 const struct sockaddr *addr);
3025
3026#endif
3027