#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;
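
/*
 * GID types. IB_GID_TYPE_IB and IB_GID_TYPE_ROCE deliberately share the
 * value 0: on an InfiniBand link layer the entry is a native IB GID,
 * while on an Ethernet link layer the same type denotes RoCE v1.
 * IB_GID_TYPE_ROCE_UDP_ENCAP is RoCE v2, which encapsulates traffic in
 * UDP using destination port ROCE_V2_UDP_DPORT (4791).
 */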
enum ib_gid_type {
	IB_GID_TYPE_IB			= 0,
	IB_GID_TYPE_ROCE		= 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP	= 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	enum ib_gid_type	gid_type;
	struct net_device	*ndev;
};

enum rdma_node_type {
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
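
/*
 * Worked example (illustrative): a RoCE v2 GID holding the v4-mapped
 * address ::ffff:192.0.2.1 makes ib_gid_to_network_type() return
 * RDMA_NETWORK_IPV4, and feeding that back through
 * ib_network_to_gid_type() yields IB_GID_TYPE_ROCE_UDP_ENCAP again.
 */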

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),

	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),

	IB_DEVICE_MEM_WINDOW			= (1 << 17),

	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),

	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),

	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
};
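
/*
 * Capability bits are tested against ib_device_attr.device_cap_flags,
 * e.g. (illustrative; use_fast_reg() stands in for consumer code):
 *
 *	if (device->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
 *		use_fast_reg();
 *
 * Flags from bit 31 upward use 1ULL so that the values fit the 64-bit
 * device_cap_flags field without truncation.
 */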

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	IB_TM_CAP_RC		= 1 << 0,
};

struct ib_tm_caps {
	u32 max_rndv_hdr_size;
	u32 max_num_tags;
	u32 flags;
	u32 max_ops;
	u32 max_sge;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION	= 1 << 0,
	IB_CQ_FLAGS_IGNORE_OVERRUN		= 1 << 1,
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock;
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps;
	struct ib_tm_caps	tm_caps;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
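
/*
 * Example (illustrative): ib_mtu_int_to_enum() rounds down to the
 * largest IB MTU that fits, so an Ethernet MTU of 1500 maps to
 * IB_MTU_1024, and ib_mtu_enum_to_int(ib_mtu_int_to_enum(1500))
 * therefore returns 1024.
 */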

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};
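
/**
 * struct rdma_hw_stats - container for hardware counters exposed by a
 *	device or port
 * @timestamp: jiffies of the last counter refresh
 * @lifespan: how long (in jiffies) cached values stay valid before the
 *	driver's get_hw_stats() hook is called again
 * @names: counter names; the array must stay valid for the lifetime of
 *	this structure
 * @num_counters: number of entries in @names and @value
 * @value: the counter values, one per name
 */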
struct rdma_hw_stats {
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
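
/*
 * Illustrative sketch of a driver's alloc_hw_stats() hook built on the
 * helper above; the driver prefix and counter names are hypothetical:
 *
 *	static const char * const foo_counters[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *dev,
 *							u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(foo_counters,
 *					ARRAY_SIZE(foo_counters),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */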

#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000

#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IBA_IB		(RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE		(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP		(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
	bool			grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
	IB_PORT_OPA_MASK_CHG	= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
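
/*
 * Illustrative sketch of subscribing to asynchronous events; the
 * handler is hypothetical, and ib_register_event_handler() is declared
 * later in this file:
 *
 *	static void foo_event(struct ib_event_handler *handler,
 *			      struct ib_event *event)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, foo_event);
 *	ib_register_event_handler(&eh);
 */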

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};
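
/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */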
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

struct ib_mr_status {
	u32			fail_status;
	struct ib_sig_err	sig_err;
};

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

struct roce_ah_attr {
	u8			dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u8			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
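
	/*
	 * The value of IB_WC_RECV is deliberately set so that consumers
	 * can test whether a completion is a receive by checking
	 * (opcode & IB_WC_RECV).
	 */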
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u32			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32	max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
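
	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs for these contexts.
	 */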
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,

	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,

	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,

	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;

	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,

	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
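
/*
 * Illustrative sketch of building and posting one RDMA WRITE with the
 * wrappers above; qp, sge, raddr and rkey are assumed to be set up
 * elsewhere, ib_post_send() is the posting helper of the full verbs
 * API, and error handling is omitted:
 *
 *	struct ib_rdma_wr wr = {};
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.wr.opcode	 = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list	 = &sge;
 *	wr.wr.num_sge	 = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr	 = raddr;
 *	wr.rkey		 = rkey;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */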

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index;
	u8			port_num;
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
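
/*
 * Illustrative sketch of a fast-registration work request: once a
 * scatterlist has been mapped into an MR (through the device's
 * map_mr_sg hook, exposed as ib_map_mr_sg() in the full verbs API),
 * the registration itself is posted as an IB_WR_REG_MR.  The mr
 * variable and the surrounding setup are assumed:
 *
 *	struct ib_reg_wr wr = {};
 *
 *	wr.wr.opcode = IB_WR_REG_MR;
 *	wr.mr	     = mr;
 *	wr.key	     = mr->rkey;
 *	wr.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 */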

struct ib_sig_handover_wr {
	struct ib_send_wr	wr;
	struct ib_sig_attrs    *sig_attrs;
	struct ib_mr	       *sig_mr;
	int			access_flags;
	struct ib_sge	       *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND	= (1<<6),
	IB_ACCESS_HUGETLB	= (1<<7),
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	RDMA_REMOVE_DESTROY,
	RDMA_REMOVE_CLOSE,
	RDMA_REMOVE_DRIVER_REMOVE,
	RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;
	int			closing;

	struct mutex		uobjects_lock;
	struct list_head	uobjects;

	struct rw_semaphore	cleanup_rwsem;
	enum rdma_remove_reason cleanup_reason;

	struct pid	       *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root_cached	umem_tree;

	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;

	struct list_head	no_private_counters;
	int			odp_mrs_count;
#endif

	struct ib_rdmacg_object	cg_obj;
};

struct ib_uobject {
	u64			user_handle;
	struct ib_ucontext     *context;
	void		       *object;
	struct list_head	list;
	struct ib_rdmacg_object	cg_obj;
	int			id;
	struct kref		ref;
	atomic_t		usecnt;
	struct rcu_head		rcu;

	const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
	struct ib_uobject	uobj;
	struct ib_uverbs_file	*ufile;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t	     inlen;
	size_t	     outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;

	u32			unsafe_global_rkey;

	struct ib_mr	       *__internal_mr;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt;
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
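
/*
 * enum ib_poll_context - how completions of a CQ are processed
 * @IB_POLL_DIRECT:	the consumer polls the CQ itself
 * @IB_POLL_SOFTIRQ:	the CQ is polled from softirq context via irq_poll
 * @IB_POLL_WORKQUEUE:	the CQ is polled from a workqueue (ib_comp_wq)
 */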
enum ib_poll_context {
	IB_POLL_DIRECT,
	IB_POLL_SOFTIRQ,
	IB_POLL_WORKQUEUE,
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt;
	enum ib_poll_context	poll_ctx;
	struct ib_wc	       *wc;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	void		       *wq_context;
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32			wq_num;
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type		wq_type;
	u32			max_wr;
	u32			max_sge;
	struct ib_cq	       *cq;
	void		      (*event_handler)(struct ib_event *, void *);
	u32			create_flags;
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags;
	u32			flags_mask;
};

struct ib_rwq_ind_table {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32			ind_tbl_num;
	u32			log_ind_tbl_size;
	struct ib_wq	      **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	struct ib_wq  **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;

	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;

	struct list_head	shared_qp_list;
	void		       *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;
	struct list_head	xrcd_list;

	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u8			port;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	bool		   need_inval;
	union {
		struct ib_uobject	*uobject;
		struct list_head	qp_entry;
	};
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

enum ib_flow_attr_type {
	IB_FLOW_ATTR_NORMAL		= 0x0,
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

enum ib_flow_spec_type {
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,

	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,

	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_INNER		= 0x100,

	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	u8	real_sz[0];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
	u8	real_sz[0];
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2,
	IB_IPV4_MORE_FRAG = 0x4
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;
	u8	real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	u8	real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	u8	real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[0];
};

struct ib_flow_spec_tunnel {
	u32			      type;
	u16			      size;
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	type;
	u16			size;
};

union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6	ipv6;
	struct ib_flow_spec_tunnel	tunnel;
	struct ib_flow_spec_action_tag	flow_tag;
	struct ib_flow_spec_action_drop	drop;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
};

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,
	IB_MAD_RESULT_SUCCESS  = 1 << 0,
	IB_MAD_RESULT_REPLY    = 1 << 1,
	IB_MAD_RESULT_CONSUMED = 1 << 2
};

struct ib_port_cache {
	u64			subnet_prefix;
	struct ib_pkey_cache   *pkey;
	struct ib_gid_table    *gid;
	u8			lmc;
	enum ib_port_state	port_state;
};

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache   *ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

struct rdma_netdev {
	void		  *clnt_priv;
	struct ib_device  *hca;
	u8		   port_num;

	void (*free_rdma_netdev)(struct net_device *netdev);

	void (*set_id)(struct net_device *netdev, int id);

	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);

	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct ib_port_pkey_list {
	spinlock_t		      list_lock;
	struct list_head	      pkey_list;
};

struct ib_device {
	struct device		     *dma_device;

	char			      name[IB_DEVICE_NAME_MAX];

	struct list_head	      event_handler_list;
	spinlock_t		      event_handler_lock;

	spinlock_t		      client_data_lock;
	struct list_head	      core_list;

	struct list_head	      client_data_list;

	struct ib_cache		      cache;

	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct ib_port_pkey_list     *port_pkey_list;

	struct iw_cm_verbs	     *iwcm;

	struct rdma_hw_stats	  *(*alloc_hw_stats)(struct ib_device *device,
						     u8 port_num);
	int			   (*get_hw_stats)(struct ib_device *device,
						   struct rdma_hw_stats *stats,
						   u8 port, int index);
	int			   (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr,
						   struct ib_udata *udata);
	int			   (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	struct net_device	  *(*get_netdev)(struct ib_device *device,
						 u8 port_num);
	int			   (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int			   (*add_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      const union ib_gid *gid,
					      const struct ib_gid_attr *attr,
					      void **context);
	int			   (*del_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      void **context);
	int			   (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int			   (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int			   (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *	   (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int			   (*dealloc_ucontext)(struct ib_ucontext *context);
	int			   (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *		   (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int			   (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *		   (*create_ah)(struct ib_pd *pd,
						struct rdma_ah_attr *ah_attr,
						struct ib_udata *udata);
	int			   (*modify_ah)(struct ib_ah *ah,
						struct rdma_ah_attr *ah_attr);
	int			   (*query_ah)(struct ib_ah *ah,
					       struct rdma_ah_attr *ah_attr);
	int			   (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *		   (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int			   (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int			   (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int			   (*destroy_srq)(struct ib_srq *srq);
	int			   (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *		   (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int			   (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int			   (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int			   (*destroy_qp)(struct ib_qp *qp);
	int			   (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int			   (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *		   (*create_cq)(struct ib_device *device,
						const struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int			   (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int			   (*destroy_cq)(struct ib_cq *cq);
	int			   (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int			   (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int			   (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int			   (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int			   (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *		   (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *		   (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int			   (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
					       enum ib_mr_type mr_type,
					       u32 max_num_sg);
	int			   (*map_mr_sg)(struct ib_mr *mr,
						struct scatterlist *sg,
						int sg_nents,
						unsigned int *sg_offset);
	struct ib_mw *		   (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type,
					       struct ib_udata *udata);
	int			   (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *		   (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int			   (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int			   (*unmap_fmr)(struct list_head *fmr_list);
	int			   (*dealloc_fmr)(struct ib_fmr *fmr);
	int			   (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int			   (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int			   (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad_hdr *in_mad,
						  size_t in_mad_size,
						  struct ib_mad_hdr *out_mad,
						  size_t *out_mad_size,
						  u16 *out_mad_pkey_index);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);
	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void			   (*drain_rq)(struct ib_qp *qp);
	void			   (*drain_sq)(struct ib_qp *qp);
	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
							int state);
	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
						    struct ifla_vf_info *ivf);
	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_stats *stats);
	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
						  int type);
	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
						struct ib_wq_init_attr *init_attr,
						struct ib_udata *udata);
	int			   (*destroy_wq)(struct ib_wq *wq);
	int			   (*modify_wq)(struct ib_wq *wq,
						struct ib_wq_attr *attr,
						u32 wq_attr_mask,
						struct ib_udata *udata);
	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
							   struct ib_rwq_ind_table_init_attr *init_attr,
							   struct ib_udata *udata);
	int			   (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);

	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module	       *owner;
	struct device		dev;
	struct kobject	       *ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			uverbs_abi_ver;
	u64			uverbs_cmd_mask;
	u64			uverbs_ex_cmd_mask;

	char			node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			node_guid;
	u32			local_dma_lkey;
	u16			is_switch:1;
	u8			node_type;
	u8			phys_port_cnt;
	struct ib_device_attr	attrs;
	struct attribute_group	*hw_stats_ag;
	struct rdma_hw_stats	*hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device	cg_device;
#endif

	u32			index;

	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	struct uverbs_root_spec	*specs_root;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
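
/*
 * Illustrative sketch of the udata copy helpers in a verbs handler;
 * the command/response structures are hypothetical:
 *
 *	struct foo_create_cq_cmd cmd;
 *	struct foo_create_cq_resp resp = {};
 *
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return ERR_PTR(-EFAULT);
 *	...
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return ERR_PTR(-EFAULT);
 */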

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	const void __user *p = udata->inbuf + offset;
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}
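
/*
 * Together the three helpers above give the canonical way to iterate
 * over every port of a device, switch or not (illustrative):
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		...
 */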
2508
2509static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2510{
2511 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2512}
2513
2514static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2515{
2516 return device->port_immutable[port_num].core_cap_flags &
2517 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2518}
2519
2520static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2521{
2522 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2523}
2524
2525static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2526{
2527 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2528}
2529
2530static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2531{
2532 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2533}
2534
2535static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2536{
2537 return rdma_protocol_ib(device, port_num) ||
2538 rdma_protocol_roce(device, port_num);
2539}
2540
2541static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2542{
2543 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2544}
2545
2546static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2547{
2548 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2549}
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2564{
2565 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2566}
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2588{
2589 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2590 == RDMA_CORE_CAP_OPA_MAD;
2591}
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2614{
2615 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2616}
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2634{
2635 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2636}
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2651{
2652 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2653}
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2671{
2672 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2673}

/**
 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
 * multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group, and it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM for multicast group
 * membership.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of a device supports native InfiniBand
 * addressing.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a
 * default GID.  RoCE uses a different mechanism, but still generates a GID
 * via a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of a device needs Ethernet-specific
 * address handle information.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet; addresses are resolved via the
 * Ethernet L2 layer, so the additional Ethernet addressing information
 * (such as the destination MAC) must be carried in the address handle.
 *
 * Return: true if the port runs a RoCE protocol and its address handles
 * carry Ethernet addressing information.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of a device supports OPA extended
 * addressing.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of a device uses the RoCE
 * GID table.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism in order to
 * manage GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * clients to create their own all memory rkey.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), __func__)
void ib_dealloc_pd(struct ib_pd *pd);
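
/*
 * Usage sketch (illustrative, not from the original header): a consumer
 * would typically allocate one PD per device.  "my_dev" is hypothetical.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(my_dev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */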

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4 header.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store the source gid
 * @dgid: place to store the destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version.
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  On output, the actual capabilities of the created SRQ are
 *   returned here.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
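
/*
 * Usage sketch (illustrative, not from the original header): posting one
 * receive buffer to an SRQ.  "srq", "dma_addr", "len" and "lkey" are
 * hypothetical; on failure, bad_wr points at the offending request.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= lkey,
 *	};
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		... requeue or fail ...
 */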

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information
 *
 * Returns 0 on success and an appropriate error code on failure.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
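
/*
 * Usage sketch (illustrative, not from the original header): posting one
 * signaled SEND.  "qp", "dma_addr", "len" and "lkey" are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= lkey,
 *	};
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.opcode	= IB_WR_SEND;
 *	wr.send_flags	= IB_SEND_SIGNALED;
 *	wr.sg_list	= &sge;
 *	wr.num_sge	= 1;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		... the WR referenced by bad_wr was not posted ...
 */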

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
			  int nr_cqe, int comp_vector,
			  enum ib_poll_context poll_ctx);
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
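
/*
 * Usage sketch (illustrative, not from the original header): the ib_alloc_cq()
 * interface polls on the caller's behalf and invokes wc->wr_cqe->done() for
 * each completion.  "my_dev" is hypothetical.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(my_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */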

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
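
/*
 * Usage sketch (illustrative, not from the original header): draining
 * completions one at a time.  "cq" and handle_wc() are hypothetical.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			... report wc.status ...
 *		handle_wc(&wc);
 *	}
 */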

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
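
/*
 * Usage sketch (illustrative, not from the original header): the race-free
 * re-arm pattern implied by the return value semantics above.  "cq" and
 * handle_wc() are hypothetical.
 *
 *	struct ib_wc wc;
 *
 *again:
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_wc(&wc);
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto again;
 */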

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
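
/*
 * Usage sketch (illustrative, not from the original header): every mapping
 * must be checked with ib_dma_mapping_error() before use.  "my_dev", "buf"
 * and "len" are hypothetical.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(my_dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(my_dev, dma_addr, len, DMA_TO_DEVICE);
 */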

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
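
/*
 * Usage sketch (illustrative, not from the original header): mapping a
 * scatterlist and walking the resulting DMA segments.  "my_dev", "sgl",
 * "nents" and use_segment() are hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = ib_dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		use_segment(ib_sg_dma_address(my_dev, sg),
 *			    ib_sg_dma_len(my_dev, sg));
 */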

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: struct ib_mr pointer to be updated
 * @newkey: new key to be used
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
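
/*
 * Usage sketch (illustrative, not from the original header): bumping the key
 * portion before reusing a fast-registration MR, so that stale remote
 * accesses carrying the old rkey fail.  "mr" is hypothetical.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */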

/**
 * ib_alloc_fmr - Allocates a unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr; the first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from the
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

/*
 * Map a scatterlist for fast registration exactly as ib_map_mr_sg() does,
 * then clear the MR's iova so the registration is zero-based.
 */
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
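
/*
 * Usage sketch (illustrative, not from the original header): mapping an
 * already DMA-mapped scatterlist into an MR for fast registration.  "mr",
 * "sgl" and "nents" are hypothetical; a short map is treated as failure.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 */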

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_resolve_eth_dmac(struct ib_device *device,
			struct rdma_ah_attr *ah_attr);
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
}
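
/*
 * Usage sketch (illustrative, not from the original header): filling address
 * handle attributes with the accessors above before creating an AH.  "dev",
 * "pd", "port", "dlid", "dgid" and "sgid_index" are hypothetical.
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(dev, port);
 *	rdma_ah_set_port_num(&attr, port);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 *	ah = rdma_create_ah(pd, &attr);
 */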

/* Get the AH type from the transport and capabilities of the port */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u32 port_num)
{
	if (rdma_protocol_roce(dev, port_num) ||
	    rdma_protocol_iwarp(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num) &&
	    rdma_cap_opa_ah(dev, port_num))
		return RDMA_AH_ATTR_TYPE_OPA;
	return RDMA_AH_ATTR_TYPE_IB;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to get
 *     the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device:         the rdma device
 * @comp_vector:    index of completion vector
 *
 * Return the affinity hint of a given completion vector.  Returns NULL if
 * @comp_vector is out of range or the driver does not implement
 * get_vector_affinity.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);
}
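
/*
 * Usage sketch (illustrative, not from the original header): preferring a
 * completion vector whose IRQ affinity covers the current CPU.  "dev" and
 * "vec" are hypothetical.
 *
 *	const struct cpumask *mask;
 *
 *	mask = ib_get_vector_affinity(dev, vec);
 *	if (mask && cpumask_test_cpu(raw_smp_processor_id(), mask))
 *		... prefer this vector for the new CQ ...
 */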

#endif /* IB_VERBS_H */