1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51#include <linux/socket.h>
52#include <linux/irq_poll.h>
53#include <uapi/linux/if_ether.h>
54#include <net/ipv6.h>
55#include <net/ip.h>
56#include <linux/string.h>
57#include <linux/slab.h>
58#include <linux/netdevice.h>
59
60#include <linux/if_link.h>
61#include <linux/atomic.h>
62#include <linux/mmu_notifier.h>
63#include <linux/uaccess.h>
64#include <linux/cgroup_rdma.h>
65#include <uapi/rdma/ib_user_verbs.h>
66#include <rdma/restrack.h>
67#include <uapi/rdma/rdma_user_ioctl.h>
68#include <uapi/rdma/ib_user_ioctl_verbs.h>
69
70#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
71
72extern struct workqueue_struct *ib_wq;
73extern struct workqueue_struct *ib_comp_wq;
74
/*
 * 128-bit Global Identifier (GID).  Accessible either as raw bytes or,
 * for IB-style GIDs, as the big-endian subnet prefix / interface ID pair.
 */
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};
82
83extern union ib_gid zgid;
84
enum ib_gid_type {
	/*
	 * IB and RoCE v1 deliberately share the value 0: both use a
	 * GRH-style GID; which one applies is determined by the port's
	 * link layer, not by the GID-table entry itself.
	 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,	/* RoCE v2: GID carried in an IP header */
	IB_GID_TYPE_SIZE		/* number of distinct GID type slots */
};
92
93#define ROCE_V2_UDP_DPORT 4791
/* Per-entry attributes of a GID table entry. */
struct ib_gid_attr {
	struct net_device	*ndev;		/* NOTE(review): presumably the netdev backing a RoCE GID; may be NULL for IB — confirm with GID cache code */
	struct ib_device	*device;	/* owning IB device */
	enum ib_gid_type	gid_type;	/* IB / RoCE v1 / RoCE v2 */
	u16			index;		/* index within the port's GID table */
	u8			port_num;	/* 1-based port number */
};
101
102enum rdma_node_type {
103
104 RDMA_NODE_IB_CA = 1,
105 RDMA_NODE_IB_SWITCH,
106 RDMA_NODE_IB_ROUTER,
107 RDMA_NODE_RNIC,
108 RDMA_NODE_USNIC,
109 RDMA_NODE_USNIC_UDP,
110};
111
112enum {
113
114 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
115};
116
117enum rdma_transport_type {
118 RDMA_TRANSPORT_IB,
119 RDMA_TRANSPORT_IWARP,
120 RDMA_TRANSPORT_USNIC,
121 RDMA_TRANSPORT_USNIC_UDP
122};
123
124enum rdma_protocol_type {
125 RDMA_PROTOCOL_IB,
126 RDMA_PROTOCOL_IBOE,
127 RDMA_PROTOCOL_IWARP,
128 RDMA_PROTOCOL_USNIC_UDP
129};
130
131__attribute_const__ enum rdma_transport_type
132rdma_node_get_transport(enum rdma_node_type node_type);
133
134enum rdma_network_type {
135 RDMA_NETWORK_IB,
136 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
137 RDMA_NETWORK_IPV4,
138 RDMA_NETWORK_IPV6
139};
140
141static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
142{
143 if (network_type == RDMA_NETWORK_IPV4 ||
144 network_type == RDMA_NETWORK_IPV6)
145 return IB_GID_TYPE_ROCE_UDP_ENCAP;
146
147
148 return IB_GID_TYPE_IB;
149}
150
151static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
152 union ib_gid *gid)
153{
154 if (gid_type == IB_GID_TYPE_IB)
155 return RDMA_NETWORK_IB;
156
157 if (ipv6_addr_v4mapped((struct in6_addr *)gid))
158 return RDMA_NETWORK_IPV4;
159 else
160 return RDMA_NETWORK_IPV6;
161}
162
163enum rdma_link_layer {
164 IB_LINK_LAYER_UNSPECIFIED,
165 IB_LINK_LAYER_INFINIBAND,
166 IB_LINK_LAYER_ETHERNET,
167};
168
169enum ib_device_cap_flags {
170 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
171 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
172 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
173 IB_DEVICE_RAW_MULTI = (1 << 3),
174 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
175 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
176 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
177 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
178 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
179
180 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
181 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
182 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
183 IB_DEVICE_SRQ_RESIZE = (1 << 13),
184 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
185
186
187
188
189
190
191
192
193 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
194
195 IB_DEVICE_MEM_WINDOW = (1 << 17),
196
197
198
199
200
201
202
203 IB_DEVICE_UD_IP_CSUM = (1 << 18),
204 IB_DEVICE_UD_TSO = (1 << 19),
205 IB_DEVICE_XRC = (1 << 20),
206
207
208
209
210
211
212
213
214
215
216 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
217 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
218 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
219 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
220 IB_DEVICE_RC_IP_CSUM = (1 << 25),
221
222 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
223
224
225
226
227
228
229 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
230 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
231 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
232 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
233 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
234 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
235
236 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
237 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
238
239 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
240};
241
242enum ib_signature_prot_cap {
243 IB_PROT_T10DIF_TYPE_1 = 1,
244 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
245 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
246};
247
248enum ib_signature_guard_cap {
249 IB_GUARD_T10DIF_CRC = 1,
250 IB_GUARD_T10DIF_CSUM = 1 << 1,
251};
252
253enum ib_atomic_cap {
254 IB_ATOMIC_NONE,
255 IB_ATOMIC_HCA,
256 IB_ATOMIC_GLOB
257};
258
259enum ib_odp_general_cap_bits {
260 IB_ODP_SUPPORT = 1 << 0,
261 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
262};
263
264enum ib_odp_transport_cap_bits {
265 IB_ODP_SUPPORT_SEND = 1 << 0,
266 IB_ODP_SUPPORT_RECV = 1 << 1,
267 IB_ODP_SUPPORT_WRITE = 1 << 2,
268 IB_ODP_SUPPORT_READ = 1 << 3,
269 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
270};
271
272struct ib_odp_caps {
273 uint64_t general_caps;
274 struct {
275 uint32_t rc_odp_caps;
276 uint32_t uc_odp_caps;
277 uint32_t ud_odp_caps;
278 } per_transport_caps;
279};
280
281struct ib_rss_caps {
282
283
284
285
286 u32 supported_qpts;
287 u32 max_rwq_indirection_tables;
288 u32 max_rwq_indirection_table_size;
289};
290
291enum ib_tm_cap_flags {
292
293 IB_TM_CAP_RC = 1 << 0,
294};
295
296struct ib_tm_caps {
297
298 u32 max_rndv_hdr_size;
299
300 u32 max_num_tags;
301
302 u32 flags;
303
304 u32 max_ops;
305
306 u32 max_sge;
307};
308
309struct ib_cq_init_attr {
310 unsigned int cqe;
311 int comp_vector;
312 u32 flags;
313};
314
315enum ib_cq_attr_mask {
316 IB_CQ_MODERATE = 1 << 0,
317};
318
319struct ib_cq_caps {
320 u16 max_cq_moderation_count;
321 u16 max_cq_moderation_period;
322};
323
324struct ib_dm_mr_attr {
325 u64 length;
326 u64 offset;
327 u32 access_flags;
328};
329
330struct ib_dm_alloc_attr {
331 u64 length;
332 u32 alignment;
333 u32 flags;
334};
335
336struct ib_device_attr {
337 u64 fw_ver;
338 __be64 sys_image_guid;
339 u64 max_mr_size;
340 u64 page_size_cap;
341 u32 vendor_id;
342 u32 vendor_part_id;
343 u32 hw_ver;
344 int max_qp;
345 int max_qp_wr;
346 u64 device_cap_flags;
347 int max_sge;
348 int max_sge_rd;
349 int max_cq;
350 int max_cqe;
351 int max_mr;
352 int max_pd;
353 int max_qp_rd_atom;
354 int max_ee_rd_atom;
355 int max_res_rd_atom;
356 int max_qp_init_rd_atom;
357 int max_ee_init_rd_atom;
358 enum ib_atomic_cap atomic_cap;
359 enum ib_atomic_cap masked_atomic_cap;
360 int max_ee;
361 int max_rdd;
362 int max_mw;
363 int max_raw_ipv6_qp;
364 int max_raw_ethy_qp;
365 int max_mcast_grp;
366 int max_mcast_qp_attach;
367 int max_total_mcast_qp_attach;
368 int max_ah;
369 int max_fmr;
370 int max_map_per_fmr;
371 int max_srq;
372 int max_srq_wr;
373 int max_srq_sge;
374 unsigned int max_fast_reg_page_list_len;
375 u16 max_pkeys;
376 u8 local_ca_ack_delay;
377 int sig_prot_cap;
378 int sig_guard_cap;
379 struct ib_odp_caps odp_caps;
380 uint64_t timestamp_mask;
381 uint64_t hca_core_clock;
382 struct ib_rss_caps rss_caps;
383 u32 max_wq_type_rq;
384 u32 raw_packet_caps;
385 struct ib_tm_caps tm_caps;
386 struct ib_cq_caps cq_caps;
387 u64 max_dm_size;
388};
389
390enum ib_mtu {
391 IB_MTU_256 = 1,
392 IB_MTU_512 = 2,
393 IB_MTU_1024 = 3,
394 IB_MTU_2048 = 4,
395 IB_MTU_4096 = 5
396};
397
398static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
399{
400 switch (mtu) {
401 case IB_MTU_256: return 256;
402 case IB_MTU_512: return 512;
403 case IB_MTU_1024: return 1024;
404 case IB_MTU_2048: return 2048;
405 case IB_MTU_4096: return 4096;
406 default: return -1;
407 }
408}
409
410static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
411{
412 if (mtu >= 4096)
413 return IB_MTU_4096;
414 else if (mtu >= 2048)
415 return IB_MTU_2048;
416 else if (mtu >= 1024)
417 return IB_MTU_1024;
418 else if (mtu >= 512)
419 return IB_MTU_512;
420 else
421 return IB_MTU_256;
422}
423
424enum ib_port_state {
425 IB_PORT_NOP = 0,
426 IB_PORT_DOWN = 1,
427 IB_PORT_INIT = 2,
428 IB_PORT_ARMED = 3,
429 IB_PORT_ACTIVE = 4,
430 IB_PORT_ACTIVE_DEFER = 5
431};
432
433enum ib_port_cap_flags {
434 IB_PORT_SM = 1 << 1,
435 IB_PORT_NOTICE_SUP = 1 << 2,
436 IB_PORT_TRAP_SUP = 1 << 3,
437 IB_PORT_OPT_IPD_SUP = 1 << 4,
438 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
439 IB_PORT_SL_MAP_SUP = 1 << 6,
440 IB_PORT_MKEY_NVRAM = 1 << 7,
441 IB_PORT_PKEY_NVRAM = 1 << 8,
442 IB_PORT_LED_INFO_SUP = 1 << 9,
443 IB_PORT_SM_DISABLED = 1 << 10,
444 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
445 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
446 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
447 IB_PORT_CM_SUP = 1 << 16,
448 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
449 IB_PORT_REINIT_SUP = 1 << 18,
450 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
451 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
452 IB_PORT_DR_NOTICE_SUP = 1 << 21,
453 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
454 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
455 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
456 IB_PORT_CLIENT_REG_SUP = 1 << 25,
457 IB_PORT_IP_BASED_GIDS = 1 << 26,
458};
459
460enum ib_port_width {
461 IB_WIDTH_1X = 1,
462 IB_WIDTH_4X = 2,
463 IB_WIDTH_8X = 4,
464 IB_WIDTH_12X = 8
465};
466
467static inline int ib_width_enum_to_int(enum ib_port_width width)
468{
469 switch (width) {
470 case IB_WIDTH_1X: return 1;
471 case IB_WIDTH_4X: return 4;
472 case IB_WIDTH_8X: return 8;
473 case IB_WIDTH_12X: return 12;
474 default: return -1;
475 }
476}
477
478enum ib_port_speed {
479 IB_SPEED_SDR = 1,
480 IB_SPEED_DDR = 2,
481 IB_SPEED_QDR = 4,
482 IB_SPEED_FDR10 = 8,
483 IB_SPEED_FDR = 16,
484 IB_SPEED_EDR = 32,
485 IB_SPEED_HDR = 64
486};
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
/*
 * struct rdma_hw_stats - per-device/per-port hardware counter snapshot
 * @lock:         serializes updates to @lifespan and @value (the 64-bit
 *                counters are not guaranteed to be written atomically on
 *                32-bit systems)
 * @timestamp:    when the counters were last refreshed (jiffies —
 *                NOTE(review): unit inferred from @lifespan usage; confirm)
 * @lifespan:     minimum age, in jiffies, before the core refreshes the
 *                counters again (see rdma_alloc_hw_stats_struct())
 * @names:        counter names; must contain at least @num_counters entries
 * @num_counters: number of entries in @value and @names
 * @value:        flexible array of counter values, one per counter
 */
struct rdma_hw_stats {
	struct mutex	lock;
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};
515
516#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
517
518
519
520
521
522
523
524static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
525 const char * const *names, int num_counters,
526 unsigned long lifespan)
527{
528 struct rdma_hw_stats *stats;
529
530 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
531 GFP_KERNEL);
532 if (!stats)
533 return NULL;
534 stats->names = names;
535 stats->num_counters = num_counters;
536 stats->lifespan = msecs_to_jiffies(lifespan);
537
538 return stats;
539}
540
541
542
543
544
545
546#define RDMA_CORE_CAP_IB_MAD 0x00000001
547#define RDMA_CORE_CAP_IB_SMI 0x00000002
548#define RDMA_CORE_CAP_IB_CM 0x00000004
549#define RDMA_CORE_CAP_IW_CM 0x00000008
550#define RDMA_CORE_CAP_IB_SA 0x00000010
551#define RDMA_CORE_CAP_OPA_MAD 0x00000020
552
553
554#define RDMA_CORE_CAP_AF_IB 0x00001000
555#define RDMA_CORE_CAP_ETH_AH 0x00002000
556#define RDMA_CORE_CAP_OPA_AH 0x00004000
557
558
559#define RDMA_CORE_CAP_PROT_IB 0x00100000
560#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
561#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
562#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
563#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
564#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
565
566#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
567 | RDMA_CORE_CAP_IB_MAD \
568 | RDMA_CORE_CAP_IB_SMI \
569 | RDMA_CORE_CAP_IB_CM \
570 | RDMA_CORE_CAP_IB_SA \
571 | RDMA_CORE_CAP_AF_IB)
572#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
573 | RDMA_CORE_CAP_IB_MAD \
574 | RDMA_CORE_CAP_IB_CM \
575 | RDMA_CORE_CAP_AF_IB \
576 | RDMA_CORE_CAP_ETH_AH)
577#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
578 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
579 | RDMA_CORE_CAP_IB_MAD \
580 | RDMA_CORE_CAP_IB_CM \
581 | RDMA_CORE_CAP_AF_IB \
582 | RDMA_CORE_CAP_ETH_AH)
583#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
584 | RDMA_CORE_CAP_IW_CM)
585#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
586 | RDMA_CORE_CAP_OPA_MAD)
587
588#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
589
590#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
591
592struct ib_port_attr {
593 u64 subnet_prefix;
594 enum ib_port_state state;
595 enum ib_mtu max_mtu;
596 enum ib_mtu active_mtu;
597 int gid_tbl_len;
598 u32 port_cap_flags;
599 u32 max_msg_sz;
600 u32 bad_pkey_cntr;
601 u32 qkey_viol_cntr;
602 u16 pkey_tbl_len;
603 u32 sm_lid;
604 u32 lid;
605 u8 lmc;
606 u8 max_vl_num;
607 u8 sm_sl;
608 u8 subnet_timeout;
609 u8 init_type_reply;
610 u8 active_width;
611 u8 active_speed;
612 u8 phys_state;
613 bool grh_required;
614};
615
616enum ib_device_modify_flags {
617 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
618 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
619};
620
621#define IB_DEVICE_NODE_DESC_MAX 64
622
623struct ib_device_modify {
624 u64 sys_image_guid;
625 char node_desc[IB_DEVICE_NODE_DESC_MAX];
626};
627
628enum ib_port_modify_flags {
629 IB_PORT_SHUTDOWN = 1,
630 IB_PORT_INIT_TYPE = (1<<2),
631 IB_PORT_RESET_QKEY_CNTR = (1<<3),
632 IB_PORT_OPA_MASK_CHG = (1<<4)
633};
634
635struct ib_port_modify {
636 u32 set_port_cap_mask;
637 u32 clr_port_cap_mask;
638 u8 init_type;
639};
640
641enum ib_event_type {
642 IB_EVENT_CQ_ERR,
643 IB_EVENT_QP_FATAL,
644 IB_EVENT_QP_REQ_ERR,
645 IB_EVENT_QP_ACCESS_ERR,
646 IB_EVENT_COMM_EST,
647 IB_EVENT_SQ_DRAINED,
648 IB_EVENT_PATH_MIG,
649 IB_EVENT_PATH_MIG_ERR,
650 IB_EVENT_DEVICE_FATAL,
651 IB_EVENT_PORT_ACTIVE,
652 IB_EVENT_PORT_ERR,
653 IB_EVENT_LID_CHANGE,
654 IB_EVENT_PKEY_CHANGE,
655 IB_EVENT_SM_CHANGE,
656 IB_EVENT_SRQ_ERR,
657 IB_EVENT_SRQ_LIMIT_REACHED,
658 IB_EVENT_QP_LAST_WQE_REACHED,
659 IB_EVENT_CLIENT_REREGISTER,
660 IB_EVENT_GID_CHANGE,
661 IB_EVENT_WQ_FATAL,
662};
663
664const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
665
666struct ib_event {
667 struct ib_device *device;
668 union {
669 struct ib_cq *cq;
670 struct ib_qp *qp;
671 struct ib_srq *srq;
672 struct ib_wq *wq;
673 u8 port_num;
674 } element;
675 enum ib_event_type event;
676};
677
678struct ib_event_handler {
679 struct ib_device *device;
680 void (*handler)(struct ib_event_handler *, struct ib_event *);
681 struct list_head list;
682};
683
684#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
685 do { \
686 (_ptr)->device = _device; \
687 (_ptr)->handler = _handler; \
688 INIT_LIST_HEAD(&(_ptr)->list); \
689 } while (0)
690
691struct ib_global_route {
692 union ib_gid dgid;
693 u32 flow_label;
694 u8 sgid_index;
695 u8 hop_limit;
696 u8 traffic_class;
697};
698
699struct ib_grh {
700 __be32 version_tclass_flow;
701 __be16 paylen;
702 u8 next_hdr;
703 u8 hop_limit;
704 union ib_gid sgid;
705 union ib_gid dgid;
706};
707
708union rdma_network_hdr {
709 struct ib_grh ibgrh;
710 struct {
711
712
713
714 u8 reserved[20];
715 struct iphdr roce4grh;
716 };
717};
718
719#define IB_QPN_MASK 0xFFFFFF
720
721enum {
722 IB_MULTICAST_QPN = 0xffffff
723};
724
725#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
726#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
727
728enum ib_ah_flags {
729 IB_AH_GRH = 1
730};
731
732enum ib_rate {
733 IB_RATE_PORT_CURRENT = 0,
734 IB_RATE_2_5_GBPS = 2,
735 IB_RATE_5_GBPS = 5,
736 IB_RATE_10_GBPS = 3,
737 IB_RATE_20_GBPS = 6,
738 IB_RATE_30_GBPS = 4,
739 IB_RATE_40_GBPS = 7,
740 IB_RATE_60_GBPS = 8,
741 IB_RATE_80_GBPS = 9,
742 IB_RATE_120_GBPS = 10,
743 IB_RATE_14_GBPS = 11,
744 IB_RATE_56_GBPS = 12,
745 IB_RATE_112_GBPS = 13,
746 IB_RATE_168_GBPS = 14,
747 IB_RATE_25_GBPS = 15,
748 IB_RATE_100_GBPS = 16,
749 IB_RATE_200_GBPS = 17,
750 IB_RATE_300_GBPS = 18
751};
752
753
754
755
756
757
758
759__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
760
761
762
763
764
765
766__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781enum ib_mr_type {
782 IB_MR_TYPE_MEM_REG,
783 IB_MR_TYPE_SIGNATURE,
784 IB_MR_TYPE_SG_GAPS,
785};
786
787
788
789
790
791
792enum ib_signature_type {
793 IB_SIG_TYPE_NONE,
794 IB_SIG_TYPE_T10_DIF,
795};
796
797
798
799
800
801
802enum ib_t10_dif_bg_type {
803 IB_T10DIF_CRC,
804 IB_T10DIF_CSUM
805};
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820struct ib_t10_dif_domain {
821 enum ib_t10_dif_bg_type bg_type;
822 u16 pi_interval;
823 u16 bg;
824 u16 app_tag;
825 u32 ref_tag;
826 bool ref_remap;
827 bool app_escape;
828 bool ref_escape;
829 u16 apptag_check_mask;
830};
831
832
833
834
835
836
837
838struct ib_sig_domain {
839 enum ib_signature_type sig_type;
840 union {
841 struct ib_t10_dif_domain dif;
842 } sig;
843};
844
845
846
847
848
849
850
851struct ib_sig_attrs {
852 u8 check_mask;
853 struct ib_sig_domain mem;
854 struct ib_sig_domain wire;
855};
856
857enum ib_sig_err_type {
858 IB_SIG_BAD_GUARD,
859 IB_SIG_BAD_REFTAG,
860 IB_SIG_BAD_APPTAG,
861};
862
863
864
865
866struct ib_sig_err {
867 enum ib_sig_err_type err_type;
868 u32 expected;
869 u32 actual;
870 u64 sig_err_offset;
871 u32 key;
872};
873
874enum ib_mr_status_check {
875 IB_MR_CHECK_SIG_STATUS = 1,
876};
877
878
879
880
881
882
883
884
885
886struct ib_mr_status {
887 u32 fail_status;
888 struct ib_sig_err sig_err;
889};
890
891
892
893
894
895
896__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
897
898enum rdma_ah_attr_type {
899 RDMA_AH_ATTR_TYPE_UNDEFINED,
900 RDMA_AH_ATTR_TYPE_IB,
901 RDMA_AH_ATTR_TYPE_ROCE,
902 RDMA_AH_ATTR_TYPE_OPA,
903};
904
905struct ib_ah_attr {
906 u16 dlid;
907 u8 src_path_bits;
908};
909
910struct roce_ah_attr {
911 u8 dmac[ETH_ALEN];
912};
913
914struct opa_ah_attr {
915 u32 dlid;
916 u8 src_path_bits;
917 bool make_grd;
918};
919
920struct rdma_ah_attr {
921 struct ib_global_route grh;
922 u8 sl;
923 u8 static_rate;
924 u8 port_num;
925 u8 ah_flags;
926 enum rdma_ah_attr_type type;
927 union {
928 struct ib_ah_attr ib;
929 struct roce_ah_attr roce;
930 struct opa_ah_attr opa;
931 };
932};
933
934enum ib_wc_status {
935 IB_WC_SUCCESS,
936 IB_WC_LOC_LEN_ERR,
937 IB_WC_LOC_QP_OP_ERR,
938 IB_WC_LOC_EEC_OP_ERR,
939 IB_WC_LOC_PROT_ERR,
940 IB_WC_WR_FLUSH_ERR,
941 IB_WC_MW_BIND_ERR,
942 IB_WC_BAD_RESP_ERR,
943 IB_WC_LOC_ACCESS_ERR,
944 IB_WC_REM_INV_REQ_ERR,
945 IB_WC_REM_ACCESS_ERR,
946 IB_WC_REM_OP_ERR,
947 IB_WC_RETRY_EXC_ERR,
948 IB_WC_RNR_RETRY_EXC_ERR,
949 IB_WC_LOC_RDD_VIOL_ERR,
950 IB_WC_REM_INV_RD_REQ_ERR,
951 IB_WC_REM_ABORT_ERR,
952 IB_WC_INV_EECN_ERR,
953 IB_WC_INV_EEC_STATE_ERR,
954 IB_WC_FATAL_ERR,
955 IB_WC_RESP_TIMEOUT_ERR,
956 IB_WC_GENERAL_ERR
957};
958
959const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
960
961enum ib_wc_opcode {
962 IB_WC_SEND,
963 IB_WC_RDMA_WRITE,
964 IB_WC_RDMA_READ,
965 IB_WC_COMP_SWAP,
966 IB_WC_FETCH_ADD,
967 IB_WC_LSO,
968 IB_WC_LOCAL_INV,
969 IB_WC_REG_MR,
970 IB_WC_MASKED_COMP_SWAP,
971 IB_WC_MASKED_FETCH_ADD,
972
973
974
975
976 IB_WC_RECV = 1 << 7,
977 IB_WC_RECV_RDMA_WITH_IMM
978};
979
980enum ib_wc_flags {
981 IB_WC_GRH = 1,
982 IB_WC_WITH_IMM = (1<<1),
983 IB_WC_WITH_INVALIDATE = (1<<2),
984 IB_WC_IP_CSUM_OK = (1<<3),
985 IB_WC_WITH_SMAC = (1<<4),
986 IB_WC_WITH_VLAN = (1<<5),
987 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
988};
989
990struct ib_wc {
991 union {
992 u64 wr_id;
993 struct ib_cqe *wr_cqe;
994 };
995 enum ib_wc_status status;
996 enum ib_wc_opcode opcode;
997 u32 vendor_err;
998 u32 byte_len;
999 struct ib_qp *qp;
1000 union {
1001 __be32 imm_data;
1002 u32 invalidate_rkey;
1003 } ex;
1004 u32 src_qp;
1005 u32 slid;
1006 int wc_flags;
1007 u16 pkey_index;
1008 u8 sl;
1009 u8 dlid_path_bits;
1010 u8 port_num;
1011 u8 smac[ETH_ALEN];
1012 u16 vlan_id;
1013 u8 network_hdr_type;
1014};
1015
1016enum ib_cq_notify_flags {
1017 IB_CQ_SOLICITED = 1 << 0,
1018 IB_CQ_NEXT_COMP = 1 << 1,
1019 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1020 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1021};
1022
1023enum ib_srq_type {
1024 IB_SRQT_BASIC,
1025 IB_SRQT_XRC,
1026 IB_SRQT_TM,
1027};
1028
1029static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1030{
1031 return srq_type == IB_SRQT_XRC ||
1032 srq_type == IB_SRQT_TM;
1033}
1034
1035enum ib_srq_attr_mask {
1036 IB_SRQ_MAX_WR = 1 << 0,
1037 IB_SRQ_LIMIT = 1 << 1,
1038};
1039
1040struct ib_srq_attr {
1041 u32 max_wr;
1042 u32 max_sge;
1043 u32 srq_limit;
1044};
1045
1046struct ib_srq_init_attr {
1047 void (*event_handler)(struct ib_event *, void *);
1048 void *srq_context;
1049 struct ib_srq_attr attr;
1050 enum ib_srq_type srq_type;
1051
1052 struct {
1053 struct ib_cq *cq;
1054 union {
1055 struct {
1056 struct ib_xrcd *xrcd;
1057 } xrc;
1058
1059 struct {
1060 u32 max_num_tags;
1061 } tag_matching;
1062 };
1063 } ext;
1064};
1065
1066struct ib_qp_cap {
1067 u32 max_send_wr;
1068 u32 max_recv_wr;
1069 u32 max_send_sge;
1070 u32 max_recv_sge;
1071 u32 max_inline_data;
1072
1073
1074
1075
1076
1077
1078 u32 max_rdma_ctxs;
1079};
1080
1081enum ib_sig_type {
1082 IB_SIGNAL_ALL_WR,
1083 IB_SIGNAL_REQ_WR
1084};
1085
1086enum ib_qp_type {
1087
1088
1089
1090
1091
1092 IB_QPT_SMI,
1093 IB_QPT_GSI,
1094
1095 IB_QPT_RC,
1096 IB_QPT_UC,
1097 IB_QPT_UD,
1098 IB_QPT_RAW_IPV6,
1099 IB_QPT_RAW_ETHERTYPE,
1100 IB_QPT_RAW_PACKET = 8,
1101 IB_QPT_XRC_INI = 9,
1102 IB_QPT_XRC_TGT,
1103 IB_QPT_MAX,
1104 IB_QPT_DRIVER = 0xFF,
1105
1106
1107
1108
1109 IB_QPT_RESERVED1 = 0x1000,
1110 IB_QPT_RESERVED2,
1111 IB_QPT_RESERVED3,
1112 IB_QPT_RESERVED4,
1113 IB_QPT_RESERVED5,
1114 IB_QPT_RESERVED6,
1115 IB_QPT_RESERVED7,
1116 IB_QPT_RESERVED8,
1117 IB_QPT_RESERVED9,
1118 IB_QPT_RESERVED10,
1119};
1120
/* Flags accepted in ib_qp_init_attr.create_flags at QP creation time. */
enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* bit 7 is intentionally unassigned */
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	/* NOTE(review): 1 << 31 left-shifts into the sign bit of int (UB in
	 * ISO C); consider 1u << 31 — verify against kernel enum conventions */
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};
1138
1139
1140
1141
1142
1143
1144struct ib_qp_init_attr {
1145 void (*event_handler)(struct ib_event *, void *);
1146 void *qp_context;
1147 struct ib_cq *send_cq;
1148 struct ib_cq *recv_cq;
1149 struct ib_srq *srq;
1150 struct ib_xrcd *xrcd;
1151 struct ib_qp_cap cap;
1152 enum ib_sig_type sq_sig_type;
1153 enum ib_qp_type qp_type;
1154 enum ib_qp_create_flags create_flags;
1155
1156
1157
1158
1159 u8 port_num;
1160 struct ib_rwq_ind_table *rwq_ind_tbl;
1161 u32 source_qpn;
1162};
1163
1164struct ib_qp_open_attr {
1165 void (*event_handler)(struct ib_event *, void *);
1166 void *qp_context;
1167 u32 qp_num;
1168 enum ib_qp_type qp_type;
1169};
1170
1171enum ib_rnr_timeout {
1172 IB_RNR_TIMER_655_36 = 0,
1173 IB_RNR_TIMER_000_01 = 1,
1174 IB_RNR_TIMER_000_02 = 2,
1175 IB_RNR_TIMER_000_03 = 3,
1176 IB_RNR_TIMER_000_04 = 4,
1177 IB_RNR_TIMER_000_06 = 5,
1178 IB_RNR_TIMER_000_08 = 6,
1179 IB_RNR_TIMER_000_12 = 7,
1180 IB_RNR_TIMER_000_16 = 8,
1181 IB_RNR_TIMER_000_24 = 9,
1182 IB_RNR_TIMER_000_32 = 10,
1183 IB_RNR_TIMER_000_48 = 11,
1184 IB_RNR_TIMER_000_64 = 12,
1185 IB_RNR_TIMER_000_96 = 13,
1186 IB_RNR_TIMER_001_28 = 14,
1187 IB_RNR_TIMER_001_92 = 15,
1188 IB_RNR_TIMER_002_56 = 16,
1189 IB_RNR_TIMER_003_84 = 17,
1190 IB_RNR_TIMER_005_12 = 18,
1191 IB_RNR_TIMER_007_68 = 19,
1192 IB_RNR_TIMER_010_24 = 20,
1193 IB_RNR_TIMER_015_36 = 21,
1194 IB_RNR_TIMER_020_48 = 22,
1195 IB_RNR_TIMER_030_72 = 23,
1196 IB_RNR_TIMER_040_96 = 24,
1197 IB_RNR_TIMER_061_44 = 25,
1198 IB_RNR_TIMER_081_92 = 26,
1199 IB_RNR_TIMER_122_88 = 27,
1200 IB_RNR_TIMER_163_84 = 28,
1201 IB_RNR_TIMER_245_76 = 29,
1202 IB_RNR_TIMER_327_68 = 30,
1203 IB_RNR_TIMER_491_52 = 31
1204};
1205
1206enum ib_qp_attr_mask {
1207 IB_QP_STATE = 1,
1208 IB_QP_CUR_STATE = (1<<1),
1209 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1210 IB_QP_ACCESS_FLAGS = (1<<3),
1211 IB_QP_PKEY_INDEX = (1<<4),
1212 IB_QP_PORT = (1<<5),
1213 IB_QP_QKEY = (1<<6),
1214 IB_QP_AV = (1<<7),
1215 IB_QP_PATH_MTU = (1<<8),
1216 IB_QP_TIMEOUT = (1<<9),
1217 IB_QP_RETRY_CNT = (1<<10),
1218 IB_QP_RNR_RETRY = (1<<11),
1219 IB_QP_RQ_PSN = (1<<12),
1220 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1221 IB_QP_ALT_PATH = (1<<14),
1222 IB_QP_MIN_RNR_TIMER = (1<<15),
1223 IB_QP_SQ_PSN = (1<<16),
1224 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1225 IB_QP_PATH_MIG_STATE = (1<<18),
1226 IB_QP_CAP = (1<<19),
1227 IB_QP_DEST_QPN = (1<<20),
1228 IB_QP_RESERVED1 = (1<<21),
1229 IB_QP_RESERVED2 = (1<<22),
1230 IB_QP_RESERVED3 = (1<<23),
1231 IB_QP_RESERVED4 = (1<<24),
1232 IB_QP_RATE_LIMIT = (1<<25),
1233};
1234
1235enum ib_qp_state {
1236 IB_QPS_RESET,
1237 IB_QPS_INIT,
1238 IB_QPS_RTR,
1239 IB_QPS_RTS,
1240 IB_QPS_SQD,
1241 IB_QPS_SQE,
1242 IB_QPS_ERR
1243};
1244
1245enum ib_mig_state {
1246 IB_MIG_MIGRATED,
1247 IB_MIG_REARM,
1248 IB_MIG_ARMED
1249};
1250
1251enum ib_mw_type {
1252 IB_MW_TYPE_1 = 1,
1253 IB_MW_TYPE_2 = 2
1254};
1255
1256struct ib_qp_attr {
1257 enum ib_qp_state qp_state;
1258 enum ib_qp_state cur_qp_state;
1259 enum ib_mtu path_mtu;
1260 enum ib_mig_state path_mig_state;
1261 u32 qkey;
1262 u32 rq_psn;
1263 u32 sq_psn;
1264 u32 dest_qp_num;
1265 int qp_access_flags;
1266 struct ib_qp_cap cap;
1267 struct rdma_ah_attr ah_attr;
1268 struct rdma_ah_attr alt_ah_attr;
1269 u16 pkey_index;
1270 u16 alt_pkey_index;
1271 u8 en_sqd_async_notify;
1272 u8 sq_draining;
1273 u8 max_rd_atomic;
1274 u8 max_dest_rd_atomic;
1275 u8 min_rnr_timer;
1276 u8 port_num;
1277 u8 timeout;
1278 u8 retry_cnt;
1279 u8 rnr_retry;
1280 u8 alt_port_num;
1281 u8 alt_timeout;
1282 u32 rate_limit;
1283};
1284
1285enum ib_wr_opcode {
1286 IB_WR_RDMA_WRITE,
1287 IB_WR_RDMA_WRITE_WITH_IMM,
1288 IB_WR_SEND,
1289 IB_WR_SEND_WITH_IMM,
1290 IB_WR_RDMA_READ,
1291 IB_WR_ATOMIC_CMP_AND_SWP,
1292 IB_WR_ATOMIC_FETCH_AND_ADD,
1293 IB_WR_LSO,
1294 IB_WR_SEND_WITH_INV,
1295 IB_WR_RDMA_READ_WITH_INV,
1296 IB_WR_LOCAL_INV,
1297 IB_WR_REG_MR,
1298 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1299 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1300 IB_WR_REG_SIG_MR,
1301
1302
1303
1304 IB_WR_RESERVED1 = 0xf0,
1305 IB_WR_RESERVED2,
1306 IB_WR_RESERVED3,
1307 IB_WR_RESERVED4,
1308 IB_WR_RESERVED5,
1309 IB_WR_RESERVED6,
1310 IB_WR_RESERVED7,
1311 IB_WR_RESERVED8,
1312 IB_WR_RESERVED9,
1313 IB_WR_RESERVED10,
1314};
1315
/* Per-WR flags for ib_send_wr.send_flags. */
enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	/* NOTE(review): 1 << 31 left-shifts into the sign bit of int (UB in
	 * ISO C); consider 1u << 31 — verify against kernel enum conventions */
	IB_SEND_RESERVED_END	= (1 << 31),
};
1327
1328struct ib_sge {
1329 u64 addr;
1330 u32 length;
1331 u32 lkey;
1332};
1333
1334struct ib_cqe {
1335 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1336};
1337
1338struct ib_send_wr {
1339 struct ib_send_wr *next;
1340 union {
1341 u64 wr_id;
1342 struct ib_cqe *wr_cqe;
1343 };
1344 struct ib_sge *sg_list;
1345 int num_sge;
1346 enum ib_wr_opcode opcode;
1347 int send_flags;
1348 union {
1349 __be32 imm_data;
1350 u32 invalidate_rkey;
1351 } ex;
1352};
1353
1354struct ib_rdma_wr {
1355 struct ib_send_wr wr;
1356 u64 remote_addr;
1357 u32 rkey;
1358};
1359
/*
 * Upcast a generic send WR to its RDMA read/write form.  Valid only when
 * @wr is the embedded member of a struct ib_rdma_wr.
 */
static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
1364
1365struct ib_atomic_wr {
1366 struct ib_send_wr wr;
1367 u64 remote_addr;
1368 u64 compare_add;
1369 u64 swap;
1370 u64 compare_add_mask;
1371 u64 swap_mask;
1372 u32 rkey;
1373};
1374
/*
 * Upcast a generic send WR to its atomic-operation form.  Valid only when
 * @wr is the embedded member of a struct ib_atomic_wr.
 */
static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}
1379
1380struct ib_ud_wr {
1381 struct ib_send_wr wr;
1382 struct ib_ah *ah;
1383 void *header;
1384 int hlen;
1385 int mss;
1386 u32 remote_qpn;
1387 u32 remote_qkey;
1388 u16 pkey_index;
1389 u8 port_num;
1390};
1391
/*
 * Upcast a generic send WR to its UD (unreliable datagram) form.  Valid
 * only when @wr is the embedded member of a struct ib_ud_wr.
 */
static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}
1396
1397struct ib_reg_wr {
1398 struct ib_send_wr wr;
1399 struct ib_mr *mr;
1400 u32 key;
1401 int access;
1402};
1403
/*
 * Upcast a generic send WR to its memory-registration form.  Valid only
 * when @wr is the embedded member of a struct ib_reg_wr.
 */
static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
1408
1409struct ib_sig_handover_wr {
1410 struct ib_send_wr wr;
1411 struct ib_sig_attrs *sig_attrs;
1412 struct ib_mr *sig_mr;
1413 int access_flags;
1414 struct ib_sge *prot;
1415};
1416
/*
 * Upcast a generic send WR to its signature-handover form.  Valid only
 * when @wr is the embedded member of a struct ib_sig_handover_wr.
 */
static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}
1421
1422struct ib_recv_wr {
1423 struct ib_recv_wr *next;
1424 union {
1425 u64 wr_id;
1426 struct ib_cqe *wr_cqe;
1427 };
1428 struct ib_sge *sg_list;
1429 int num_sge;
1430};
1431
1432enum ib_access_flags {
1433 IB_ACCESS_LOCAL_WRITE = 1,
1434 IB_ACCESS_REMOTE_WRITE = (1<<1),
1435 IB_ACCESS_REMOTE_READ = (1<<2),
1436 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1437 IB_ACCESS_MW_BIND = (1<<4),
1438 IB_ZERO_BASED = (1<<5),
1439 IB_ACCESS_ON_DEMAND = (1<<6),
1440 IB_ACCESS_HUGETLB = (1<<7),
1441};
1442
1443
1444
1445
1446
1447enum ib_mr_rereg_flags {
1448 IB_MR_REREG_TRANS = 1,
1449 IB_MR_REREG_PD = (1<<1),
1450 IB_MR_REREG_ACCESS = (1<<2),
1451 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1452};
1453
1454struct ib_fmr_attr {
1455 int max_pages;
1456 int max_maps;
1457 u8 page_shift;
1458};
1459
1460struct ib_umem;
1461
1462enum rdma_remove_reason {
1463
1464 RDMA_REMOVE_DESTROY,
1465
1466 RDMA_REMOVE_CLOSE,
1467
1468 RDMA_REMOVE_DRIVER_REMOVE,
1469
1470 RDMA_REMOVE_DURING_CLEANUP,
1471};
1472
1473struct ib_rdmacg_object {
1474#ifdef CONFIG_CGROUP_RDMA
1475 struct rdma_cgroup *cg;
1476#endif
1477};
1478
1479struct ib_ucontext {
1480 struct ib_device *device;
1481 struct ib_uverbs_file *ufile;
1482 int closing;
1483
1484
1485 struct mutex uobjects_lock;
1486 struct list_head uobjects;
1487
1488 struct rw_semaphore cleanup_rwsem;
1489 enum rdma_remove_reason cleanup_reason;
1490
1491 struct pid *tgid;
1492#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1493 struct rb_root_cached umem_tree;
1494
1495
1496
1497
1498 struct rw_semaphore umem_rwsem;
1499 void (*invalidate_range)(struct ib_umem *umem,
1500 unsigned long start, unsigned long end);
1501
1502 struct mmu_notifier mn;
1503 atomic_t notifier_count;
1504
1505 struct list_head no_private_counters;
1506 int odp_mrs_count;
1507#endif
1508
1509 struct ib_rdmacg_object cg_obj;
1510};
1511
1512struct ib_uobject {
1513 u64 user_handle;
1514 struct ib_ucontext *context;
1515 void *object;
1516 struct list_head list;
1517 struct ib_rdmacg_object cg_obj;
1518 int id;
1519 struct kref ref;
1520 atomic_t usecnt;
1521 struct rcu_head rcu;
1522
1523 const struct uverbs_obj_type *type;
1524};
1525
1526struct ib_uobject_file {
1527 struct ib_uobject uobj;
1528
1529 struct ib_uverbs_file *ufile;
1530};
1531
1532struct ib_udata {
1533 const void __user *inbuf;
1534 void __user *outbuf;
1535 size_t inlen;
1536 size_t outlen;
1537};
1538
1539struct ib_pd {
1540 u32 local_dma_lkey;
1541 u32 flags;
1542 struct ib_device *device;
1543 struct ib_uobject *uobject;
1544 atomic_t usecnt;
1545
1546 u32 unsafe_global_rkey;
1547
1548
1549
1550
1551 struct ib_mr *__internal_mr;
1552 struct rdma_restrack_entry res;
1553};
1554
1555struct ib_xrcd {
1556 struct ib_device *device;
1557 atomic_t usecnt;
1558 struct inode *inode;
1559
1560 struct mutex tgt_qp_mutex;
1561 struct list_head tgt_qp_list;
1562};
1563
1564struct ib_ah {
1565 struct ib_device *device;
1566 struct ib_pd *pd;
1567 struct ib_uobject *uobject;
1568 enum rdma_ah_attr_type type;
1569};
1570
1571typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1572
1573enum ib_poll_context {
1574 IB_POLL_DIRECT,
1575 IB_POLL_SOFTIRQ,
1576 IB_POLL_WORKQUEUE,
1577};
1578
/* Completion queue. */
struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;		/* number of CQ entries */
	atomic_t          	usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;	/* how the core polls this CQ */
	struct ib_wc		*wc;		/* scratch WC array for ib_process_cq_direct */
	union {
		struct irq_poll		iop;	/* used for IB_POLL_SOFTIRQ */
		struct work_struct	work;	/* used for IB_POLL_WORKQUEUE */
	};
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1598
/* Shared receive queue. */
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;		/* count number of QPs attached */

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;	/* only valid for IB_SRQT_XRC */
		};
	} ext;
};
1618
enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};
1634
/* Work-queue type; only receive queues are currently defined. */
enum ib_wq_type {
	IB_WQT_RQ
};
1638
/* State machine of a work queue (mirrors QP reset/ready/error states). */
enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};
1644
/* Work queue (receive queue usable through an indirection table / RSS). */
struct ib_wq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	void		    *wq_context;
	void		    (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32		wq_num;
	enum ib_wq_state       state;
	enum ib_wq_type	wq_type;
	atomic_t		usecnt;
};
1657
/* Creation flags for a WQ; see the matching IB_RAW_PACKET_CAP_* bits. */
enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};
1664
/* Attributes used when creating a work queue. */
struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type	wq_type;
	u32		max_wr;		/* max outstanding receive WRs */
	u32		max_sge;	/* max scatter/gather entries per WR */
	struct	ib_cq	       *cq;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags;	/* use enum ib_wq_flags */
};
1674
/* Which fields of struct ib_wq_attr are valid in a modify_wq call. */
enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};
1680
/* Attributes for modifying a work queue. */
struct ib_wq_attr {
	enum	ib_wq_state	wq_state;	/* desired state */
	enum	ib_wq_state	curr_wq_state;	/* assumed current state */
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};
1687
/* Receive work queue indirection table, used for RSS spreading. */
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32		ind_tbl_num;
	u32		log_ind_tbl_size;	/* table holds 1 << log_ind_tbl_size WQs */
	struct ib_wq	**ind_tbl;
};
1696
/* Attributes used when creating a RWQ indirection table. */
struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a driver specific representation of a WQ */
	struct ib_wq	**ind_tbl;
};
1702
/* State of a (port, pkey-index) pair tracked for IB security enforcement. */
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};
1708
1709struct ib_qp_security;
1710
/* Tracks one (port, pkey index) association of a QP for security checks. */
struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;	/* link in the port's pkey list */
	struct list_head	to_error_list;	/* link while transitioning to error */
	struct ib_qp_security  *sec;
};
1719
/* Primary and alternate path (port, pkey) settings of a QP. */
struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};
1724
/* Per-QP security state used by the IB port/pkey LSM enforcement. */
struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;	/* LSM blob */
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
1741
1742
1743
1744
1745
/*
 * struct ib_qp - queue pair.
 *
 * @usecnt and @open_list are only valid for the "real" XRC target QP;
 * user-visible XRC QPs created by ib_open_qp() reference it via @real_qp.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;	/* protects rdma_mrs / sig_mrs */
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u8			port;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;
};
1779
/* Device memory region allocated from on-device memory. */
struct ib_dm {
	struct ib_device  *device;
	u32		   length;	/* allocation size in bytes */
	u32		   flags;
	struct ib_uobject *uobject;
	atomic_t	   usecnt;	/* count of MRs registered on this DM */
};
1787
/* Memory region. */
struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;	/* registered length in bytes */
	unsigned int	   page_size;
	bool		   need_inval;	/* must be invalidated before reuse */
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR (fast-reg) kernel MRs */
	};

	struct ib_dm      *dm;	/* non-NULL for device-memory backed MRs */

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1809
/* Memory window. */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;	/* type 1 or type 2 window */
};
1817
/* Fast memory region (legacy FMR interface). */
struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;	/* link in an fmr_pool list */
	u32			lkey;
	u32			rkey;
};
1825
1826
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};
1841
1842
/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP                = 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
};
1861#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1862#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1863
1864
1865
1866
/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};
1874
enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};
1880
/* Ethernet (L2) match fields for flow steering. */
struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[0];
};
1889
/* Ethernet flow spec: value + per-bit match mask. */
struct ib_flow_spec_eth {
	u32			  type;	/* IB_FLOW_SPEC_ETH */
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};
1896
/* InfiniBand L2 match fields for flow steering. */
struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
	/* Must be last */
	u8	real_sz[0];
};
1903
/* InfiniBand flow spec: value + per-bit match mask. */
struct ib_flow_spec_ib {
	u32			 type;	/* IB_FLOW_SPEC_IB */
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};
1910
1911
/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
				    last have this flag set */
};
1917
/* IPv4 (L3) match fields for flow steering. */
struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;	/* enum ib_ipv4_flags */
	/* Must be last */
	u8	real_sz[0];
};
1928
/* IPv4 flow spec: value + per-bit match mask. */
struct ib_flow_spec_ipv4 {
	u32			   type;	/* IB_FLOW_SPEC_IPV4 */
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};
1935
/* IPv6 (L3) match fields for flow steering. */
struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[0];
};
1946
/* IPv6 flow spec: value + per-bit match mask. */
struct ib_flow_spec_ipv6 {
	u32			   type;	/* IB_FLOW_SPEC_IPV6 */
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};
1953
/* TCP/UDP (L4) match fields for flow steering. */
struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[0];
};
1960
/* TCP/UDP flow spec: value + per-bit match mask. */
struct ib_flow_spec_tcp_udp {
	u32			      type;	/* IB_FLOW_SPEC_TCP or _UDP */
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};
1967
/* Tunnel-id (e.g. VXLAN VNI) match field for flow steering. */
struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[0];
};
1972
1973
1974
1975
/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32			      type;	/* IB_FLOW_SPEC_VXLAN_TUNNEL */
	u16			      size;
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};
1982
/* ESP header match fields for flow steering. */
struct ib_flow_esp_filter {
	__be32	spi;
	__be32  seq;
	/* Must be last */
	u8	real_sz[0];
};
1989
/* ESP flow spec: value + per-bit match mask. */
struct ib_flow_spec_esp {
	u32                           type;	/* IB_FLOW_SPEC_ESP */
	u16			      size;
	struct ib_flow_esp_filter     val;
	struct ib_flow_esp_filter     mask;
};
1996
/* Action: tag matching packets with tag_id in their completion. */
struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;	/* IB_FLOW_SPEC_ACTION_TAG */
	u16			      size;
	u32                           tag_id;
};
2002
/* Action: drop matching packets. */
struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;	/* IB_FLOW_SPEC_ACTION_DROP */
	u16			      size;
};
2007
/* Action: apply a previously created flow action (e.g. ESP offload). */
struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	      type;	/* IB_FLOW_SPEC_ACTION_HANDLE */
	u16			      size;
	struct ib_flow_action	     *act;
};
2013
/* Any single flow spec; the anonymous header gives common type/size access. */
union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6        ipv6;
	struct ib_flow_spec_tunnel      tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_action_tag  flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
};
2030
/* Flow steering rule header; num_of_specs specs follow it in memory. */
struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;	/* total size including trailing specs */
	u16	     priority;
	u32	     flags;	/* enum ib_flow_flags */
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
2043
/* Handle for an installed flow steering rule. */
struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};
2048
enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,	/* IPsec ESP offload */
};
2053
/* Keying material for an ESP flow action, tagged by algorithm. */
struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};
2060
/* Replay-protection configuration for an ESP flow action. */
struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};
2067
enum ib_flow_action_attrs_esp_flags {
	/* Bits 0-31 are reserved for the user-space flag values of
	 * enum ib_uverbs_flow_action_esp_flags, so that the same flags
	 * word can carry both; kernel-internal flags start at bit 32.
	 */

	/* Kernel (internal) flags */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};
2078
/* Singly-linked list of flow specs (e.g. the ESP encapsulation match). */
struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};
2083
/* Attributes for creating/modifying an ESP flow action. */
struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats		*keymat;
	struct ib_flow_action_attrs_esp_replays		*replay;
	struct ib_flow_spec_list			*encap;
	/* Used when ESP is handled by the HW and the ESN needs to be
	 * managed by the kernel (valid when ESN_TRIGGERED is set).
	 */
	u32						esn;
	u32						spi;
	u32						seq;
	u32						tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64						flags;
	u64						hard_limit_pkts;
};
2099
/* Handle for a created flow action object. */
struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};
2106
2107struct ib_mad_hdr;
2108struct ib_grh;
2109
/* Flags controlling key checks when processing incoming MADs. */
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};
2115
/* Result bitmask returned by the driver's process_mad() hook. */
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};
2122
/* Per-port cached copies of commonly queried port attributes. */
struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	u8                     lmc;
	enum ib_port_state     port_state;
};
2130
/* Device-wide attribute cache; @lock protects the per-port entries. */
struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache   *ports;	/* array indexed by port number - 1 */
};
2136
2137struct iw_cm_verbs;
2138
/* Per-port data that never changes after device registration. */
struct ib_port_immutable {
	int                           pkey_tbl_len;
	int                           gid_tbl_len;
	u32                           core_cap_flags;	/* RDMA_CORE_CAP_* bits */
	u32                           max_mad_size;
};
2145
2146
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};
2151
2152
2153
2154
2155
/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void              *clnt_priv;
	struct ib_device  *hca;
	u8                 port_num;

	/* cleanup function must be specified */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};
2176
struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t                    list_lock;
	struct list_head              pkey_list;
};
2182
2183struct uverbs_attr_bundle;
2184
/*
 * struct ib_device - one RDMA device instance.
 *
 * The function pointers below form the driver-provided operations table;
 * mandatory vs. optional status of each hook is enforced at registration
 * time by the core (see ib_register_device()).
 */
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore
	 */
	struct list_head              client_data_list;

	struct ib_cache               cache;
	/*
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct ib_port_pkey_list     *port_pkey_list;

	struct iw_cm_verbs	     *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct means the stats will never get updated except by explicit
	 *   userspace request.
	 */
	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
						     u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls to get_hw_stats)
	 */
	int		           (*get_hw_stats)(struct ib_device *device,
						   struct rdma_hw_stats *stats,
						   u8 port, int index);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr,
						   struct ib_udata *udata);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device	  *(*get_netdev)(struct ib_device *device,
						 u8 port_num);
	/* query_gid should be return GID value for @device, when @port_num
	 * link layer is either IB or iWarp. It is no-op if @port_num port
	 * is RoCE link layer.
	 */
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver
	 * may allocate memory to contain this information and store it in
	 * @context when a new GID entry is written to. Params are consistent
	 * until the next call of add_gid or delete_gid. The function should
	 * return 0 on success or error otherwise. The function could be
	 * called concurrently for different ports. This function is only
	 * called when roce_gid_table is used.
	 */
	int		           (*add_gid)(const union ib_gid *gid,
					      const struct ib_gid_attr *attr,
					      void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int		           (*del_gid)(const struct ib_gid_attr *attr,
					      void **context);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct rdma_ah_attr *ah_attr,
						struct ib_udata *udata);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct rdma_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct rdma_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						const struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
					       enum ib_mr_type mr_type,
					       u32 max_num_sg);
	int                        (*map_mr_sg)(struct ib_mr *mr,
						struct scatterlist *sg,
						int sg_nents,
						unsigned int *sg_offset);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type,
					       struct ib_udata *udata);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad_hdr *in_mad,
						  size_t in_mad_size,
						  struct ib_mad_hdr *out_mad,
						  size_t *out_mad_size,
						  u16 *out_mad_pkey_index);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr
						  *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);
	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void			   (*drain_rq)(struct ib_qp *qp);
	void			   (*drain_sq)(struct ib_qp *qp);
	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
							int state);
	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_info *ivf);
	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_stats *stats);
	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
						  int type);
	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
						struct ib_wq_init_attr *init_attr,
						struct ib_udata *udata);
	int			   (*destroy_wq)(struct ib_wq *wq);
	int			   (*modify_wq)(struct ib_wq *wq,
						struct ib_wq_attr *attr,
						u32 wq_attr_mask,
						struct ib_udata *udata);
	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
							   struct ib_rwq_ind_table_init_attr *init_attr,
							   struct ib_udata *udata);
	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
							     const struct ib_flow_action_attrs_esp *attr,
							     struct uverbs_attr_bundle *attrs);
	int			   (*destroy_flow_action)(struct ib_flow_action *action);
	int			   (*modify_flow_action_esp)(struct ib_flow_action *action,
							     const struct ib_flow_action_attrs_esp *attr,
							     struct uverbs_attr_bundle *attrs);
	struct ib_dm *             (*alloc_dm)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_dm_alloc_attr *attr,
					       struct uverbs_attr_bundle *attrs);
	int                        (*dealloc_dm)(struct ib_dm *dm);
	struct ib_mr *             (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
						struct ib_dm_mr_attr *attr,
						struct uverbs_attr_bundle *attrs);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
	 * doesn't support the specified rdma netdev type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	u8                           node_type;
	u8                           phys_port_cnt;
	struct ib_device_attr        attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats         *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;
	/*
	 * Implementation details of the RDMA core, don't use in drivers
	 */
	struct rdma_restrack_root     res;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int			     (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	struct uverbs_root_spec		*specs_root;
	enum rdma_driver_id		driver_id;
};
2517
/* A consumer of RDMA devices (e.g. IPoIB, SRP); registered via
 * ib_register_client() and notified when devices come and go.
 */
struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};
2547
2548struct ib_device *ib_alloc_device(size_t size);
2549void ib_dealloc_device(struct ib_device *device);
2550
2551void ib_get_device_fw_str(struct ib_device *device, char *str);
2552
2553int ib_register_device(struct ib_device *device,
2554 int (*port_callback)(struct ib_device *,
2555 u8, struct kobject *));
2556void ib_unregister_device(struct ib_device *device);
2557
2558int ib_register_client (struct ib_client *client);
2559void ib_unregister_client(struct ib_client *client);
2560
2561void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2562void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2563 void *data);
2564
2565static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2566{
2567 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2568}
2569
2570static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2571{
2572 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2573}
2574
2575static inline bool ib_is_buffer_cleared(const void __user *p,
2576 size_t len)
2577{
2578 bool ret;
2579 u8 *buf;
2580
2581 if (len > USHRT_MAX)
2582 return false;
2583
2584 buf = memdup_user(p, len);
2585 if (IS_ERR(buf))
2586 return false;
2587
2588 ret = !memchr_inv(buf, 0, len);
2589 kfree(buf);
2590 return ret;
2591}
2592
2593static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2594 size_t offset,
2595 size_t len)
2596{
2597 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2598}
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2617 enum ib_qp_type type, enum ib_qp_attr_mask mask,
2618 enum rdma_link_layer ll);
2619
2620void ib_register_event_handler(struct ib_event_handler *event_handler);
2621void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2622void ib_dispatch_event(struct ib_event *event);
2623
2624int ib_query_port(struct ib_device *device,
2625 u8 port_num, struct ib_port_attr *port_attr);
2626
2627enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2628 u8 port_num);
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2640{
2641 return device->is_switch;
2642}
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652static inline u8 rdma_start_port(const struct ib_device *device)
2653{
2654 return rdma_cap_ib_switch(device) ? 0 : 1;
2655}
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665static inline u8 rdma_end_port(const struct ib_device *device)
2666{
2667 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2668}
2669
/* Nonzero iff @port falls within [rdma_start_port(), rdma_end_port()]. */
static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	if (port < rdma_start_port(device))
		return 0;

	return port <= rdma_end_port(device);
}
2676
2677static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2678{
2679 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2680}
2681
2682static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2683{
2684 return device->port_immutable[port_num].core_cap_flags &
2685 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2686}
2687
2688static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2689{
2690 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2691}
2692
2693static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2694{
2695 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2696}
2697
2698static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2699{
2700 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2701}
2702
2703static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2704{
2705 return rdma_protocol_ib(device, port_num) ||
2706 rdma_protocol_roce(device, port_num);
2707}
2708
2709static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2710{
2711 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2712}
2713
2714static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2715{
2716 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2717}
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2732{
2733 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2734}
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2756{
2757 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2758 == RDMA_CORE_CAP_OPA_MAD;
2759}
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2782{
2783 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2784}
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2802{
2803 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2804}
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2819{
2820 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2821}
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2839{
2840 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2841}
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2861{
2862 return rdma_cap_ib_sa(device, port_num);
2863}
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2879{
2880 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2881}
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2900{
2901 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2902}
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2914{
2915 return (device->port_immutable[port_num].core_cap_flags &
2916 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2917}
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2932{
2933 return device->port_immutable[port_num].max_mad_size;
2934}
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2950 u8 port_num)
2951{
2952 return rdma_protocol_roce(device, port_num) &&
2953 device->add_gid && device->del_gid;
2954}
2955
2956
2957
2958
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ WITH INVALIDATE.  No other
	 * protocol advertises this capability, so the transport check
	 * is sufficient here.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
2967
2968int ib_query_gid(struct ib_device *device,
2969 u8 port_num, int index, union ib_gid *gid,
2970 struct ib_gid_attr *attr);
2971
2972int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2973 int state);
2974int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2975 struct ifla_vf_info *info);
2976int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2977 struct ifla_vf_stats *stats);
2978int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2979 int type);
2980
2981int ib_query_pkey(struct ib_device *device,
2982 u8 port_num, u16 index, u16 *pkey);
2983
2984int ib_modify_device(struct ib_device *device,
2985 int device_modify_mask,
2986 struct ib_device_modify *device_modify);
2987
2988int ib_modify_port(struct ib_device *device,
2989 u8 port_num, int port_modify_mask,
2990 struct ib_port_modify *port_modify);
2991
2992int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2993 u8 *port_num, u16 *index);
2994
2995int ib_find_pkey(struct ib_device *device,
2996 u8 port_num, u16 pkey, u16 *index);
2997
enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to continue to use that global rkey.
	 *
	 * WARNING: This flag should only be used for kernel consumers that
	 * absolutely need it, as it exposes all physical memory to remote
	 * access.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};
3010
3011struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3012 const char *caller);
/* Allocate a PD, recording the calling module name for restrack/debugfs. */
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3015void ib_dealloc_pd(struct ib_pd *pd);
3016
3017
3018
3019
3020
3021
3022
3023
3024
/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 *   vector on behalf of a userspace consumer.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: Driver-private data forwarded from userspace, if any.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);

/**
 * ib_get_gids_from_rdma_hdr - Extract source and destination GIDs from
 *   the L3 header of a received packet.
 * @hdr: The L3 header to parse.
 * @net_type: Type of header to parse (GRH, or RoCEv2 IPv4/IPv6).
 * @sgid: Place to store the source GID.
 * @dgid: Place to store the destination GID.
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version of the packet.
 * @hdr: The L3 header whose version field is inspected.
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
/**
 * ib_init_ah_attr_from_wc - Initialize address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  Presumably ignored
 *   unless @wc indicates a valid GRH — TODO confirm in implementation.
 * @ah_attr: Returned attributes that can be used when creating an
 *   address handle for replying to the message.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Create an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.
 * @port_num: The outbound port number to associate with the handle.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modify the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with @ah.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Query the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: Returned attributes of the address vector.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroy an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
/**
 * ib_create_srq - Create a shared receive queue (SRQ) associated with
 *   the specified protection domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: Initial attributes required to create the SRQ.
 *   On success the actual capabilities of the created SRQ are
 *   presumably written back into this struct — TODO confirm.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modify the attributes of the given SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: The attribute values to apply.
 * @srq_attr_mask: Bit-mask selecting which attributes in @srq_attr
 *   to apply.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Return the attribute list and current values of the
 *   given SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: Returned attributes of the SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroy the given SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);
3160
3161
3162
3163
3164
3165
3166
3167
/**
 * ib_post_srq_recv - Post a list of work requests to the given SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * Thin dispatch to the device driver's post_srq_recv() verb.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
3174
3175
3176
3177
3178
3179
3180
3181
3182
/**
 * ib_create_qp - Create a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: Initial attributes required to create the QP.  On
 *   success the actual capabilities of the created QP are presumably
 *   written back into this struct — TODO confirm.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp_with_udata - Modify the attributes of the given QP,
 *   passing driver-private user data through.
 * @qp: The QP to modify.
 * @attr: The QP attribute values to apply.
 * @attr_mask: Bit-mask selecting which attributes in @attr to apply.
 * @udata: Driver-private data forwarded from userspace, if any.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modify the attributes of the given QP.
 * @qp: The QP to modify.
 * @qp_attr: The QP attribute values to apply.
 * @qp_attr_mask: Bit-mask selecting which attributes in @qp_attr
 *   to apply.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Return the attribute list and current values of the
 *   given QP.
 * @qp: The QP to query.
 * @qp_attr: Returned attributes of the QP.
 * @qp_attr_mask: Bit-mask selecting which attributes to query.
 * @qp_init_attr: Returned initial attributes of the QP.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroy the given QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing shareable QP.
 * @xrcd: The XRC domain the QP belongs to.
 * @qp_open_attr: Attributes identifying the QP to open.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release the reference obtained via ib_open_qp().
 * @qp: The QP handle to release.
 */
int ib_close_qp(struct ib_qp *qp);
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
/**
 * ib_post_send - Post a list of work requests to the send queue of the
 *   given QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * Thin dispatch to the device driver's post_send() verb.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
3275
3276
3277
3278
3279
3280
3281
3282
3283
/**
 * ib_post_recv - Post a list of work requests to the receive queue of
 *   the given QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * Thin dispatch to the device driver's post_recv() verb.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
3290
/*
 * Allocate/free a completion queue managed by the ib_cq completion
 * infrastructure.  The ib_alloc_cq() wrapper records the calling
 * module's name (KBUILD_MODNAME) for bookkeeping/diagnostics.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller);
#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)

void ib_free_cq(struct ib_cq *cq);
/* Poll a CQ obtained from ib_alloc_cq() directly, up to @budget entries. */
int ib_process_cq_direct(struct ib_cq *cq, int budget);
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
/**
 * ib_create_cq - Create a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ, returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes with which the CQ should be created.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modify the capacity of the given CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modify the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: Number of completions per notification event.
 * @cq_period: Moderation period — presumably in microseconds; confirm
 *   against the driver implementation.
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroy the given CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
/**
 * ib_poll_cq - Poll a CQ for completion(s).
 * @cq: The CQ being polled.
 * @num_entries: Maximum number of completions to return.
 * @wc: Array of at least @num_entries &struct ib_wc where completions
 *   will be returned.
 *
 * Thin dispatch to the device driver's poll_cq() verb; by verb
 * convention the return value is the number of completions retrieved,
 * or a negative errno.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
/**
 * ib_req_notify_cq - Request a completion notification on the given CQ.
 * @cq: The CQ to generate an event for.
 * @flags: ib_cq_notify_flags selecting when the event should fire
 *   (e.g. next solicited vs. next completion) and optional reporting
 *   of missed events — see enum ib_cq_notify_flags.
 *
 * Thin dispatch to the device driver's req_notify_cq() verb.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
3393
3394
3395
3396
3397
3398
3399
3400
/**
 * ib_req_ncomp_notif - Request an event to be generated when @wc_cnt
 *   work completions are present on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of work completions at which to trigger.
 *
 * Optional verb: returns -ENOSYS when the device does not implement it.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}
3407
3408
3409
3410
3411
3412
/*
 * DMA mapping helpers: thin wrappers that forward to the generic DMA API
 * using the ib_device's underlying struct device.
 */

/**
 * ib_dma_mapping_error - Check a DMA address for error.
 * @dev: The device for which the dma_addr was created.
 * @dma_addr: The DMA address to check.
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address.
 * @dev: The device for which the dma_addr is to be created.
 * @cpu_addr: The kernel virtual address.
 * @size: The size of the region in bytes.
 * @direction: The direction of the DMA.
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single().
 * @dev: The device for which the DMA address was created.
 * @addr: The DMA address.
 * @size: The size of the region in bytes.
 * @direction: The direction of the DMA.
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address.
 * @dev: The device for which the dma_addr is to be created.
 * @page: The page to be mapped.
 * @offset: The offset within the page.
 * @size: The size of the region in bytes.
 * @direction: The direction of the DMA.
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
					 enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page().
 * @dev: The device for which the DMA address was created.
 * @addr: The DMA address.
 * @size: The size of the region in bytes.
 * @direction: The direction of the DMA.
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses.
 * @dev: The device for which the DMA addresses are to be created.
 * @sg: The scatter/gather list.
 * @nents: The number of entries in the list.
 * @direction: The direction of the DMA.
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses.
 * @dev: The device for which the DMA addresses were created.
 * @sg: The scatter/gather list.
 * @nents: The number of entries in the list.
 * @direction: The direction of the DMA.
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/* As ib_dma_map_sg()/ib_dma_unmap_sg(), with extra DMA attributes. */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
3521
3522
3523
3524
3525
3526
3527
3528
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry.
 * @dev: The device for which the DMA addresses were created (unused
 *   here; kept for API symmetry with the other ib_dma_* helpers).
 * @sg: The scatter/gather entry.
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry.
 * @dev: The device for which the DMA addresses were created (unused
 *   here; kept for API symmetry with the other ib_dma_* helpers).
 * @sg: The scatter/gather entry.
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
3548
3549
3550
3551
3552
3553
3554
3555
/**
 * ib_dma_sync_single_for_cpu - Prepare a DMA region to be accessed by
 *   the CPU.
 * @dev: The device for which the DMA address was created.
 * @addr: The DMA address.
 * @size: The size of the region in bytes.
 * @dir: The direction of the DMA.
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare a DMA region to be accessed
 *   by the device.
 * @dev: The device for which the DMA address was created.
 * @addr: The DMA address.
 * @size: The size of the region in bytes.
 * @dir: The direction of the DMA.
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA.
 * @dev: The device for which the DMA address is requested.
 * @size: The size of the region to allocate in bytes.
 * @dma_handle: A pointer for returning the DMA address of the region.
 * @flag: Memory allocator flags.
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent().
 * @dev: The device for which the DMA addresses were allocated.
 * @size: The size of the region.
 * @cpu_addr: The address returned by ib_dma_alloc_coherent().
 * @dma_handle: The DMA address returned by ib_dma_alloc_coherent().
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
3607
3608
3609
3610
3611
3612
3613
3614
/**
 * ib_dereg_mr - Deregister a memory region and remove it from the
 *   special cases and handling of the associated protection domain.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/* Allocate a memory region of the given type, usable for fast
 * registration with up to @max_num_sg scatter/gather entries. */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);
3620
3621
3622
3623
3624
3625
3626
3627static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3628{
3629 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3630 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3631}
3632
3633
3634
3635
3636
3637
3638static inline u32 ib_inc_rkey(u32 rkey)
3639{
3640 const u32 mask = 0x000000ff;
3641 return ((rkey + 1) & mask) | (rkey & ~mask);
3642}
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
/**
 * ib_alloc_fmr - Allocate a fast memory region (FMR).
 * @pd: The protection domain associated with the region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Map the FMR to a list of physical pages.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the region.
 * @list_len: The number of pages in @page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 *
 * Thin dispatch to the device driver's map_phys_fmr() verb.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
3670
3671
3672
3673
3674
/**
 * ib_unmap_fmr - Remove the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocate a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attach a QP to a multicast group.
 * @qp: QP to attach to the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID (for IB fabrics).
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detach a QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/*
 * Allocate an XRC domain.  The ib_alloc_xrcd() wrapper records the
 * calling module's name (KBUILD_MODNAME) for bookkeeping/diagnostics.
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
	__ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocate an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

/* Create/destroy a flow steering rule attached to the given QP. */
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);
3723
3724static inline int ib_check_mr_access(int flags)
3725{
3726
3727
3728
3729
3730 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3731 !(flags & IB_ACCESS_LOCAL_WRITE))
3732 return -EINVAL;
3733
3734 return 0;
3735}
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
/**
 * ib_check_mr_status - Check the status of a memory region.
 * @mr: The memory region to retrieve the status for.
 * @check_mask: Bit-mask of the status checks to perform.
 * @mr_status: Returned status of the memory region.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

/* Look up the net_device matching the given port/pkey/gid/address. */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
/* Work-queue (WQ) and receive-work-queue indirection table management. */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr*
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

/* Map a scatterlist into a memory region for fast registration. */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

/**
 * ib_map_mr_sg_zbva - As ib_map_mr_sg(), but with a zero-based virtual
 *   address: the MR's iova is forced to 0 after mapping.
 * @mr: The memory region to map.
 * @sg: The scatterlist to map.
 * @sg_nents: Number of entries in @sg.
 * @sg_offset: Byte offset into the first entry, updated on return.
 * @page_size: Page size to use for the mapping.
 */
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

/* Walk a scatterlist and hand each aligned page address to @set_page. */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

/* Drain the receive queue, send queue, or both, of the given QP. */
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

/* Query the Ethernet speed/width of the given port. */
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3788
/*
 * Accessors for struct rdma_ah_attr.  The attr is a tagged union over the
 * IB, RoCE and OPA transports; setters that only apply to some transports
 * silently do nothing on the others, and getters return 0/false/NULL.
 */

/* Return the destination MAC for RoCE attrs, NULL otherwise. */
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

/* Set the destination LID; truncated to 16 bits for plain IB. */
static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

/* Get the destination LID; 0 for transports without one. */
static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

/* Set/get the service level. */
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

/* Set/get the source path bits (IB and OPA only). */
static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

/* Set/get the OPA-specific "make GRD" flag (no-op/false elsewhere). */
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

/* Set/get the port number the AH is bound to. */
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

/* Set/get the static rate. */
static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

/* Set/get the AH flags (e.g. IB_AH_GRH); set replaces all flags. */
static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

/* Read-only view of the global route header fields. */
static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* Mutable view of the global route header fields. */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}
3900
/* Copy a raw 16-byte GID into the destination GID of the GRH. */
static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

/* Set the subnet-prefix half of the destination GID. */
static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

/* Set the interface-id half of the destination GID. */
static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

/*
 * Populate all GRH fields of the AH attributes at once and mark the
 * attributes as carrying a GRH (ah_flags is overwritten with IB_AH_GRH).
 * A NULL @dgid leaves the existing destination GID untouched.
 */
static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
}
3939
3940
3941
3942
3943
3944
3945
/**
 * rdma_ah_find_type - Return the address handle type appropriate for
 *   the given device and port.
 * @dev: Device to be checked.
 * @port_num: Port number to be checked.
 *
 * RoCE ports get ROCE attrs; IB ports get OPA attrs when the port has
 * the OPA extended-AH capability, plain IB attrs otherwise; anything
 * else is UNDEFINED.
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
/**
 * ib_lid_cpu16 - Return the LID in 16-bit CPU encoding.
 * @lid: A 32-bit LID.
 *
 * Warns once if the LID does not fit in 16 bits, then truncates.
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return the LID in 16-bit big-endian encoding.
 * @lid: A 32-bit LID.
 *
 * Warns once if the LID does not fit in 16 bits, then truncates.
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
/**
 * ib_get_vector_affinity - Get the affinity mapping of a given
 *   completion vector.
 * @device: The RDMA device.
 * @comp_vector: Index of the completion vector.
 *
 * Returns NULL when @comp_vector is out of range or the driver does not
 * implement get_vector_affinity; otherwise the driver-provided CPU mask
 * for that vector.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);

}
4006
4007
4008
4009
4010
4011
4012
4013void rdma_roce_rescan_device(struct ib_device *ibdev);
4014
4015#endif
4016