#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>

#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
	enum ib_gid_type gid_type;
	struct net_device *ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
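
/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above are inverses for the RoCE cases.  A hypothetical caller classifying
 * a RoCEv2 GID might do:
 *
 *	union ib_gid gid;	// filled in elsewhere, e.g. by ib_query_gid()
 *	enum rdma_network_type nt;
 *	enum ib_gid_type gt;
 *
 *	nt = ib_gid_to_network_type(IB_GID_TYPE_ROCE_UDP_ENCAP, &gid);
 *	// nt is RDMA_NETWORK_IPV4 for v4-mapped GIDs, RDMA_NETWORK_IPV6 otherwise
 *	gt = ib_network_to_gid_type(nt);
 *	// gt == IB_GID_TYPE_ROCE_UDP_ENCAP for both IPv4 and IPv6 RoCEv2
 */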

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* Not in use, former INIT_TYPE = (1 << 9) */
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* Reserved, old SEND_W_INV = (1 << 16) */
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * be set by any iWarp device which must support FRs to comply
	 * to the iWarp verbs spec.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC = 1,
	IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching on RC transport */
	IB_TM_CAP_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of tagged buffers in a TM-SRQ matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in a tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	int comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in KHZ */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256: return 256;
	case IB_MTU_512: return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X: return 1;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @timestamp: Used by the core code to track last time the stats were
 *	updated.
 * @lifespan: Time in jiffies that results may be cached for; defaults
 *	to 10 milliseconds if the driver passes 0.
 * @names: Array of static names used for the counters in the sysfs
 *	stats directory.
 * @num_counters: How many hardware counters there are.
 * @value: Array of u64 counters that are accessed by the sysfs code and
 *	filled in by the driver's get_hw_stats routine.
 */
struct rdma_hw_stats {
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *	for drivers.
 * @names: Array of static const char *
 * @num_counters: How many elements in array
 * @lifespan: How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
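
/*
 * Illustrative sketch (assumption, not from the original header): a driver's
 * alloc_hw_stats hook would typically wrap the helper above.  The counter
 * names used here are hypothetical.
 *
 *	static const char * const my_counters[] = {
 *		"rx_pkts", "tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_stats(struct ib_device *dev,
 *						       u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_counters,
 *						  ARRAY_SIZE(my_counters),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */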

/*
 * Define bits for the various functionality this port needs to be supported
 * by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	bool grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
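
/*
 * Illustrative sketch (not from the original header): registering an async
 * event handler with the macro above and ib_register_event_handler()
 * (declared later in this header).  "my_event_handler" is hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		// element.port_num is only meaningful for port events
 *		pr_info("async event %d\n", event->event);
 *	}
 *
 *	static struct ib_event_handler my_eh;
 *
 *	INIT_IB_EVENT_HANDLER(&my_eh, device, my_event_handler);
 *	ib_register_event_handler(&my_eh);
 */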

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK 0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/*
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/*
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16 pi_interval;
	u16 bg;
	u16 app_tag;
	u32 ref_tag;
	bool ref_remap;
	bool app_escape;
	bool ref_escape;
	u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8 check_mask;
	struct ib_sig_domain mem;
	struct ib_sig_domain wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type err_type;
	u32 expected;
	u32 actual;
	u64 sig_err_offset;
	u32 key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};
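
/*
 * Illustrative sketch (assumes ib_poll_cq(), declared later in this header):
 * because IB_WC_RECV is 1 << 7, a consumer can classify completions with a
 * single bit test.  "handle_recv"/"handle_send" are hypothetical.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			continue;
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_recv(&wc);	// receive completion
 *		else
 *			handle_send(&wc);	// send-side completion
 *	}
 */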

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries;
	 * they map to the special QP0 (SMI) and QP1 (GSI).
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
	/* bit 7 currently unused */
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
	IB_QP_CREATE_SOURCE_QPN = 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in qp.
 */
struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	enum ib_qp_create_flags create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};
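
/*
 * Illustrative sketch (assumes ib_create_qp(), declared later in this
 * header): minimal RC QP creation.  The "pd" and "cq" variables are
 * hypothetical and must be valid objects on the same device.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr = 16,
 *			.max_recv_wr = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */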

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};
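
/*
 * Illustrative sketch (assumes ib_modify_qp(), declared later in this
 * header): the classic RESET -> INIT transition for an RC QP sets exactly
 * the four attributes the IB spec requires for that transition.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */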

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
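
/*
 * Illustrative sketch (assumes ib_post_send(), declared later in this
 * header): posting a signaled RDMA WRITE.  "laddr", "len", "lkey",
 * "raddr" and "rkey" are hypothetical values from a prior registration
 * and rkey exchange.
 *
 *	struct ib_sge sge = {
 *		.addr = laddr,
 *		.length = len,
 *		.lkey = lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list = &sge,
 *			.num_sge = 1,
 *		},
 *		.remote_addr = raddr,
 *		.rkey = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */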

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index;
	u8 port_num;
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
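
/*
 * Illustrative sketch (assumes ib_alloc_mr() and ib_map_mr_sg(), declared
 * later in this header): fast registration of a scatterlist via an
 * IB_WR_REG_MR work request.  "sg" and "nents" are hypothetical.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *
 *	if (n == nents) {
 *		struct ib_reg_wr wr = {
 *			.wr.opcode = IB_WR_REG_MR,
 *			.mr = mr,
 *			.key = mr->rkey,
 *			.access = IB_ACCESS_LOCAL_WRITE |
 *				  IB_ACCESS_REMOTE_READ,
 *		};
 *		// post wr.wr on the QP before any WR that uses mr->rkey
 *	}
 */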

struct ib_sig_handover_wr {
	struct ib_send_wr wr;
	struct ib_sig_attrs *sig_attrs;
	struct ib_mr *sig_mr;
	int access_flags;
	struct ib_sge *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = 1,
	IB_ACCESS_REMOTE_WRITE = (1<<1),
	IB_ACCESS_REMOTE_READ = (1<<2),
	IB_ACCESS_REMOTE_ATOMIC = (1<<3),
	IB_ACCESS_MW_BIND = (1<<4),
	IB_ZERO_BASED = (1<<5),
	IB_ACCESS_ON_DEMAND = (1<<6),
	IB_ACCESS_HUGETLB = (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/* Userspace requested uobject deletion. Call could fail. */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself. */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself. */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* Context is being cleaned-up, but commit was just completed. */
	RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg;	/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	int closing;

	/* locking the uobjects_list */
	struct mutex uobjects_lock;
	struct list_head uobjects;
	/* protects cleanup process from other actions */
	struct rw_semaphore cleanup_rwsem;
	enum rdma_remove_reason cleanup_reason;

	struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root_cached umem_tree;
	/*
	 * Protects the umem tree, as well as odp_mrs_count and mmu
	 * notifier registration.
	 */
	struct rw_semaphore umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier mn;
	atomic_t notifier_count;
	/* A list of umems that don't have private counters. */
	struct list_head no_private_counters;
	int odp_mrs_count;
#endif

	struct ib_rdmacg_object cg_obj;
};

struct ib_uobject {
	u64 user_handle;	/* handle given to us by userspace */
	struct ib_ucontext *context;	/* associated user context */
	void *object;		/* containing object */
	struct list_head list;	/* link to context's list */
	struct ib_rdmacg_object cg_obj;	/* rdmacg object */
	int id;			/* index into kernel idr */
	struct kref ref;
	atomic_t usecnt;	/* protects exclusive access */
	struct rcu_head rcu;	/* kfree_rcu() overhead */

	const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
	struct ib_uobject uobj;
	/* ufile contains the lock between context release and file close */
	struct ib_uverbs_file *ufile;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;	/* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt;	/* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,		/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	/* poll from softirq context */
	IB_POLL_WORKQUEUE,	/* poll from workqueue */
};

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;	/* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	/* Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags;	/* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
	IB_WQ_FLAGS = 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags;		/* Use enum ib_wq_flags */
	u32 flags_mask;		/* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce
	 * security properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge: Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;	/* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	bool need_inval;
	union {
		struct ib_uobject *uobject;	/* user */
		struct list_head qp_entry;	/* FR */
	};
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	/* L4 headers */
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_INNER = 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP = 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2  /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* For all fragmented packets except the
				    last one */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_uobject *uobject;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,
	IB_MAD_RESULT_SUCCESS = 1 << 0,
	IB_MAD_RESULT_REPLY = 1 << 1,
	IB_MAD_RESULT_CONSUMED = 1 << 2
};

struct ib_port_cache {
	u64 subnet_prefix;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table *gid;
	u8 lmc;
	enum ib_port_state port_state;
};

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache *ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int pkey_tbl_len;
	int gid_tbl_len;
	u32 core_cap_flags;
	u32 max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void *clnt_priv;
	struct ib_device *hca;
	u8 port_num;

	/* cleanup function must be specified */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t list_lock;
	struct list_head pkey_list;
};

struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	spinlock_t client_data_lock;
	struct list_head core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore
	 */
	struct list_head client_data_list;

	struct ib_cache cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable *port_immutable;

	int num_comp_vectors;

	struct ib_port_pkey_list *port_pkey_list;

	struct iw_cm_verbs *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   counter at index
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats,
			    u8 port, int index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u8 port_num);
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor may
	 * allocate memory to contain this information and store it in
	 * @context when a new GID entry is written to. Params are consistent
	 * until the next call of add_gid or del_gid. The function should
	 * return 0 on success or error otherwise. The function could be
	 * called concurrently for different ports. This function is only
	 * called when roce_gid_table is used.
	 */
	int (*add_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       const union ib_gid *gid,
		       const struct ib_gid_attr *attr,
		       void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       void **context);
	int (*query_pkey)(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey);
	int (*modify_device)(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify);
	int (*modify_port)(struct ib_device *device,
			   u8 port_num, int port_modify_mask,
			   struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
					      struct ib_udata *udata);
	int (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context,
		    struct vm_area_struct *vma);
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *(*create_ah)(struct ib_pd *pd,
				   struct rdma_ah_attr *ah_attr,
				   struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah,
			 struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah,
			struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
				     struct ib_srq_init_attr *srq_init_attr,
				     struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq,
			  struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq,
			 struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq);
	int (*post_srq_recv)(struct ib_srq *srq,
			     struct ib_recv_wr *recv_wr,
			     struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp,
			 struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp);
	int (*post_send)(struct ib_qp *qp,
			 struct ib_send_wr *send_wr,
			 struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp,
			 struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *(*create_cq)(struct ib_device *device,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
			 u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq);
	int (*resize_cq)(struct ib_cq *cq, int cqe,
			 struct ib_udata *udata);
	int (*poll_cq)(struct ib_cq *cq, int num_entries,
		       struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq,
			     enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq,
			       int wc_cnt);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
				    int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
				     u64 start, u64 length,
				     u64 virt_addr,
				     int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr,
			     int flags,
			     u64 start, u64 length,
			     u64 virt_addr,
			     int mr_access_flags,
			     struct ib_pd *pd,
			     struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg);
	int (*map_mr_sg)(struct ib_mr *mr,
			 struct scatterlist *sg,
			 int sg_nents,
			 unsigned int *sg_offset);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd,
				  enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
				    int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr,
			    u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*detach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*process_mad)(struct ib_device *device,
			   int process_mad_flags,
			   u8 port_num,
			   const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in_mad,
			   size_t in_mad_size,
			   struct ib_mad_hdr *out_mad,
			   size_t *out_mad_size,
			   u16 *out_mad_pkey_index);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_ucontext *ucontext,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       int domain);
	int (*destroy_flow)(struct ib_flow *flow_id);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq);
	int (*modify_wq)(struct ib_wq *wq,
			 struct ib_wq_attr *attr,
			 u32 wq_attr_mask,
			 struct ib_udata *udata);
	struct ib_rwq_ind_table *(*create_rwq_ind_table)(struct ib_device *device,
							 struct ib_rwq_ind_table_init_attr *init_attr,
							 struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
	 * doesn't support the specified rdma netdev type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module *owner;
	struct device dev;
	struct kobject *ports_parent;
	struct list_head port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int uverbs_abi_ver;
	u64 uverbs_cmd_mask;
	u64 uverbs_ex_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64 node_guid;
	u32 local_dma_lkey;
	u16 is_switch:1;
	u8 node_type;
	u8 phys_port_cnt;
	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device cg_device;
#endif

	u32 index;

	/*
	 * Implementation details of the RDMA core, don't use in drivers
	 */
	struct rdma_restrack_root res;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	struct uverbs_root_spec *specs_root;
};

struct ib_client {
	char *name;
	void (*add)(struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev: An RDMA device that the net_dev uses for communication.
	 * @port: A physical port number on the RDMA device.
	 * @pkey: P_Key that the net_dev uses if applicable.
	 * @gid: A GID that the net_dev uses to communicate.
	 * @addr: An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
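
/*
 * Illustrative sketch (not from the original header): a minimal ib_client.
 * The add/remove callbacks are invoked for every existing and future RDMA
 * device; the names below are hypothetical.
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		pr_info("device %s added\n", device->name);
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		// client_data is whatever ib_set_client_data() stored
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name = "my_client",
 *		.add = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	int ret = ib_register_client(&my_client);
 */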

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	const void __user *p = udata->inbuf + offset;
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}
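
/*
 * Illustrative sketch (not from the original header): the port helpers above
 * support 1-based iteration over all ports of a device, with the protocol
 * predicates selecting per-port behaviour.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (rdma_protocol_roce(device, port))
 *			; // Ethernet/RoCE specific setup
 *		else if (rdma_protocol_ib(device, port))
 *			; // native InfiniBand setup
 *	}
 */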

/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version are also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many of the
 * characteristics of InfiniBand MADs.
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * managment protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of a device has the capability
 * InfiniBand Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of a device has the capability
 * InfiniBand Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of a device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of a device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are addressing.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of a device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism in order to
 * manage its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to perform the so-called "all physical" memory registration.
	 * Usage of this flag is a severe security hole and should only be
	 * considered by legacy ULPs that cannot be converted.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
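
/*
 * Example (illustrative sketch, not part of this API): the usual PD lifecycle
 * for a kernel ULP; error handling beyond the allocation check is omitted.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs/CQs/MRs under this PD ...
 *	ib_dealloc_pd(pd);
 */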

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 *   vector, on behalf of a userspace consumer.  It resolves the destination
 *   MAC address for an ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *   the provider driver.
 *
 * Returns a valid address handle on success, or an ERR_PTR on failure.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);

/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);
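
/*
 * Example (illustrative sketch, not part of this API): replying to a UD
 * message by building an AH from its work completion, then releasing it.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post a UD send that references ah ...
 *	rdma_destroy_ah(ah);
 */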

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
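
/*
 * Example (illustrative sketch, not part of this API): posting one receive
 * work request to an SRQ; "sge" is assumed to describe an already
 * DMA-mapped buffer and "ctx" a caller-owned context.
 *
 *	struct ib_recv_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id = (uintptr_t)ctx;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		... handle the failed work request via bad_wr ...
 */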

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information
 *   that is used by the provider driver.
 *
 * Returns 0 on success and an appropriate error code on failure.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);
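
/*
 * Example (illustrative sketch, not part of this API): moving a freshly
 * created RC QP to INIT, the first step of the usual
 * RESET -> INIT -> RTR -> RTS sequence.
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.pkey_index = 0;
 *	attr.port_num = port_num;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	if (ib_modify_qp(qp, &attr,
 *			 IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			 IB_QP_PORT | IB_QP_ACCESS_FLAGS))
 *		... tear the QP down ...
 */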

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
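
/*
 * Example (illustrative sketch, not part of this API): posting a single
 * signaled send; "sge" is assumed to describe a DMA-mapped buffer and "ctx"
 * a caller-owned context.
 *
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id = (uintptr_t)ctx;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		... handle the failed work request via bad_wr ...
 */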

struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller);
#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
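
/*
 * Example (illustrative sketch, not part of this API): allocating a CQ with
 * the modern completion API; IB_POLL_DIRECT leaves all reaping to the
 * caller, which then drives it via ib_process_cq_direct().
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_DIRECT);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_process_cq_direct(cq, 16);
 *	...
 *	ib_free_cq(cq);
 */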

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
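
/*
 * Example (illustrative sketch, not part of this API): draining completions
 * from a CQ created with ib_create_cq().
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			... handle the error ...
 *		... complete the request identified by wc.wr_id ...
 *	}
 */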

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
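
/*
 * Example (illustrative sketch, not part of this API): mapping a kernel
 * buffer for device access around an I/O, then unmapping it.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... post work requests referencing dma_addr ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */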

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
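
/*
 * Example (illustrative sketch, not part of this API): producing a fresh key
 * before re-binding a type 2 memory window; only the low byte changes, so
 * the remote side's stale key is invalidated.
 *
 *	u32 new_rkey = ib_inc_rkey(mw->rkey);
 *	... use new_rkey in the subsequent bind work request ...
 */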

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @caller: Module name for kernel consumers
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
	__ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr.  The first use is for signature status check.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     the ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}
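
/*
 * Example (illustrative sketch, not part of this API): laying out a
 * DMA-mapped scatterlist over an MR allocated with ib_alloc_mr(), prior to
 * posting a fast registration work request.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		... the scatterlist did not fit; fall back or fail ...
 */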

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
}
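
/*
 * Example (illustrative sketch, not part of this API): filling a GRH-based
 * ah_attr by hand with the accessors in this section and rdma_ah_find_type()
 * below; "remote_gid" and "sgid_index" are hypothetical caller state, and
 * the hop limit of 64 is an arbitrary illustrative choice.
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_grh(&attr, &remote_gid, 0, sgid_index, 64, 0);
 */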

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * expose affinity mappings).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

#endif /* IB_VERBS_H */