#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;
struct ib_port;
struct hw_stats_device_data;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)			\
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

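/*
 * Illustrative only: a driver would typically use the rate-limited
 * variants on hot paths, e.g. from a completion handler (the ibdev and
 * port variables here are hypothetical):
 *
 *	ibdev_warn_ratelimited(ibdev,
 *			       "CQE error on port %u, requeuing\n", port);
 *
 * Each call site gets its own ratelimit state, so at most
 * DEFAULT_RATELIMIT_BURST messages are emitted per
 * DEFAULT_RATELIMIT_INTERVAL.
 */
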
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u32			port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

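/*
 * A sketch of the resulting mapping (the IPv4 row assumes a v4-mapped
 * GID, which is what ipv6_addr_v4mapped() tests above):
 *
 *	gid_type			network type
 *	IB_GID_TYPE_IB			RDMA_NETWORK_IB
 *	IB_GID_TYPE_ROCE		RDMA_NETWORK_ROCE_V1
 *	IB_GID_TYPE_ROCE_UDP_ENCAP	RDMA_NETWORK_IPV4 or RDMA_NETWORK_IPV6
 */
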
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* (1 << 9) is not in use (former INIT_TYPE) */
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * The device supports a per-device lkey (local_dma_lkey) that can
	 * be used to reference local memory without performing a memory
	 * registration.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* (1 << 16) is reserved (old SEND_W_INV) */
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * The device can insert and verify UDP/TCP checksums on UD IPoIB
	 * traffic, so the IPoIB driver may set NETIF_F_IP_CSUM for
	 * datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * The device supports the IB base memory management extensions
	 * (fast-register work requests and local invalidate).
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated; use IB_RAW_PACKET_CAP_IP_CSUM instead. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * The device supports cross-channel operations: execution of WQEs
	 * that synchronize I/O operations with a single hardware-managed
	 * completion queue.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated; use IB_RAW_PACKET_CAP_SCATTER_FCS instead. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
	/* The device supports padding incoming writes to a cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/*
	 * Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	u32		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16	max_cq_moderation_count;
	u16	max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64	length;
	u64	offset;
	u32	access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps	cq_caps;
	u64			max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

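/*
 * Example: ib_mtu_int_to_enum() rounds down to the nearest IB MTU, so
 * an Ethernet-style MTU of 1500 maps to IB_MTU_1024 and 9000 maps to
 * IB_MTU_4096; consequently
 * ib_mtu_enum_to_int(ib_mtu_int_to_enum(x)) <= x for any x >= 256.
 */
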
static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

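/*
 * Sketch: combining a queried ib_port_attr's active_width with its
 * active_speed (see enum ib_port_speed below) gives the total link
 * rate; e.g. IB_WIDTH_4X with IB_SPEED_EDR is 4 lanes at roughly
 * 25 Gb/s per lane, about 100 Gb/s total:
 *
 *	int lanes = ib_width_enum_to_int(attr.active_width);
 *
 * where attr is a hypothetical, previously queried struct ib_port_attr.
 */
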
enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64,
	IB_SPEED_NDR	= 128,
};

/*
 * struct rdma_hw_stats - hardware counters exported through sysfs.
 * @lock protects parallel writes to @lifespan and the 64-bit counter
 * values (which are not guaranteed to be written atomically on 32-bit
 * systems); @timestamp and @lifespan (in jiffies) let the core decide
 * how old the counters may get before being refreshed; @names holds one
 * static name per counter and must have at least @num_counters entries;
 * @value is filled in by the driver's get_stats routine.
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/*
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}

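/*
 * Typical (sketched) driver usage; the counter names and their count
 * are hypothetical:
 *
 *	static const char * const hw_counter_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	return rdma_alloc_hw_stats_struct(hw_counter_names,
 *					  ARRAY_SIZE(hw_counter_names),
 *					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */
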
/*
 * Bits describing the functionality the core requires a port to support.
 */
/* Management			0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format		0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol			0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32			phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u16			active_speed;
	u8			phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
	IB_PORT_OPA_MASK_CHG	= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u32		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

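/*
 * Sketch of consumer usage (ib_register_event_handler() is declared
 * elsewhere in this header; handler, device and my_event_cb are the
 * caller's own, hypothetical objects):
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_event_cb);
 *	ib_register_event_handler(&handler);
 */
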
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/*
		 * The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/*
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/*
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/*
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for normal registration
 * @IB_MR_TYPE_SG_GAPS:   memory region that can register any arbitrary sg
 *                        list (without the normal mr constraints, see
 *                        ib_map_mr_sg)
 * @IB_MR_TYPE_DM:        memory region that is used for device memory
 *                        registration
 * @IB_MR_TYPE_USER:      memory region that is used for a user-space
 *                        application
 * @IB_MR_TYPE_DMA:       memory region that is used for DMA operations
 *                        without address translation (VA=PA)
 * @IB_MR_TYPE_INTEGRITY: memory region that is used for data integrity
 *                        operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/*
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/*
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16	dlid;
	u8	src_path_bits;
};

struct roce_ah_attr {
	u8	dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32	dlid;
	u8	src_path_bits;
	bool	make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u32			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV	= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u32			port_num; /* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32	max_num_tags;
			} tag_matching;
		};
	} ext;
};

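/*
 * Sketch: only XRC and TM SRQs use ext.cq (see ib_srq_has_cq() above);
 * a plain SRQ only needs attr filled in, e.g.:
 *
 *	struct ib_srq_init_attr init = {
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *
 * with the sizes here being arbitrary example values.
 */
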
struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/*
	 * A range reserved for qp types internal to the low level
	 * driver; these are not visible at the IB core layer.
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the
 * event_handler callback to destroy the passed in qp.
 */
struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not destroy the passed in qp */
	void                  (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u32			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),

	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u32			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u32			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
	struct net_device	*xmit_slave;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/*
	 * A range reserved for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

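/*
 * The *_wr structs embed struct ib_send_wr as their first member, so a
 * request is built on the wrapper and posted through &wr.wr; e.g. for
 * an RDMA WRITE (sge, remote_addr and rkey are hypothetical locals):
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = { .opcode = IB_WR_RDMA_WRITE, .sg_list = &sge,
 *			.num_sge = 1 },
 *		.remote_addr = remote_addr,
 *		.rkey = rkey,
 *	};
 *
 * rdma_wr() performs the inverse container_of() conversion in drivers.
 */
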
struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u32			port_num;   /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

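/*
 * Sketch of the fast-registration flow: the MR's pages are first mapped
 * with ib_map_mr_sg() (declared elsewhere in this header), then a
 * reg_wr is posted, e.g.:
 *
 *	struct ib_reg_wr wr = {
 *		.wr = { .opcode = IB_WR_REG_MR },
 *		.mr = mr,
 *		.key = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *
 * where mr is a hypothetical MR allocated with ib_alloc_mr().
 */
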
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;	/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	struct list_head        pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;

	/* updated only by trace points */
	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

enum ib_raw_packet_caps {
	/*
	 * Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	/*
	 * Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS	  = (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM	  = (1 << 2),
	/*
	 * When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP	  = (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		       *wq_context;
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32			wq_num;
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type		wq_type;
	u32			max_wr;
	u32			max_sge;
	struct ib_cq	       *cq;
	void		      (*event_handler)(struct ib_event *, void *);
	u32			create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32			ind_tbl_num;
	u32			log_ind_tbl_size;
	struct ib_wq	      **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32			log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq	      **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u32			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/*
	 * A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u32			port;

	bool			integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bind to */
	struct rdma_counter    *counter;
};

struct ib_dm {
	struct ib_device  *device;
	u32		   length;
	u32		   flags;
	struct ib_uobject *uobject;
	atomic_t	   usecnt;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	bool		   need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm      *dm;
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/*
	 * default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/*
	 * default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	/* L4 headers */
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS    = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /*
				  * For all fragmented packets except the
				  * last one
				  */
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[];
};

/*
 * ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id from val holds the vni value.
 */
struct ib_flow_spec_tunnel {
	u32			     type;
	u16			     size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32	spi;
	__be32	seq;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_esp {
	u32			  type;
	u16			  size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16	c_ks_res0_ver;
	__be16	protocol;
	__be32	key;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_gre {
	u32			  type;
	u16			  size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32	tag;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_mpls {
	u32			   type;
	u16			   size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	type;
	u16			size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_flow_action	*act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_counters	*counters;
};

union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6	ipv6;
	struct ib_flow_spec_tunnel	tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_gre		gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag	flow_tag;
	struct ib_flow_spec_action_drop	drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u32	     port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/*
	 * The user-space flags (enum ib_uverbs_flow_action_esp_flags)
	 * occupy the lower 32 bits so that hardware and user-space flag
	 * values stay identical; kernel-internal flags live above bit 31.
	 */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats		*keymat;
	struct ib_flow_action_attrs_esp_replays		*replay;
	struct ib_flow_spec_list			*encap;

	u32						esn;
	u32						spi;
	u32						seq;
	u32						tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64						flags;
	u64						hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};

struct ib_mad;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table  *gid;
	u8		      lmc;
	enum ib_port_state    port_state;
};

struct ib_port_immutable {
	int	pkey_tbl_len;
	int	gid_tbl_len;
	u32	core_cap_flags;
	u32	max_mad_size;
};

struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;

	spinlock_t netdev_lock;

	struct list_head pkey_list;

	struct ib_port_cache cache;

	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct ib_port *sysfs;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/*
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void		  *clnt_priv;
	struct ib_device  *hca;
	u32		   port_num;
	int		   mtu;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
	/* timeout */
	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};

struct rdma_netdev_alloc_params {
	size_t sizeof_priv;
	unsigned int txqs;
	unsigned int rxqs;
	void *param;

	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
				      struct net_device *netdev, void *param);
};

struct ib_odp_counters {
	atomic64_t faults;
	atomic64_t invalidations;
	atomic64_t prefetch;
};

struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t	usecnt;
};

struct ib_counters_read_attr {
	u64	*counters_buff;
	u32	ncounters;
	u32	flags; /* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;

#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

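/*
 * Sketch of how a driver pairs these macros: the ops table records the
 * size of the driver's wrapper struct, and the core allocates it via
 * rdma_zalloc_drv_obj(). The names below (my_pd, my_ops) are
 * hypothetical:
 *
 *	struct my_pd {
 *		struct ib_pd ibpd;	(must embed the ib_ struct first)
 *		...driver-private fields...
 *	};
 *
 *	static const struct ib_device_ops my_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 */
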
struct rdma_user_mmap_entry {
	struct kref ref;
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	size_t npages;
	bool driver_removed;
};

/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}

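/*
 * Sketch: an entry covering npages pages starting at start_pgoff is
 * handed to userspace as a byte offset, so
 * rdma_user_mmap_get_offset(entry) >> PAGE_SHIFT recovers
 * entry->start_pgoff for use as vm_pgoff in the driver's mmap handler.
 */
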
2293
2294
2295
2296
2297
2298
2299struct ib_device_ops {
2300 struct module *owner;
2301 enum rdma_driver_id driver_id;
2302 u32 uverbs_abi_ver;
2303 unsigned int uverbs_no_driver_id_binding:1;
2304
2305
2306
2307
2308
2309
2310 const struct attribute_group *device_group;
2311 const struct attribute_group **port_groups;
2312
2313 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2314 const struct ib_send_wr **bad_send_wr);
2315 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2316 const struct ib_recv_wr **bad_recv_wr);
2317 void (*drain_rq)(struct ib_qp *qp);
2318 void (*drain_sq)(struct ib_qp *qp);
2319 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2320 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2321 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2322 int (*post_srq_recv)(struct ib_srq *srq,
2323 const struct ib_recv_wr *recv_wr,
2324 const struct ib_recv_wr **bad_recv_wr);
2325 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2326 u32 port_num, const struct ib_wc *in_wc,
2327 const struct ib_grh *in_grh,
2328 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2329 size_t *out_mad_size, u16 *out_mad_pkey_index);
2330 int (*query_device)(struct ib_device *device,
2331 struct ib_device_attr *device_attr,
2332 struct ib_udata *udata);
2333 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2334 struct ib_device_modify *device_modify);
2335 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2336 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2337 int comp_vector);
2338 int (*query_port)(struct ib_device *device, u32 port_num,
2339 struct ib_port_attr *port_attr);
2340 int (*modify_port)(struct ib_device *device, u32 port_num,
2341 int port_modify_mask,
2342 struct ib_port_modify *port_modify);
2343
2344
2345
2346
2347
2348
2349 int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2350 struct ib_port_immutable *immutable);
2351 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2352 u32 port_num);
2353
2354
2355
2356
2357
2358
2359
2360
2361 struct net_device *(*get_netdev)(struct ib_device *device,
2362 u32 port_num);
2363
2364
2365
2366
2367
2368
2369 struct net_device *(*alloc_rdma_netdev)(
2370 struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2371 const char *name, unsigned char name_assign_type,
2372 void (*setup)(struct net_device *));
2373
2374 int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2375 enum rdma_netdev_t type,
2376 struct rdma_netdev_alloc_params *params);
2377
2378
2379
2380
2381
2382 int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2383 union ib_gid *gid);
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2398
2399
2400
2401
2402
2403
2404
2405
2406 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2407 int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2408 u16 *pkey);
2409 int (*alloc_ucontext)(struct ib_ucontext *context,
2410 struct ib_udata *udata);
2411 void (*dealloc_ucontext)(struct ib_ucontext *context);
2412 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2413
2414
2415
2416
2417
2418
2419 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2420 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2421 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2422 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2423 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2424 struct ib_udata *udata);
2425 int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2426 struct ib_udata *udata);
2427 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2428 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2429 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2430 int (*create_srq)(struct ib_srq *srq,
2431 struct ib_srq_init_attr *srq_init_attr,
2432 struct ib_udata *udata);
2433 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2434 enum ib_srq_attr_mask srq_attr_mask,
2435 struct ib_udata *udata);
2436 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2437 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2438 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2439 struct ib_qp_init_attr *qp_init_attr,
2440 struct ib_udata *udata);
2441 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2442 int qp_attr_mask, struct ib_udata *udata);
2443 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2444 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2445 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2446 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2447 struct ib_udata *udata);
2448 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2449 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2450 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2451 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2452 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2453 u64 virt_addr, int mr_access_flags,
2454 struct ib_udata *udata);
2455 struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2456 u64 length, u64 virt_addr, int fd,
2457 int mr_access_flags,
2458 struct ib_udata *udata);
2459 struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2460 u64 length, u64 virt_addr,
2461 int mr_access_flags, struct ib_pd *pd,
2462 struct ib_udata *udata);
2463 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2464 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2465 u32 max_num_sg);
2466 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2467 u32 max_num_data_sg,
2468 u32 max_num_meta_sg);
2469 int (*advise_mr)(struct ib_pd *pd,
2470 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2471 struct ib_sge *sg_list, u32 num_sge,
2472 struct uverbs_attr_bundle *attrs);

 /*
 * Kernel users should universally support relaxed ordering (RO), as
 * they are designed to read data only after observing the CQE and use
 * the DMA API correctly.
 *
 * Some drivers implicitly enable RO if the platform supports it.
 */
2481 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2482 unsigned int *sg_offset);
2483 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2484 struct ib_mr_status *mr_status);
2485 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2486 int (*dealloc_mw)(struct ib_mw *mw);
2487 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2488 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2489 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2490 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2491 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2492 struct ib_flow_attr *flow_attr,
2493 struct ib_udata *udata);
2494 int (*destroy_flow)(struct ib_flow *flow_id);
2495 struct ib_flow_action *(*create_flow_action_esp)(
2496 struct ib_device *device,
2497 const struct ib_flow_action_attrs_esp *attr,
2498 struct uverbs_attr_bundle *attrs);
2499 int (*destroy_flow_action)(struct ib_flow_action *action);
2500 int (*modify_flow_action_esp)(
2501 struct ib_flow_action *action,
2502 const struct ib_flow_action_attrs_esp *attr,
2503 struct uverbs_attr_bundle *attrs);
2504 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2505 int state);
2506 int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2507 struct ifla_vf_info *ivf);
2508 int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2509 struct ifla_vf_stats *stats);
2510 int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2511 struct ifla_vf_guid *node_guid,
2512 struct ifla_vf_guid *port_guid);
2513 int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2514 int type);
2515 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2516 struct ib_wq_init_attr *init_attr,
2517 struct ib_udata *udata);
2518 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2519 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2520 u32 wq_attr_mask, struct ib_udata *udata);
2521 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2522 struct ib_rwq_ind_table_init_attr *init_attr,
2523 struct ib_udata *udata);
2524 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2525 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2526 struct ib_ucontext *context,
2527 struct ib_dm_alloc_attr *attr,
2528 struct uverbs_attr_bundle *attrs);
2529 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2530 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2531 struct ib_dm_mr_attr *attr,
2532 struct uverbs_attr_bundle *attrs);
2533 int (*create_counters)(struct ib_counters *counters,
2534 struct uverbs_attr_bundle *attrs);
2535 int (*destroy_counters)(struct ib_counters *counters);
2536 int (*read_counters)(struct ib_counters *counters,
2537 struct ib_counters_read_attr *counters_read_attr,
2538 struct uverbs_attr_bundle *attrs);
2539 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2540 int data_sg_nents, unsigned int *data_sg_offset,
2541 struct scatterlist *meta_sg, int meta_sg_nents,
2542 unsigned int *meta_sg_offset);

 /**
 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
 *   fill in the driver initialized data. The struct is kfree()'ed by
 *   the sysfs core when the device is removed. A lifespan of -1 in the
 *   returned struct tells the core to set a default lifespan.
 */
2550 struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2551 struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2552 u32 port_num);
 /**
 * get_hw_stats - Fill in the counter value(s) in the stats struct.
 * @index - The index in the value array we wish to have updated, or
 *   num_counters if we want all stats updated
 * Return codes -
 *   < 0 - Error, no counters updated
 *   index - Updated the single counter pointed to by index
 *   num_counters - Updated all counters (will reset the timestamp
 *     and prevent further calls to do_get_stats_timer)
 * Drivers are allowed to update all counters in lieu of just the
 *   one given in index at their option
 */
2565 int (*get_hw_stats)(struct ib_device *device,
2566 struct rdma_hw_stats *stats, u32 port, int index);

 /*
 * Allows rdma drivers to add their own restrack attributes.
 */
2571 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2572 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2573 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2574 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2575 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2576 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2577 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);

 /* Device lifecycle callbacks */
 /*
 * Called after the device becomes registered, before clients are
 * attached.
 */
2584 int (*enable_driver)(struct ib_device *dev);
 /*
 * This is called as part of ib_dealloc_device().
 */
2588 void (*dealloc_driver)(struct ib_device *dev);

 /* iWarp CM callbacks */
2591 void (*iw_add_ref)(struct ib_qp *qp);
2592 void (*iw_rem_ref)(struct ib_qp *qp);
2593 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2594 int (*iw_connect)(struct iw_cm_id *cm_id,
2595 struct iw_cm_conn_param *conn_param);
2596 int (*iw_accept)(struct iw_cm_id *cm_id,
2597 struct iw_cm_conn_param *conn_param);
2598 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2599 u8 pdata_len);
2600 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2601 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);

 /**
 * counter_bind_qp - Bind a QP to a counter.
 * @counter - The counter to be bound. If counter->id is zero then
 *   the driver needs to allocate a new counter and set counter->id
 */
2607 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
 /**
 * counter_unbind_qp - Unbind the QP from the dynamically-allocated
 *   counter.
 */
2612 int (*counter_unbind_qp)(struct ib_qp *qp);
 /**
 * counter_dealloc - De-allocate the hw counter.
 */
2616 int (*counter_dealloc)(struct rdma_counter *counter);
 /**
 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
 * the driver initialized data.
 */
2621 struct rdma_hw_stats *(*counter_alloc_stats)(
2622 struct rdma_counter *counter);
 /**
 * counter_update_stats - Query the stats value of this counter.
 */
2626 int (*counter_update_stats)(struct rdma_counter *counter);

 /**
 * Allows rdma drivers to add their own restrack attributes
 * dumped via the 'rdma stat' iproute2 command.
 */
2632 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);

 /* query driver for its ucontext properties */
2635 int (*query_ucontext)(struct ib_ucontext *context,
2636 struct uverbs_attr_bundle *attrs);
2637
2638 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2639 DECLARE_RDMA_OBJ_SIZE(ib_counters);
2640 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2641 DECLARE_RDMA_OBJ_SIZE(ib_mw);
2642 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2643 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2644 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2645 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2646 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2647};
2648
2649struct ib_core_device {
 /* device must be the first element in the structure until the
 * union of ib_core_device and device exists in ib_device.
 */
2653 struct device dev;
2654 possible_net_t rdma_net;
2655 struct kobject *ports_kobj;
2656 struct list_head port_list;
2657 struct ib_device *owner;
2658};
2659
2660struct rdma_restrack_root;
2661struct ib_device {
 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2663 struct device *dma_device;
2664 struct ib_device_ops ops;
2665 char name[IB_DEVICE_NAME_MAX];
2666 struct rcu_head rcu_head;
2667
2668 struct list_head event_handler_list;
 /* Protects event_handler_list */
2670 struct rw_semaphore event_handler_rwsem;

 /* Protects QP's event_handler calls and open_qp list */
2673 spinlock_t qp_open_list_lock;
2674
2675 struct rw_semaphore client_data_rwsem;
2676 struct xarray client_data;
2677 struct mutex unregistration_lock;

 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2680 rwlock_t cache_lock;
 /**
 * port_data is indexed by port number
 */
2684 struct ib_port_data *port_data;
2685
2686 int num_comp_vectors;
2687
2688 union {
2689 struct device dev;
2690 struct ib_core_device coredev;
2691 };
2692

 /* First group is for device attributes,
 * second group is for driver provided attributes (optional).
 * Third group is for the hw stats.
 * It is a NULL terminated array.
 */
2698 const struct attribute_group *groups[4];
2699
2700 u64 uverbs_cmd_mask;
2701
2702 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2703 __be64 node_guid;
2704 u32 local_dma_lkey;
2705 u16 is_switch:1;
 /* Indicates kernel verbs support, should not be used in drivers */
2707 u16 kverbs_provider:1;
 /* CQ adaptive moderation (RDMA DIM) */
2709 u16 use_cq_dim:1;
2710 u8 node_type;
2711 u32 phys_port_cnt;
2712 struct ib_device_attr attrs;
2713 struct hw_stats_device_data *hw_stats_data;
2714
2715#ifdef CONFIG_CGROUP_RDMA
2716 struct rdmacg_device cg_device;
2717#endif
2718
2719 u32 index;
2720
2721 spinlock_t cq_pools_lock;
2722 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2723
2724 struct rdma_restrack_root *res;
2725
2726 const struct uapi_definition *driver_def;
2727
 /*
 * Positive refcount indicates that the device is currently
 * registered and cannot be unregistered.
 */
2732 refcount_t refcount;
2733 struct completion unreg_completion;
2734 struct work_struct unregistration_work;
2735
2736 const struct rdma_link_ops *link_ops;
2737
 /* Protects compat_devs xarray modifications */
2739 struct mutex compat_devs_mutex;
 /* Maintains compat devices for each net namespace */
2741 struct xarray compat_devs;
2742
 /* Used by iWarp CM */
2744 char iw_ifname[IFNAMSIZ];
2745 u32 iw_driver_flags;
2746 u32 lag_flags;
2747};
2748
2749struct ib_client_nl_info;
2750struct ib_client {
2751 const char *name;
2752 int (*add)(struct ib_device *ibdev);
2753 void (*remove)(struct ib_device *, void *client_data);
2754 void (*rename)(struct ib_device *dev, void *client_data);
2755 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2756 struct ib_client_nl_info *res);
2757 int (*get_global_nl_info)(struct ib_client_nl_info *res);
 /* Returns the net_dev belonging to this ib_client and matching the
 * given parameters.
 * @dev: An RDMA device that the net_dev uses for communication.
 * @port: A physical port number on the RDMA device.
 * @pkey: P_Key that the net_dev uses if applicable.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: An IP address the net_dev is configured with.
 * @client_data: The device's client data set by ib_set_client_data().
 *
 * An ib_client that implements a net_dev on top of RDMA devices
 * (such as IP over IB) should implement this callback, allowing it
 * to be called by the ib_cm module to find the right net_dev
 * for a given request.
 *
 * The callback must be able to handle its own locking, possibly
 * including __rcu locking functions. The callback may occur in any
 * context.
 */
2774 struct net_device *(*get_net_dev_by_params)(
2775 struct ib_device *dev,
2776 u32 port,
2777 u16 pkey,
2778 const union ib_gid *gid,
2779 const struct sockaddr *addr,
2780 void *client_data);
2781
2782 refcount_t uses;
2783 struct completion uses_zero;
2784 u32 client_id;
2785
 /* kverbs are not required by the client */
2787 u8 no_kverbs_req:1;
2788};
2789
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
2796struct ib_block_iter {
 /* internal states */
 struct scatterlist *__sg; /* sg holding the current aligned block */
 dma_addr_t __dma_addr; /* unaligned DMA address of this block */
 unsigned int __sg_nents; /* number of SG entries */
 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
 unsigned int __pg_bit; /* alignment of current block */
2803};
2804
2805struct ib_device *_ib_alloc_device(size_t size);
2806#define ib_alloc_device(drv_struct, member) \
2807 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2808 BUILD_BUG_ON_ZERO(offsetof( \
2809 struct drv_struct, member))), \
2810 struct drv_struct, member)
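
/*
 * Usage sketch (hypothetical driver; "foo_dev" and its "ibdev" member are
 * illustrative names only): embed struct ib_device as the *first* member of
 * the driver-private structure -- the BUILD_BUG_ON_ZERO() above enforces
 * offset zero -- and let ib_alloc_device() size the whole allocation:
 *
 *	struct foo_dev {
 *		struct ib_device ibdev;	// must stay at offset 0
 *		u32 private_state;
 *	};
 *
 *	struct foo_dev *dev = ib_alloc_device(foo_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */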
2811
2812void ib_dealloc_device(struct ib_device *device);
2813
2814void ib_get_device_fw_str(struct ib_device *device, char *str);
2815
2816int ib_register_device(struct ib_device *device, const char *name,
2817 struct device *dma_device);
2818void ib_unregister_device(struct ib_device *device);
2819void ib_unregister_driver(enum rdma_driver_id driver_id);
2820void ib_unregister_device_and_put(struct ib_device *device);
2821void ib_unregister_device_queued(struct ib_device *ib_dev);
2822
int ib_register_client(struct ib_client *client);
2824void ib_unregister_client(struct ib_client *client);
2825
2826void __rdma_block_iter_start(struct ib_block_iter *biter,
2827 struct scatterlist *sglist,
2828 unsigned int nents,
2829 unsigned long pgsz);
2830bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
2837static inline dma_addr_t
2838rdma_block_iter_dma_address(struct ib_block_iter *biter)
2839{
2840 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2841}
2842
/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * block's aligned DMA address.
 */
2853#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2854 for (__rdma_block_iter_start(biter, sglist, nents, \
2855 pgsz); \
2856 __rdma_block_iter_next(biter);)
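
/*
 * Usage sketch: walk a DMA-mapped SGL in pgsz-aligned blocks and record each
 * block's aligned address. "pages" and "npages" are hypothetical driver
 * state, not part of this API.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, pgsz)
 *		pages[npages++] = rdma_block_iter_dma_address(&biter);
 */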
2857
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns,
 * this cannot be called.
 */
2868static inline void *ib_get_client_data(struct ib_device *device,
2869 struct ib_client *client)
2870{
2871 return xa_load(&device->client_data, client->client_id);
2872}
2873void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2874 void *data);
2875void ib_set_device_ops(struct ib_device *device,
2876 const struct ib_device_ops *ops);
2877
2878int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2879 unsigned long pfn, unsigned long size, pgprot_t prot,
2880 struct rdma_user_mmap_entry *entry);
2881int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2882 struct rdma_user_mmap_entry *entry,
2883 size_t length);
2884int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2885 struct rdma_user_mmap_entry *entry,
2886 size_t length, u32 min_pgoff,
2887 u32 max_pgoff);
2888
2889struct rdma_user_mmap_entry *
2890rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2891 unsigned long pgoff);
2892struct rdma_user_mmap_entry *
2893rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2894 struct vm_area_struct *vma);
2895void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2896
2897void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2898
2899static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2900{
2901 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2902}
2903
2904static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2905{
2906 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2907}
2908
2909static inline bool ib_is_buffer_cleared(const void __user *p,
2910 size_t len)
2911{
2912 bool ret;
2913 u8 *buf;
2914
2915 if (len > USHRT_MAX)
2916 return false;
2917
2918 buf = memdup_user(p, len);
2919 if (IS_ERR(buf))
2920 return false;
2921
2922 ret = !memchr_inv(buf, 0, len);
2923 kfree(buf);
2924 return ret;
2925}
2926
2927static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2928 size_t offset,
2929 size_t len)
2930{
2931 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2932}
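
/*
 * Common driver-side pattern (a sketch; "cmd" stands for a hypothetical
 * driver command struct): accept an over-long request only when every byte
 * beyond the known command is zero, which keeps future command extensions
 * backward compatible:
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */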
2933
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state, next_state, and the mask are consistent.
 */
2949bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2950 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2951
2952void ib_register_event_handler(struct ib_event_handler *event_handler);
2953void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2954void ib_dispatch_event(const struct ib_event *event);
2955
2956int ib_query_port(struct ib_device *device,
2957 u32 port_num, struct ib_port_attr *port_attr);
2958
2959enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2960 u32 port_num);
2961
/**
 * rdma_cap_ib_switch - Check if the device is an IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit in
 * the ib_device structure at init time.
 *
 * Return: true if the device is an IB switch.
 */
2971static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2972{
2973 return device->is_switch;
2974}
2975
/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return: start port number
 */
2984static inline u32 rdma_start_port(const struct ib_device *device)
2985{
2986 return rdma_cap_ib_switch(device) ? 0 : 1;
2987}
2988
/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device
 * @iter: The unsigned int to store the port number
 */
2994#define rdma_for_each_port(device, iter) \
2995 for (iter = rdma_start_port(device + \
2996 BUILD_BUG_ON_ZERO(!__same_type(u32, \
2997 iter))); \
2998 iter <= rdma_end_port(device); iter++)
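
/*
 * Usage sketch: the iterator transparently handles the fact that switches
 * expose port 0 while HCAs number ports from 1. "handle_port" is a
 * hypothetical helper.
 *
 *	u32 port;
 *
 *	rdma_for_each_port(ibdev, port)
 *		handle_port(ibdev, port);
 */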
2999
/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return: last port number
 */
3008static inline u32 rdma_end_port(const struct ib_device *device)
3009{
3010 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3011}
3012
3013static inline int rdma_is_port_valid(const struct ib_device *device,
3014 unsigned int port)
3015{
3016 return (port >= rdma_start_port(device) &&
3017 port <= rdma_end_port(device));
3018}
3019
3020static inline bool rdma_is_grh_required(const struct ib_device *device,
3021 u32 port_num)
3022{
3023 return device->port_data[port_num].immutable.core_cap_flags &
3024 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3025}
3026
3027static inline bool rdma_protocol_ib(const struct ib_device *device,
3028 u32 port_num)
3029{
3030 return device->port_data[port_num].immutable.core_cap_flags &
3031 RDMA_CORE_CAP_PROT_IB;
3032}
3033
3034static inline bool rdma_protocol_roce(const struct ib_device *device,
3035 u32 port_num)
3036{
3037 return device->port_data[port_num].immutable.core_cap_flags &
3038 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3039}
3040
3041static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3042 u32 port_num)
3043{
3044 return device->port_data[port_num].immutable.core_cap_flags &
3045 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3046}
3047
3048static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3049 u32 port_num)
3050{
3051 return device->port_data[port_num].immutable.core_cap_flags &
3052 RDMA_CORE_CAP_PROT_ROCE;
3053}
3054
3055static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3056 u32 port_num)
3057{
3058 return device->port_data[port_num].immutable.core_cap_flags &
3059 RDMA_CORE_CAP_PROT_IWARP;
3060}
3061
3062static inline bool rdma_ib_or_roce(const struct ib_device *device,
3063 u32 port_num)
3064{
3065 return rdma_protocol_ib(device, port_num) ||
3066 rdma_protocol_roce(device, port_num);
3067}
3068
3069static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3070 u32 port_num)
3071{
3072 return device->port_data[port_num].immutable.core_cap_flags &
3073 RDMA_CORE_CAP_PROT_RAW_PACKET;
3074}
3075
3076static inline bool rdma_protocol_usnic(const struct ib_device *device,
3077 u32 port_num)
3078{
3079 return device->port_data[port_num].immutable.core_cap_flags &
3080 RDMA_CORE_CAP_PROT_USNIC;
3081}
3082
/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices. A slightly
 * more general view is that manageability is supported via the MAD
 * protocol.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
3095static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3096{
3097 return device->port_data[port_num].immutable.core_cap_flags &
3098 RDMA_CORE_CAP_IB_MAD;
3099}
3100
/**
 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions. These OPA MADs share many of the
 * characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
3120static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3121{
3122 return device->port_data[port_num].immutable.core_cap_flags &
3123 RDMA_CORE_CAP_OPA_MAD;
3124}
3125
/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
3146static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3147{
3148 return device->port_data[port_num].immutable.core_cap_flags &
3149 RDMA_CORE_CAP_IB_SMI;
3150}
3151
/**
 * rdma_cap_ib_cm - Check if the port of a device has the capability
 * InfiniBand Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
3167static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3168{
3169 return device->port_data[port_num].immutable.core_cap_flags &
3170 RDMA_CORE_CAP_IB_CM;
3171}
3172
/**
 * rdma_cap_iw_cm - Check if the port of a device has the capability
 * iWARP Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
3185static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3186{
3187 return device->port_data[port_num].immutable.core_cap_flags &
3188 RDMA_CORE_CAP_IW_CM;
3189}
3190
/**
 * rdma_cap_ib_sa - Check if the port of a device has the capability
 * InfiniBand Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface. This does not imply that the SA service is
 * running locally.
 */
3206static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3207{
3208 return device->port_data[port_num].immutable.core_cap_flags &
3209 RDMA_CORE_CAP_IB_SA;
3210}
3211
/**
 * rdma_cap_ib_mcast - Check if the port of a device has the capability
 * InfiniBand Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration. Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group. It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group. And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM for multicast group
 * membership.
 */
3229static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3230 u32 port_num)
3231{
3232 return rdma_cap_ib_sa(device, port_num);
3233}
3234
/**
 * rdma_cap_af_ib - Check if the port of a device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID. RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
3248static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3249{
3250 return device->port_data[port_num].immutable.core_cap_flags &
3251 RDMA_CORE_CAP_AF_IB;
3252}
3253
/**
 * rdma_cap_eth_ah - Check if the port of a device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port. Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are addressing.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
3270static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3271{
3272 return device->port_data[port_num].immutable.core_cap_flags &
3273 RDMA_CORE_CAP_ETH_AH;
3274}
3275
/**
 * rdma_cap_opa_ah - Check if the port of a device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
3285static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3286{
3287 return (device->port_data[port_num].immutable.core_cap_flags &
3288 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3289}
3290
/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload. No other headers
 * are included.
 *
 * Return the max MAD size required by the Port. Will return 0 if the port
 * does not support MADs.
 */
3303static inline size_t rdma_max_mad_size(const struct ib_device *device,
3304 u32 port_num)
3305{
3306 return device->port_data[port_num].immutable.max_mad_size;
3307}
3308
/**
 * rdma_cap_roce_gid_table - Check if the port of the device uses the
 * RoCE GID table.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism in order to
 * manage its GIDs.
 */
3322static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3323 u32 port_num)
3324{
3325 return rdma_protocol_roce(device, port_num) &&
3326 device->ops.add_gid && device->ops.del_gid;
3327}
3328
/*
 * Check if the device supports READ W/ INVALIDATE.
 */
3332static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3333{
 /*
 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
 * has support for it yet.
 */
3338 return rdma_protocol_iwarp(dev, port_num);
3339}
3340
/**
 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
 * @device: Device
 * @port_num: 1 based Port number
 *
 * Return: true if port is an Intel OPA port, false if not.
 */
3348static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3349 u32 port_num)
3350{
3351 return (device->port_data[port_num].immutable.core_cap_flags &
3352 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3353}
3354
/**
 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
 * @device: Device
 * @port: Port number
 * @mtu: enum value of MTU
 *
 * Return: the MTU size supported by the port as an integer value. Will
 * return -1 if the enum value of mtu is not supported.
 */
3364static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3365 int mtu)
3366{
3367 if (rdma_core_cap_opa_port(device, port))
3368 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3369 else
3370 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3371}
3372
/**
 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
 * @device: Device
 * @port: Port number
 * @attr: port attribute
 *
 * Return: the MTU size supported by the port as an integer value.
 */
3381static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3382 struct ib_port_attr *attr)
3383{
3384 if (rdma_core_cap_opa_port(device, port))
3385 return attr->phys_mtu;
3386 else
3387 return ib_mtu_enum_to_int(attr->max_mtu);
3388}
3389
3390int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3391 int state);
3392int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3393 struct ifla_vf_info *info);
3394int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3395 struct ifla_vf_stats *stats);
3396int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3397 struct ifla_vf_guid *node_guid,
3398 struct ifla_vf_guid *port_guid);
3399int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3400 int type);
3401
3402int ib_query_pkey(struct ib_device *device,
3403 u32 port_num, u16 index, u16 *pkey);
3404
3405int ib_modify_device(struct ib_device *device,
3406 int device_modify_mask,
3407 struct ib_device_modify *device_modify);
3408
3409int ib_modify_port(struct ib_device *device,
3410 u32 port_num, int port_modify_mask,
3411 struct ib_port_modify *port_modify);
3412
3413int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3414 u32 *port_num, u16 *index);
3415
3416int ib_find_pkey(struct ib_device *device,
3417 u32 port_num, u16 pkey, u16 *index);
3418
3419enum ib_pd_flags {
 /*
 * Create a memory registration for all memory in the system and place
 * the rkey for it into pd->unsafe_global_rkey. This can be used by
 * ULPs to avoid the overhead of dynamic MRs.
 *
 * This flag is generally considered unsafe and must only be used in
 * extremely trusted environments. Every use of it will log a warning
 * in the kernel log.
 */
3429 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3430};
3431
3432struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3433 const char *caller);
3434
/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
3446#define ib_alloc_pd(device, flags) \
3447 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3448
3449int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3450
/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 */
3457static inline void ib_dealloc_pd(struct ib_pd *pd)
3458{
3459 int ret = ib_dealloc_pd_user(pd, NULL);
3460
3461 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3462}
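
/*
 * Typical kernel ULP lifecycle (sketch):
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create QPs/CQs/MRs under this PD ...
 *	ib_dealloc_pd(pd);	// all objects must be destroyed first
 */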
3463
3464enum rdma_create_ah_flags {
 /* In a sleepable context */
3466 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3467};
3468
/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
3478struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3479 u32 flags);
3480
/**
 * rdma_create_user_ah - Creates an address handle for the given address
 * vector. It resolves the destination mac address for ah attributes of
 * RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input output buffer information needed by
 *         the provider driver.
 *
 * It returns a valid address handle on success and an ERR_PTR on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
3493struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3494 struct rdma_ah_attr *ah_attr,
3495 struct ib_udata *udata);
3496
/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from the GRH or IPv4 header.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
3504int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3505 enum rdma_network_type net_type,
3506 union ib_gid *sgid, union ib_gid *dgid);
3507
/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
3512int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3513
/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 *
 * When ib_init_ah_attr_from_wc() returns success,
 * (a) An attribute copy of the sgid_attr is taken, whose reference is kept
 *     in attr.grh.sgid_attr.
 * (b) The sgid_attr reference is released when rdma_destroy_ah_attr() is
 *     called on attr.
 */
3532int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3533 const struct ib_wc *wc, const struct ib_grh *grh,
3534 struct rdma_ah_attr *ah_attr);
3535
/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
3548struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3549 const struct ib_grh *grh, u32 port_num);
3550
/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
3558int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3559
/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
3567int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3568
3569enum rdma_destroy_ah_flags {
 /* In a sleepable context */
3571 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3572};
3573
/**
 * rdma_destroy_ah_user - Destroys an address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 * @udata: Valid user data or NULL for kernel objects.
 */
3580int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3581
/**
 * rdma_destroy_ah - Destroys a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
3589static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3590{
3591 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3592
3593 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3594}
3595
3596struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3597 struct ib_srq_init_attr *srq_init_attr,
3598 struct ib_usrq_object *uobject,
3599 struct ib_udata *udata);
3600static inline struct ib_srq *
3601ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3602{
3603 if (!pd->device->ops.create_srq)
3604 return ERR_PTR(-EOPNOTSUPP);
3605
3606 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3607}
3608
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
3621int ib_modify_srq(struct ib_srq *srq,
3622 struct ib_srq_attr *srq_attr,
3623 enum ib_srq_attr_mask srq_attr_mask);
3624
/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
3631int ib_query_srq(struct ib_srq *srq,
3632 struct ib_srq_attr *srq_attr);
3633
/**
 * ib_destroy_srq_user - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 * @udata: Valid user data or NULL for kernel objects.
 */
3639int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3640
/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 */
3647static inline void ib_destroy_srq(struct ib_srq *srq)
3648{
3649 int ret = ib_destroy_srq_user(srq, NULL);
3650
3651 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3652}
3653
/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
3661static inline int ib_post_srq_recv(struct ib_srq *srq,
3662 const struct ib_recv_wr *recv_wr,
3663 const struct ib_recv_wr **bad_recv_wr)
3664{
3665 const struct ib_recv_wr *dummy;
3666
3667 return srq->device->ops.post_srq_recv(srq, recv_wr,
3668 bad_recv_wr ? : &dummy);
3669}
3670
3671struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
3672 struct ib_qp_init_attr *qp_init_attr,
3673 const char *caller);
3674static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3675 struct ib_qp_init_attr *init_attr)
3676{
3677 return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
3678}
3679
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
3691int ib_modify_qp_with_udata(struct ib_qp *qp,
3692 struct ib_qp_attr *attr,
3693 int attr_mask,
3694 struct ib_udata *udata);
3695
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
3705int ib_modify_qp(struct ib_qp *qp,
3706 struct ib_qp_attr *qp_attr,
3707 int qp_attr_mask);
3708
/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
3720int ib_query_qp(struct ib_qp *qp,
3721 struct ib_qp_attr *qp_attr,
3722 int qp_attr_mask,
3723 struct ib_qp_init_attr *qp_init_attr);
3724
/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
 * @udata: Valid udata or NULL for kernel objects.
 */
3730int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3731
/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
3738static inline int ib_destroy_qp(struct ib_qp *qp)
3739{
3740 return ib_destroy_qp_user(qp, NULL);
3741}
3742
/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
3750struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3751 struct ib_qp_open_attr *qp_open_attr);
3752
/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
3760int ib_close_qp(struct ib_qp *qp);
3761
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after ringing the
 * doorbell.
 */
3775static inline int ib_post_send(struct ib_qp *qp,
3776 const struct ib_send_wr *send_wr,
3777 const struct ib_send_wr **bad_send_wr)
3778{
3779 const struct ib_send_wr *dummy;
3780
3781 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3782}
3783
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
3792static inline int ib_post_recv(struct ib_qp *qp,
3793 const struct ib_recv_wr *recv_wr,
3794 const struct ib_recv_wr **bad_recv_wr)
3795{
3796 const struct ib_recv_wr *dummy;
3797
3798 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3799}
3800
3801struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3802 int comp_vector, enum ib_poll_context poll_ctx,
3803 const char *caller);
3804static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3805 int nr_cqe, int comp_vector,
3806 enum ib_poll_context poll_ctx)
3807{
3808 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3809 KBUILD_MODNAME);
3810}
3811
3812struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3813 int nr_cqe, enum ib_poll_context poll_ctx,
3814 const char *caller);

/**
 * ib_alloc_cq_any - Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
3823static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3824 void *private, int nr_cqe,
3825 enum ib_poll_context poll_ctx)
3826{
3827 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3828 KBUILD_MODNAME);
3829}
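
/*
 * Sketch: a ULP with no completion-vector preference allocates its CQ with
 * ib_alloc_cq_any() and releases it with ib_free_cq() (declared below);
 * an nr_cqe of 128 is an arbitrary example value.
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(ibdev, NULL, 128, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	// ... attach QPs, post work ...
 *	ib_free_cq(cq);
 */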
3830
3831void ib_free_cq(struct ib_cq *cq);
3832int ib_process_cq_direct(struct ib_cq *cq, int budget);

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
3847struct ib_cq *__ib_create_cq(struct ib_device *device,
3848 ib_comp_handler comp_handler,
3849 void (*event_handler)(struct ib_event *, void *),
3850 void *cq_context,
3851 const struct ib_cq_init_attr *cq_attr,
3852 const char *caller);
3853#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3854 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3855
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
3863int ib_resize_cq(struct ib_cq *cq, int cqe);
3864
/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
3872int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3873
/**
 * ib_destroy_cq_user - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 * @udata: Valid user data or NULL for kernel objects.
 */
3879int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3880
/**
 * ib_destroy_cq - Destroys the specified kernel CQ.
 * @cq: The CQ to destroy.
 *
 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 */
3887static inline void ib_destroy_cq(struct ib_cq *cq)
3888{
3889 int ret = ib_destroy_cq_user(cq, NULL);
3890
3891 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3892}
3893
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
3906static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3907 struct ib_wc *wc)
3908{
3909 return cq->device->ops.poll_cq(cq, num_entries, wc);
3910}
3911
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
3939static inline int ib_req_notify_cq(struct ib_cq *cq,
3940 enum ib_cq_notify_flags flags)
3941{
3942 return cq->device->ops.req_notify_cq(cq, flags);
3943}
3944
3945struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3946 int comp_vector_hint,
3947 enum ib_poll_context poll_ctx);
3948
3949void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3950
/*
 * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
 * address into the dma_addr_t.
 */
3956static inline bool ib_uses_virt_dma(struct ib_device *dev)
3957{
3958 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
3959}
3960
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
3966static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3967{
3968 if (ib_uses_virt_dma(dev))
3969 return 0;
3970 return dma_mapping_error(dev->dma_device, dma_addr);
3971}
3972
/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
3980static inline u64 ib_dma_map_single(struct ib_device *dev,
3981 void *cpu_addr, size_t size,
3982 enum dma_data_direction direction)
3983{
3984 if (ib_uses_virt_dma(dev))
3985 return (uintptr_t)cpu_addr;
3986 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3987}
3988
/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
3996static inline void ib_dma_unmap_single(struct ib_device *dev,
3997 u64 addr, size_t size,
3998 enum dma_data_direction direction)
3999{
4000 if (!ib_uses_virt_dma(dev))
4001 dma_unmap_single(dev->dma_device, addr, size, direction);
4002}
4003
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
4012static inline u64 ib_dma_map_page(struct ib_device *dev,
4013 struct page *page,
4014 unsigned long offset,
4015 size_t size,
4016 enum dma_data_direction direction)
4017{
4018 if (ib_uses_virt_dma(dev))
4019 return (uintptr_t)(page_address(page) + offset);
4020 return dma_map_page(dev->dma_device, page, offset, size, direction);
4021}
4022
/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
4030static inline void ib_dma_unmap_page(struct ib_device *dev,
4031 u64 addr, size_t size,
4032 enum dma_data_direction direction)
4033{
4034 if (!ib_uses_virt_dma(dev))
4035 dma_unmap_page(dev->dma_device, addr, size, direction);
4036}
4037
4038int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4039static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4040 struct scatterlist *sg, int nents,
4041 enum dma_data_direction direction,
4042 unsigned long dma_attrs)
4043{
4044 if (ib_uses_virt_dma(dev))
4045 return ib_dma_virt_map_sg(dev, sg, nents);
4046 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4047 dma_attrs);
4048}
4049
4050static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4051 struct scatterlist *sg, int nents,
4052 enum dma_data_direction direction,
4053 unsigned long dma_attrs)
4054{
4055 if (!ib_uses_virt_dma(dev))
4056 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4057 dma_attrs);
4058}
4059
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
4067static inline int ib_dma_map_sg(struct ib_device *dev,
4068 struct scatterlist *sg, int nents,
4069 enum dma_data_direction direction)
4070{
4071 return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4072}
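
/*
 * Sketch of the map/use/unmap pattern; a return value of 0 mapped entries
 * indicates failure:
 *
 *	int n = ib_dma_map_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	// ... hand the mapped SGL to the HW ...
 *	ib_dma_unmap_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);
 */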
4073
/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
4081static inline void ib_dma_unmap_sg(struct ib_device *dev,
4082 struct scatterlist *sg, int nents,
4083 enum dma_data_direction direction)
4084{
4085 ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4086}
4087
/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.
 */
4094static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4095{
4096 if (ib_uses_virt_dma(dev))
4097 return UINT_MAX;
4098 return dma_get_max_seg_size(dev->dma_device);
4099}
4100
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
4108static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4109 u64 addr,
4110 size_t size,
4111 enum dma_data_direction dir)
4112{
4113 if (!ib_uses_virt_dma(dev))
4114 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4115}
4116
/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
4124static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4125 u64 addr,
4126 size_t size,
4127 enum dma_data_direction dir)
4128{
4129 if (!ib_uses_virt_dma(dev))
4130 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4131}
4132
/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
 * pinned memory
 */
4136struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4137 u64 virt_addr, int mr_access_flags);

/* ib_advise_mr - give an advice about an address range in a memory region */
4140int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4141 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4142
/**
 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 * @udata: Valid user data or NULL for kernel objects.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
4150int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4151
/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
4161static inline int ib_dereg_mr(struct ib_mr *mr)
4162{
4163 return ib_dereg_mr_user(mr, NULL);
4164}
4165
4166struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4167 u32 max_num_sg);
4168
4169struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4170 u32 max_num_data_sg,
4171 u32 max_num_meta_sg);
4172
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
4179static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4180{
4181 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4182 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4183}
4184
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
4190static inline u32 ib_inc_rkey(u32 rkey)
4191{
4192 const u32 mask = 0x000000ff;
4193 return ((rkey + 1) & mask) | (rkey & ~mask);
4194}
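
/*
 * Worked example: only the low 8 bits participate in the increment, so
 * ib_inc_rkey(0x123456ff) yields 0x12345600 -- the key byte wraps from 0xff
 * to 0x00 while the upper 24 bits (the HW resource index) are preserved.
 */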
4195
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
4208int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4209
/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
4216int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4217
4218struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4219 struct inode *inode, struct ib_udata *udata);
4220int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4221
4222static inline int ib_check_mr_access(struct ib_device *ib_dev,
4223 unsigned int flags)
4224{
 /*
 * Local write permission is required if remote write or
 * remote atomic permission is also requested.
 */
4229 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4230 !(flags & IB_ACCESS_LOCAL_WRITE))
4231 return -EINVAL;
4232
4233 if (flags & ~IB_ACCESS_SUPPORTED)
4234 return -EINVAL;
4235
4236 if (flags & IB_ACCESS_ON_DEMAND &&
4237 !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
4238 return -EINVAL;
4239 return 0;
4240}
4241
4242static inline bool ib_access_writable(int access_flags)
4243{
 /*
 * We have writable memory backing the MR if any of the following
 * access flags are set. "Local write" and "remote write" obviously
 * require write access. "Remote atomic" can do things like fetch and
 * add, which will modify memory, and "MW bind" can change permissions
 * by binding a window.
 */
4251 return access_flags &
4252 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4253 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4254}
4255
/**
 * ib_check_mr_status - lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr. The first use is for signature status check.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
4268int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4269 struct ib_mr_status *mr_status);
4270
/**
 * ib_device_try_get - Hold a registration lock
 * @dev: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered. Uses that only require the device pointer to
 * be valid should use get_device(&ibdev->dev) to hold the memory.
 */
4284static inline bool ib_device_try_get(struct ib_device *dev)
4285{
4286 return refcount_inc_not_zero(&dev->refcount);
4287}
4288
4289void ib_device_put(struct ib_device *device);
4290struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4291 enum rdma_driver_id driver_id);
4292struct ib_device *ib_device_get_by_name(const char *name,
4293 enum rdma_driver_id driver_id);
4294struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4295 u16 pkey, const union ib_gid *gid,
4296 const struct sockaddr *addr);
4297int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4298 unsigned int port);
4299struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
4300
4301struct ib_wq *ib_create_wq(struct ib_pd *pd,
4302 struct ib_wq_init_attr *init_attr);
4303int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4304
4305int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4306 unsigned int *sg_offset, unsigned int page_size);
4307int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4308 int data_sg_nents, unsigned int *data_sg_offset,
4309 struct scatterlist *meta_sg, int meta_sg_nents,
4310 unsigned int *meta_sg_offset, unsigned int page_size);
4311
4312static inline int
4313ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4314 unsigned int *sg_offset, unsigned int page_size)
4315{
4316 int n;
4317
4318 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4319 mr->iova = 0;
4320
4321 return n;
4322}
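
/*
 * Fast-registration sketch (error handling trimmed): allocate an MR, map an
 * SGL into it, then post an IB_WR_REG_MR work request (not shown) to make
 * the mapping live before using mr->lkey/mr->rkey.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *
 *	if (n < nents)
 *		; // fewer entries mapped than requested: shorten or fail
 */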
4323
4324int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4325 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4326
4327void ib_drain_rq(struct ib_qp *qp);
4328void ib_drain_sq(struct ib_qp *qp);
4329void ib_drain_qp(struct ib_qp *qp);
4330
4331int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4332 u8 *width);
4333
4334static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4335{
4336 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4337 return attr->roce.dmac;
4338 return NULL;
4339}
4340
4341static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4342{
4343 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4344 attr->ib.dlid = (u16)dlid;
4345 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4346 attr->opa.dlid = dlid;
4347}
4348
4349static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4350{
4351 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4352 return attr->ib.dlid;
4353 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4354 return attr->opa.dlid;
4355 return 0;
4356}
4357
4358static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4359{
4360 attr->sl = sl;
4361}
4362
4363static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4364{
4365 return attr->sl;
4366}
4367
4368static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4369 u8 src_path_bits)
4370{
4371 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4372 attr->ib.src_path_bits = src_path_bits;
4373 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4374 attr->opa.src_path_bits = src_path_bits;
4375}
4376
4377static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4378{
4379 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4380 return attr->ib.src_path_bits;
4381 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4382 return attr->opa.src_path_bits;
4383 return 0;
4384}
4385
4386static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4387 bool make_grd)
4388{
4389 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4390 attr->opa.make_grd = make_grd;
4391}
4392
4393static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4394{
4395 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4396 return attr->opa.make_grd;
4397 return false;
4398}
4399
4400static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4401{
4402 attr->port_num = port_num;
4403}
4404
4405static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4406{
4407 return attr->port_num;
4408}
4409
4410static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4411 u8 static_rate)
4412{
4413 attr->static_rate = static_rate;
4414}
4415
4416static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4417{
4418 return attr->static_rate;
4419}
4420
4421static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4422 enum ib_ah_flags flag)
4423{
4424 attr->ah_flags = flag;
4425}
4426
4427static inline enum ib_ah_flags
4428 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4429{
4430 return attr->ah_flags;
4431}
4432
4433static inline const struct ib_global_route
4434 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4435{
4436 return &attr->grh;
4437}
4438
/* To retrieve and modify the grh */
4440static inline struct ib_global_route
4441 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4442{
4443 return &attr->grh;
4444}
4445
4446static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4447{
4448 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4449
4450 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4451}
4452
4453static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4454 __be64 prefix)
4455{
4456 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4457
4458 grh->dgid.global.subnet_prefix = prefix;
4459}
4460
4461static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4462 __be64 if_id)
4463{
4464 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4465
4466 grh->dgid.global.interface_id = if_id;
4467}
4468
4469static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4470 union ib_gid *dgid, u32 flow_label,
4471 u8 sgid_index, u8 hop_limit,
4472 u8 traffic_class)
4473{
4474 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4475
4476 attr->ah_flags = IB_AH_GRH;
4477 if (dgid)
4478 grh->dgid = *dgid;
4479 grh->flow_label = flow_label;
4480 grh->sgid_index = sgid_index;
4481 grh->hop_limit = hop_limit;
4482 grh->traffic_class = traffic_class;
4483 grh->sgid_attr = NULL;
4484}
4485
4486void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4487void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4488 u32 flow_label, u8 hop_limit, u8 traffic_class,
4489 const struct ib_gid_attr *sgid_attr);
4490void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4491 const struct rdma_ah_attr *src);
4492void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4493 const struct rdma_ah_attr *new);
4494void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4495
/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
4502static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4503 u32 port_num)
4504{
4505 if (rdma_protocol_roce(dev, port_num))
4506 return RDMA_AH_ATTR_TYPE_ROCE;
4507 if (rdma_protocol_ib(dev, port_num)) {
4508 if (rdma_cap_opa_ah(dev, port_num))
4509 return RDMA_AH_ATTR_TYPE_OPA;
4510 return RDMA_AH_ATTR_TYPE_IB;
4511 }
4512
4513 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4514}
4515
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
4525static inline u16 ib_lid_cpu16(u32 lid)
4526{
4527 WARN_ON_ONCE(lid & 0xFFFF0000);
4528 return (u16)lid;
4529}
4530
/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
4536static inline __be16 ib_lid_be16(u32 lid)
4537{
4538 WARN_ON_ONCE(lid & 0xFFFF0000);
4539 return cpu_to_be16((u16)lid);
4540}
4541
/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * expose the vector affinity).
 */
4552static inline const struct cpumask *
4553ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4554{
4555 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4556 !device->ops.get_vector_affinity)
4557 return NULL;
4558
4559 return device->ops.get_vector_affinity(device, comp_vector);
}
4562
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @device: the rdma device
 */
4569void rdma_roce_rescan_device(struct ib_device *ibdev);
4570
4571struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4572
4573int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4574
4575struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4576 enum rdma_netdev_t type, const char *name,
4577 unsigned char name_assign_type,
4578 void (*setup)(struct net_device *));
4579
4580int rdma_init_netdev(struct ib_device *device, u32 port_num,
4581 enum rdma_netdev_t type, const char *name,
4582 unsigned char name_assign_type,
4583 void (*setup)(struct net_device *),
4584 struct net_device *netdev);
4585
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer for which the ib_device pointer is to be retrieved
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
 */
4594static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4595{
4596 struct ib_core_device *coredev =
4597 container_of(device, struct ib_core_device, dev);
4598
4599 return coredev->owner;
4600}
4601
/**
 * ibdev_to_node - return the NUMA node for a given ib_device
 * @ibdev: device to get the NUMA node for.
 */
4606static inline int ibdev_to_node(struct ib_device *ibdev)
4607{
4608 struct device *parent = ibdev->dev.parent;
4609
4610 if (!parent)
4611 return NUMA_NO_NODE;
4612 return dev_to_node(parent);
4613}
4614
/**
 * rdma_device_to_drv_device - Helper macro to reach back to the driver's
 *     ib_device holder structure from a device pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers who have exposed sysfs entries using
 * rdma_device_to_ibdev().
 */
4623#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4624 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4625
4626bool rdma_dev_access_netns(const struct ib_device *device,
4627 const struct net *net);
4628
4629#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4630#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4631#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4632
/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *   on the flow_label
 * @fl: flow label
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * convention.
 */
4641static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4642{
4643 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4644
4645 fl_low ^= fl_high >> 14;
4646 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4647}
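
/*
 * Worked example: fl = 0xabcde gives fl_low = 0x3cde and fl_high >> 14 =
 * 0x2a, so the source port is (0x3cde ^ 0x2a) | 0xc000 = 0xfcf4, inside the
 * valid RoCE v2 range [0xc000, 0xffff].
 */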
4648
/**
 * rdma_calc_flow_label - generate a pseudo IPv6 flow label based on lqpn/rqpn
 *   pair
 * @lqpn: local qp number
 * @rqpn: remote qp number
 *
 * This function folds the multiplication result of the two qpns, 24 bit
 * each, and converts it to a 20 bit result.
 *
 * This function will create a symmetric flow_label value based on the local
 * and remote qpn values. This will allow both the requester and responder
 * to calculate the same flow_label for a given connection.
 *
 * This function should be used by any rdma protocol that uses the consistent
 * flow label model.
 */
4664static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4665{
4666 u64 v = (u64)lqpn * rqpn;
4667
4668 v ^= v >> 20;
4669 v ^= v >> 40;
4670
4671 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4672}
4673
4674const struct ib_port_immutable*
4675ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */
4677