1
2
3
4
5
6
7
8
9
10
11
12#ifndef IB_VERBS_H
13#define IB_VERBS_H
14
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
45
46#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
47
48struct ib_umem_odp;
49struct ib_uqp_object;
50struct ib_usrq_object;
51struct ib_uwq_object;
52struct rdma_cm_id;
53struct ib_port;
54struct hw_stats_device_data;
55
56extern struct workqueue_struct *ib_wq;
57extern struct workqueue_struct *ib_comp_wq;
58extern struct workqueue_struct *ib_comp_unbound_wq;
59
60struct ib_ucq_object;
61
62__printf(3, 4) __cold
63void ibdev_printk(const char *level, const struct ib_device *ibdev,
64 const char *format, ...);
65__printf(2, 3) __cold
66void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
67__printf(2, 3) __cold
68void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
69__printf(2, 3) __cold
70void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
71__printf(2, 3) __cold
72void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
73__printf(2, 3) __cold
74void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
75__printf(2, 3) __cold
76void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
77__printf(2, 3) __cold
78void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
79
80#if defined(CONFIG_DYNAMIC_DEBUG) || \
81 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
82#define ibdev_dbg(__dev, format, args...) \
83 dynamic_ibdev_dbg(__dev, format, ##args)
84#else
85__printf(2, 3) __cold
86static inline
87void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
88#endif
89
90#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
91do { \
92 static DEFINE_RATELIMIT_STATE(_rs, \
93 DEFAULT_RATELIMIT_INTERVAL, \
94 DEFAULT_RATELIMIT_BURST); \
95 if (__ratelimit(&_rs)) \
96 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
97} while (0)
98
99#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
100 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
101#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
102 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
103#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
104 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
105#define ibdev_err_ratelimited(ibdev, fmt, ...) \
106 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
107#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
108 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
109#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
110 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
111#define ibdev_info_ratelimited(ibdev, fmt, ...) \
112 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
113
114#if defined(CONFIG_DYNAMIC_DEBUG) || \
115 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
116
117#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
118do { \
119 static DEFINE_RATELIMIT_STATE(_rs, \
120 DEFAULT_RATELIMIT_INTERVAL, \
121 DEFAULT_RATELIMIT_BURST); \
122 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
123 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
124 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
125 ##__VA_ARGS__); \
126} while (0)
127#else
128__printf(2, 3) __cold
129static inline
130void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
131#endif
132
/*
 * A GID (Global IDentifier) is a 128-bit address, viewable either as raw
 * bytes or as the IB-defined subnet prefix + interface ID pair (both
 * fields big-endian on the wire).
 */
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};
140
141extern union ib_gid zgid;
142
/*
 * Kernel GID types deliberately mirror the uverbs ABI values so they can
 * be exchanged with userspace without translation.
 */
enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,		/* RoCE v1 */
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2, /* RoCE v2 */
	IB_GID_TYPE_SIZE	/* sentinel: number of GID types */
};
149
/* IANA-assigned UDP destination port for RoCE v2 traffic */
#define ROCE_V2_UDP_DPORT	4791
/*
 * One entry of a port's GID table.  @ndev is RCU-protected: readers must
 * dereference it under rcu_read_lock().
 */
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;		/* position in the port's GID table */
	u32			port_num;
};
159
160enum {
161
162 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
163};
164
165enum rdma_transport_type {
166 RDMA_TRANSPORT_IB,
167 RDMA_TRANSPORT_IWARP,
168 RDMA_TRANSPORT_USNIC,
169 RDMA_TRANSPORT_USNIC_UDP,
170 RDMA_TRANSPORT_UNSPECIFIED,
171};
172
173enum rdma_protocol_type {
174 RDMA_PROTOCOL_IB,
175 RDMA_PROTOCOL_IBOE,
176 RDMA_PROTOCOL_IWARP,
177 RDMA_PROTOCOL_USNIC_UDP
178};
179
180__attribute_const__ enum rdma_transport_type
181rdma_node_get_transport(unsigned int node_type);
182
183enum rdma_network_type {
184 RDMA_NETWORK_IB,
185 RDMA_NETWORK_ROCE_V1,
186 RDMA_NETWORK_IPV4,
187 RDMA_NETWORK_IPV6
188};
189
190static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
191{
192 if (network_type == RDMA_NETWORK_IPV4 ||
193 network_type == RDMA_NETWORK_IPV6)
194 return IB_GID_TYPE_ROCE_UDP_ENCAP;
195 else if (network_type == RDMA_NETWORK_ROCE_V1)
196 return IB_GID_TYPE_ROCE;
197 else
198 return IB_GID_TYPE_IB;
199}
200
201static inline enum rdma_network_type
202rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
203{
204 if (attr->gid_type == IB_GID_TYPE_IB)
205 return RDMA_NETWORK_IB;
206
207 if (attr->gid_type == IB_GID_TYPE_ROCE)
208 return RDMA_NETWORK_ROCE_V1;
209
210 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
211 return RDMA_NETWORK_IPV4;
212 else
213 return RDMA_NETWORK_IPV6;
214}
215
216enum rdma_link_layer {
217 IB_LINK_LAYER_UNSPECIFIED,
218 IB_LINK_LAYER_INFINIBAND,
219 IB_LINK_LAYER_ETHERNET,
220};
221
222enum ib_device_cap_flags {
223 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
224 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
225 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
226 IB_DEVICE_RAW_MULTI = (1 << 3),
227 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
228 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
229 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
230 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
231 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
232
233 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
234 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
235 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
236 IB_DEVICE_SRQ_RESIZE = (1 << 13),
237 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
238
239
240
241
242
243
244
245
246 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
247
248 IB_DEVICE_MEM_WINDOW = (1 << 17),
249
250
251
252
253
254
255
256 IB_DEVICE_UD_IP_CSUM = (1 << 18),
257 IB_DEVICE_UD_TSO = (1 << 19),
258 IB_DEVICE_XRC = (1 << 20),
259
260
261
262
263
264
265
266
267
268
269 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
270 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
271 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
272 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
273 IB_DEVICE_RC_IP_CSUM = (1 << 25),
274
275 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
276
277
278
279
280
281
282 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
283 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
284 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
285 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
286 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
287 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
288
289 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
290 IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
291
292 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
293 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
294};
295
296enum ib_atomic_cap {
297 IB_ATOMIC_NONE,
298 IB_ATOMIC_HCA,
299 IB_ATOMIC_GLOB
300};
301
302enum ib_odp_general_cap_bits {
303 IB_ODP_SUPPORT = 1 << 0,
304 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
305};
306
307enum ib_odp_transport_cap_bits {
308 IB_ODP_SUPPORT_SEND = 1 << 0,
309 IB_ODP_SUPPORT_RECV = 1 << 1,
310 IB_ODP_SUPPORT_WRITE = 1 << 2,
311 IB_ODP_SUPPORT_READ = 1 << 3,
312 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
313 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
314};
315
316struct ib_odp_caps {
317 uint64_t general_caps;
318 struct {
319 uint32_t rc_odp_caps;
320 uint32_t uc_odp_caps;
321 uint32_t ud_odp_caps;
322 uint32_t xrc_odp_caps;
323 } per_transport_caps;
324};
325
326struct ib_rss_caps {
327
328
329
330
331 u32 supported_qpts;
332 u32 max_rwq_indirection_tables;
333 u32 max_rwq_indirection_table_size;
334};
335
336enum ib_tm_cap_flags {
337
338 IB_TM_CAP_RNDV_RC = 1 << 0,
339};
340
341struct ib_tm_caps {
342
343 u32 max_rndv_hdr_size;
344
345 u32 max_num_tags;
346
347 u32 flags;
348
349 u32 max_ops;
350
351 u32 max_sge;
352};
353
354struct ib_cq_init_attr {
355 unsigned int cqe;
356 u32 comp_vector;
357 u32 flags;
358};
359
360enum ib_cq_attr_mask {
361 IB_CQ_MODERATE = 1 << 0,
362};
363
364struct ib_cq_caps {
365 u16 max_cq_moderation_count;
366 u16 max_cq_moderation_period;
367};
368
369struct ib_dm_mr_attr {
370 u64 length;
371 u64 offset;
372 u32 access_flags;
373};
374
375struct ib_dm_alloc_attr {
376 u64 length;
377 u32 alignment;
378 u32 flags;
379};
380
381struct ib_device_attr {
382 u64 fw_ver;
383 __be64 sys_image_guid;
384 u64 max_mr_size;
385 u64 page_size_cap;
386 u32 vendor_id;
387 u32 vendor_part_id;
388 u32 hw_ver;
389 int max_qp;
390 int max_qp_wr;
391 u64 device_cap_flags;
392 int max_send_sge;
393 int max_recv_sge;
394 int max_sge_rd;
395 int max_cq;
396 int max_cqe;
397 int max_mr;
398 int max_pd;
399 int max_qp_rd_atom;
400 int max_ee_rd_atom;
401 int max_res_rd_atom;
402 int max_qp_init_rd_atom;
403 int max_ee_init_rd_atom;
404 enum ib_atomic_cap atomic_cap;
405 enum ib_atomic_cap masked_atomic_cap;
406 int max_ee;
407 int max_rdd;
408 int max_mw;
409 int max_raw_ipv6_qp;
410 int max_raw_ethy_qp;
411 int max_mcast_grp;
412 int max_mcast_qp_attach;
413 int max_total_mcast_qp_attach;
414 int max_ah;
415 int max_srq;
416 int max_srq_wr;
417 int max_srq_sge;
418 unsigned int max_fast_reg_page_list_len;
419 unsigned int max_pi_fast_reg_page_list_len;
420 u16 max_pkeys;
421 u8 local_ca_ack_delay;
422 int sig_prot_cap;
423 int sig_guard_cap;
424 struct ib_odp_caps odp_caps;
425 uint64_t timestamp_mask;
426 uint64_t hca_core_clock;
427 struct ib_rss_caps rss_caps;
428 u32 max_wq_type_rq;
429 u32 raw_packet_caps;
430 struct ib_tm_caps tm_caps;
431 struct ib_cq_caps cq_caps;
432 u64 max_dm_size;
433
434 u32 max_sgl_rd;
435};
436
/*
 * IBTA-encoded path MTU values (these are codes, not byte counts); use
 * ib_mtu_enum_to_int()/ib_mtu_int_to_enum() to convert.
 */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};
444
/* OPA extends the IB MTU encoding above with two larger sizes. */
enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};
449
450static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
451{
452 switch (mtu) {
453 case IB_MTU_256: return 256;
454 case IB_MTU_512: return 512;
455 case IB_MTU_1024: return 1024;
456 case IB_MTU_2048: return 2048;
457 case IB_MTU_4096: return 4096;
458 default: return -1;
459 }
460}
461
462static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
463{
464 if (mtu >= 4096)
465 return IB_MTU_4096;
466 else if (mtu >= 2048)
467 return IB_MTU_2048;
468 else if (mtu >= 1024)
469 return IB_MTU_1024;
470 else if (mtu >= 512)
471 return IB_MTU_512;
472 else
473 return IB_MTU_256;
474}
475
476static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
477{
478 switch (mtu) {
479 case OPA_MTU_8192:
480 return 8192;
481 case OPA_MTU_10240:
482 return 10240;
483 default:
484 return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
485 }
486}
487
488static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
489{
490 if (mtu >= 10240)
491 return OPA_MTU_10240;
492 else if (mtu >= 8192)
493 return OPA_MTU_8192;
494 else
495 return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
496}
497
498enum ib_port_state {
499 IB_PORT_NOP = 0,
500 IB_PORT_DOWN = 1,
501 IB_PORT_INIT = 2,
502 IB_PORT_ARMED = 3,
503 IB_PORT_ACTIVE = 4,
504 IB_PORT_ACTIVE_DEFER = 5
505};
506
507enum ib_port_phys_state {
508 IB_PORT_PHYS_STATE_SLEEP = 1,
509 IB_PORT_PHYS_STATE_POLLING = 2,
510 IB_PORT_PHYS_STATE_DISABLED = 3,
511 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
512 IB_PORT_PHYS_STATE_LINK_UP = 5,
513 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
514 IB_PORT_PHYS_STATE_PHY_TEST = 7,
515};
516
/*
 * Link width encodings.  NOTE: these are bitmask-style codes and are NOT
 * ordered by lane count (IB_WIDTH_2X = 16 sorts after 12X); use
 * ib_width_enum_to_int() to obtain the actual lane count.
 */
enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_2X = 16,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};
524
525static inline int ib_width_enum_to_int(enum ib_port_width width)
526{
527 switch (width) {
528 case IB_WIDTH_1X: return 1;
529 case IB_WIDTH_2X: return 2;
530 case IB_WIDTH_4X: return 4;
531 case IB_WIDTH_8X: return 8;
532 case IB_WIDTH_12X: return 12;
533 default: return -1;
534 }
535}
536
537enum ib_port_speed {
538 IB_SPEED_SDR = 1,
539 IB_SPEED_DDR = 2,
540 IB_SPEED_QDR = 4,
541 IB_SPEED_FDR10 = 8,
542 IB_SPEED_FDR = 16,
543 IB_SPEED_EDR = 32,
544 IB_SPEED_HDR = 64,
545 IB_SPEED_NDR = 128,
546};
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/*
 * Hardware counter snapshot exported by a device/port.
 * NOTE(review): @lock presumably serializes updates to @lifespan and
 * value[] — confirm against the users in core/counters code.
 */
struct rdma_hw_stats {
	struct mutex	lock;
	unsigned long	timestamp;	/* presumably jiffies of last refresh — verify */
	unsigned long	lifespan;	/* min refresh interval, in jiffies (see alloc helper) */
	const char * const *names;	/* counter names; storage owned by the caller */
	int		num_counters;	/* entries in names[] and value[] */
	u64		value[];	/* flexible array of counter values */
};
575
576#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
577
578
579
580
581
582
583
584static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
585 const char * const *names, int num_counters,
586 unsigned long lifespan)
587{
588 struct rdma_hw_stats *stats;
589
590 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
591 GFP_KERNEL);
592 if (!stats)
593 return NULL;
594 stats->names = names;
595 stats->num_counters = num_counters;
596 stats->lifespan = msecs_to_jiffies(lifespan);
597
598 return stats;
599}
600
601
602
603
604
605
606#define RDMA_CORE_CAP_IB_MAD 0x00000001
607#define RDMA_CORE_CAP_IB_SMI 0x00000002
608#define RDMA_CORE_CAP_IB_CM 0x00000004
609#define RDMA_CORE_CAP_IW_CM 0x00000008
610#define RDMA_CORE_CAP_IB_SA 0x00000010
611#define RDMA_CORE_CAP_OPA_MAD 0x00000020
612
613
614#define RDMA_CORE_CAP_AF_IB 0x00001000
615#define RDMA_CORE_CAP_ETH_AH 0x00002000
616#define RDMA_CORE_CAP_OPA_AH 0x00004000
617#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
618
619
620#define RDMA_CORE_CAP_PROT_IB 0x00100000
621#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
622#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
623#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
624#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
625#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
626
627#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
628 | RDMA_CORE_CAP_PROT_ROCE \
629 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
630
631#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
632 | RDMA_CORE_CAP_IB_MAD \
633 | RDMA_CORE_CAP_IB_SMI \
634 | RDMA_CORE_CAP_IB_CM \
635 | RDMA_CORE_CAP_IB_SA \
636 | RDMA_CORE_CAP_AF_IB)
637#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
638 | RDMA_CORE_CAP_IB_MAD \
639 | RDMA_CORE_CAP_IB_CM \
640 | RDMA_CORE_CAP_AF_IB \
641 | RDMA_CORE_CAP_ETH_AH)
642#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
643 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
644 | RDMA_CORE_CAP_IB_MAD \
645 | RDMA_CORE_CAP_IB_CM \
646 | RDMA_CORE_CAP_AF_IB \
647 | RDMA_CORE_CAP_ETH_AH)
648#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
649 | RDMA_CORE_CAP_IW_CM)
650#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
651 | RDMA_CORE_CAP_OPA_MAD)
652
653#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
654
655#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
656
657struct ib_port_attr {
658 u64 subnet_prefix;
659 enum ib_port_state state;
660 enum ib_mtu max_mtu;
661 enum ib_mtu active_mtu;
662 u32 phys_mtu;
663 int gid_tbl_len;
664 unsigned int ip_gids:1;
665
666 u32 port_cap_flags;
667 u32 max_msg_sz;
668 u32 bad_pkey_cntr;
669 u32 qkey_viol_cntr;
670 u16 pkey_tbl_len;
671 u32 sm_lid;
672 u32 lid;
673 u8 lmc;
674 u8 max_vl_num;
675 u8 sm_sl;
676 u8 subnet_timeout;
677 u8 init_type_reply;
678 u8 active_width;
679 u16 active_speed;
680 u8 phys_state;
681 u16 port_cap_flags2;
682};
683
684enum ib_device_modify_flags {
685 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
686 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
687};
688
689#define IB_DEVICE_NODE_DESC_MAX 64
690
691struct ib_device_modify {
692 u64 sys_image_guid;
693 char node_desc[IB_DEVICE_NODE_DESC_MAX];
694};
695
696enum ib_port_modify_flags {
697 IB_PORT_SHUTDOWN = 1,
698 IB_PORT_INIT_TYPE = (1<<2),
699 IB_PORT_RESET_QKEY_CNTR = (1<<3),
700 IB_PORT_OPA_MASK_CHG = (1<<4)
701};
702
703struct ib_port_modify {
704 u32 set_port_cap_mask;
705 u32 clr_port_cap_mask;
706 u8 init_type;
707};
708
709enum ib_event_type {
710 IB_EVENT_CQ_ERR,
711 IB_EVENT_QP_FATAL,
712 IB_EVENT_QP_REQ_ERR,
713 IB_EVENT_QP_ACCESS_ERR,
714 IB_EVENT_COMM_EST,
715 IB_EVENT_SQ_DRAINED,
716 IB_EVENT_PATH_MIG,
717 IB_EVENT_PATH_MIG_ERR,
718 IB_EVENT_DEVICE_FATAL,
719 IB_EVENT_PORT_ACTIVE,
720 IB_EVENT_PORT_ERR,
721 IB_EVENT_LID_CHANGE,
722 IB_EVENT_PKEY_CHANGE,
723 IB_EVENT_SM_CHANGE,
724 IB_EVENT_SRQ_ERR,
725 IB_EVENT_SRQ_LIMIT_REACHED,
726 IB_EVENT_QP_LAST_WQE_REACHED,
727 IB_EVENT_CLIENT_REREGISTER,
728 IB_EVENT_GID_CHANGE,
729 IB_EVENT_WQ_FATAL,
730};
731
732const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
733
734struct ib_event {
735 struct ib_device *device;
736 union {
737 struct ib_cq *cq;
738 struct ib_qp *qp;
739 struct ib_srq *srq;
740 struct ib_wq *wq;
741 u32 port_num;
742 } element;
743 enum ib_event_type event;
744};
745
746struct ib_event_handler {
747 struct ib_device *device;
748 void (*handler)(struct ib_event_handler *, struct ib_event *);
749 struct list_head list;
750};
751
/*
 * Initialize an ib_event_handler before it is registered: bind the
 * callback to @_device and ready the registration list node.
 */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device	= _device;			\
		(_ptr)->handler	= _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
758
759struct ib_global_route {
760 const struct ib_gid_attr *sgid_attr;
761 union ib_gid dgid;
762 u32 flow_label;
763 u8 sgid_index;
764 u8 hop_limit;
765 u8 traffic_class;
766};
767
768struct ib_grh {
769 __be32 version_tclass_flow;
770 __be16 paylen;
771 u8 next_hdr;
772 u8 hop_limit;
773 union ib_gid sgid;
774 union ib_gid dgid;
775};
776
777union rdma_network_hdr {
778 struct ib_grh ibgrh;
779 struct {
780
781
782
783 u8 reserved[20];
784 struct iphdr roce4grh;
785 };
786};
787
788#define IB_QPN_MASK 0xFFFFFF
789
790enum {
791 IB_MULTICAST_QPN = 0xffffff
792};
793
794#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
795#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
796
797enum ib_ah_flags {
798 IB_AH_GRH = 1
799};
800
801enum ib_rate {
802 IB_RATE_PORT_CURRENT = 0,
803 IB_RATE_2_5_GBPS = 2,
804 IB_RATE_5_GBPS = 5,
805 IB_RATE_10_GBPS = 3,
806 IB_RATE_20_GBPS = 6,
807 IB_RATE_30_GBPS = 4,
808 IB_RATE_40_GBPS = 7,
809 IB_RATE_60_GBPS = 8,
810 IB_RATE_80_GBPS = 9,
811 IB_RATE_120_GBPS = 10,
812 IB_RATE_14_GBPS = 11,
813 IB_RATE_56_GBPS = 12,
814 IB_RATE_112_GBPS = 13,
815 IB_RATE_168_GBPS = 14,
816 IB_RATE_25_GBPS = 15,
817 IB_RATE_100_GBPS = 16,
818 IB_RATE_200_GBPS = 17,
819 IB_RATE_300_GBPS = 18,
820 IB_RATE_28_GBPS = 19,
821 IB_RATE_50_GBPS = 20,
822 IB_RATE_400_GBPS = 21,
823 IB_RATE_600_GBPS = 22,
824};
825
826
827
828
829
830
831
832__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
833
834
835
836
837
838
839__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859enum ib_mr_type {
860 IB_MR_TYPE_MEM_REG,
861 IB_MR_TYPE_SG_GAPS,
862 IB_MR_TYPE_DM,
863 IB_MR_TYPE_USER,
864 IB_MR_TYPE_DMA,
865 IB_MR_TYPE_INTEGRITY,
866};
867
868enum ib_mr_status_check {
869 IB_MR_CHECK_SIG_STATUS = 1,
870};
871
872
873
874
875
876
877
878
879
880struct ib_mr_status {
881 u32 fail_status;
882 struct ib_sig_err sig_err;
883};
884
885
886
887
888
889
890__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
891
892struct rdma_ah_init_attr {
893 struct rdma_ah_attr *ah_attr;
894 u32 flags;
895 struct net_device *xmit_slave;
896};
897
898enum rdma_ah_attr_type {
899 RDMA_AH_ATTR_TYPE_UNDEFINED,
900 RDMA_AH_ATTR_TYPE_IB,
901 RDMA_AH_ATTR_TYPE_ROCE,
902 RDMA_AH_ATTR_TYPE_OPA,
903};
904
905struct ib_ah_attr {
906 u16 dlid;
907 u8 src_path_bits;
908};
909
910struct roce_ah_attr {
911 u8 dmac[ETH_ALEN];
912};
913
914struct opa_ah_attr {
915 u32 dlid;
916 u8 src_path_bits;
917 bool make_grd;
918};
919
920struct rdma_ah_attr {
921 struct ib_global_route grh;
922 u8 sl;
923 u8 static_rate;
924 u32 port_num;
925 u8 ah_flags;
926 enum rdma_ah_attr_type type;
927 union {
928 struct ib_ah_attr ib;
929 struct roce_ah_attr roce;
930 struct opa_ah_attr opa;
931 };
932};
933
934enum ib_wc_status {
935 IB_WC_SUCCESS,
936 IB_WC_LOC_LEN_ERR,
937 IB_WC_LOC_QP_OP_ERR,
938 IB_WC_LOC_EEC_OP_ERR,
939 IB_WC_LOC_PROT_ERR,
940 IB_WC_WR_FLUSH_ERR,
941 IB_WC_MW_BIND_ERR,
942 IB_WC_BAD_RESP_ERR,
943 IB_WC_LOC_ACCESS_ERR,
944 IB_WC_REM_INV_REQ_ERR,
945 IB_WC_REM_ACCESS_ERR,
946 IB_WC_REM_OP_ERR,
947 IB_WC_RETRY_EXC_ERR,
948 IB_WC_RNR_RETRY_EXC_ERR,
949 IB_WC_LOC_RDD_VIOL_ERR,
950 IB_WC_REM_INV_RD_REQ_ERR,
951 IB_WC_REM_ABORT_ERR,
952 IB_WC_INV_EECN_ERR,
953 IB_WC_INV_EEC_STATE_ERR,
954 IB_WC_FATAL_ERR,
955 IB_WC_RESP_TIMEOUT_ERR,
956 IB_WC_GENERAL_ERR
957};
958
959const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
960
961enum ib_wc_opcode {
962 IB_WC_SEND = IB_UVERBS_WC_SEND,
963 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
964 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
965 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
966 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
967 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
968 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
969 IB_WC_LSO = IB_UVERBS_WC_TSO,
970 IB_WC_REG_MR,
971 IB_WC_MASKED_COMP_SWAP,
972 IB_WC_MASKED_FETCH_ADD,
973
974
975
976
977 IB_WC_RECV = 1 << 7,
978 IB_WC_RECV_RDMA_WITH_IMM
979};
980
981enum ib_wc_flags {
982 IB_WC_GRH = 1,
983 IB_WC_WITH_IMM = (1<<1),
984 IB_WC_WITH_INVALIDATE = (1<<2),
985 IB_WC_IP_CSUM_OK = (1<<3),
986 IB_WC_WITH_SMAC = (1<<4),
987 IB_WC_WITH_VLAN = (1<<5),
988 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
989};
990
991struct ib_wc {
992 union {
993 u64 wr_id;
994 struct ib_cqe *wr_cqe;
995 };
996 enum ib_wc_status status;
997 enum ib_wc_opcode opcode;
998 u32 vendor_err;
999 u32 byte_len;
1000 struct ib_qp *qp;
1001 union {
1002 __be32 imm_data;
1003 u32 invalidate_rkey;
1004 } ex;
1005 u32 src_qp;
1006 u32 slid;
1007 int wc_flags;
1008 u16 pkey_index;
1009 u8 sl;
1010 u8 dlid_path_bits;
1011 u32 port_num;
1012 u8 smac[ETH_ALEN];
1013 u16 vlan_id;
1014 u8 network_hdr_type;
1015};
1016
1017enum ib_cq_notify_flags {
1018 IB_CQ_SOLICITED = 1 << 0,
1019 IB_CQ_NEXT_COMP = 1 << 1,
1020 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1021 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1022};
1023
1024enum ib_srq_type {
1025 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1026 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1027 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1028};
1029
1030static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1031{
1032 return srq_type == IB_SRQT_XRC ||
1033 srq_type == IB_SRQT_TM;
1034}
1035
1036enum ib_srq_attr_mask {
1037 IB_SRQ_MAX_WR = 1 << 0,
1038 IB_SRQ_LIMIT = 1 << 1,
1039};
1040
1041struct ib_srq_attr {
1042 u32 max_wr;
1043 u32 max_sge;
1044 u32 srq_limit;
1045};
1046
1047struct ib_srq_init_attr {
1048 void (*event_handler)(struct ib_event *, void *);
1049 void *srq_context;
1050 struct ib_srq_attr attr;
1051 enum ib_srq_type srq_type;
1052
1053 struct {
1054 struct ib_cq *cq;
1055 union {
1056 struct {
1057 struct ib_xrcd *xrcd;
1058 } xrc;
1059
1060 struct {
1061 u32 max_num_tags;
1062 } tag_matching;
1063 };
1064 } ext;
1065};
1066
1067struct ib_qp_cap {
1068 u32 max_send_wr;
1069 u32 max_recv_wr;
1070 u32 max_send_sge;
1071 u32 max_recv_sge;
1072 u32 max_inline_data;
1073
1074
1075
1076
1077
1078
1079 u32 max_rdma_ctxs;
1080};
1081
1082enum ib_sig_type {
1083 IB_SIGNAL_ALL_WR,
1084 IB_SIGNAL_REQ_WR
1085};
1086
1087enum ib_qp_type {
1088
1089
1090
1091
1092
1093 IB_QPT_SMI,
1094 IB_QPT_GSI,
1095
1096 IB_QPT_RC = IB_UVERBS_QPT_RC,
1097 IB_QPT_UC = IB_UVERBS_QPT_UC,
1098 IB_QPT_UD = IB_UVERBS_QPT_UD,
1099 IB_QPT_RAW_IPV6,
1100 IB_QPT_RAW_ETHERTYPE,
1101 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1102 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1103 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1104 IB_QPT_MAX,
1105 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1106
1107
1108
1109
1110 IB_QPT_RESERVED1 = 0x1000,
1111 IB_QPT_RESERVED2,
1112 IB_QPT_RESERVED3,
1113 IB_QPT_RESERVED4,
1114 IB_QPT_RESERVED5,
1115 IB_QPT_RESERVED6,
1116 IB_QPT_RESERVED7,
1117 IB_QPT_RESERVED8,
1118 IB_QPT_RESERVED9,
1119 IB_QPT_RESERVED10,
1120};
1121
1122enum ib_qp_create_flags {
1123 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1124 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1125 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1126 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1127 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1128 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1129 IB_QP_CREATE_NETIF_QP = 1 << 5,
1130 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1131 IB_QP_CREATE_NETDEV_USE = 1 << 7,
1132 IB_QP_CREATE_SCATTER_FCS =
1133 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1134 IB_QP_CREATE_CVLAN_STRIPPING =
1135 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1136 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1137 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1138 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1139
1140 IB_QP_CREATE_RESERVED_START = 1 << 26,
1141 IB_QP_CREATE_RESERVED_END = 1 << 31,
1142};
1143
1144
1145
1146
1147
1148
1149struct ib_qp_init_attr {
1150
1151 void (*event_handler)(struct ib_event *, void *);
1152
1153 void *qp_context;
1154 struct ib_cq *send_cq;
1155 struct ib_cq *recv_cq;
1156 struct ib_srq *srq;
1157 struct ib_xrcd *xrcd;
1158 struct ib_qp_cap cap;
1159 enum ib_sig_type sq_sig_type;
1160 enum ib_qp_type qp_type;
1161 u32 create_flags;
1162
1163
1164
1165
1166 u32 port_num;
1167 struct ib_rwq_ind_table *rwq_ind_tbl;
1168 u32 source_qpn;
1169};
1170
1171struct ib_qp_open_attr {
1172 void (*event_handler)(struct ib_event *, void *);
1173 void *qp_context;
1174 u32 qp_num;
1175 enum ib_qp_type qp_type;
1176};
1177
1178enum ib_rnr_timeout {
1179 IB_RNR_TIMER_655_36 = 0,
1180 IB_RNR_TIMER_000_01 = 1,
1181 IB_RNR_TIMER_000_02 = 2,
1182 IB_RNR_TIMER_000_03 = 3,
1183 IB_RNR_TIMER_000_04 = 4,
1184 IB_RNR_TIMER_000_06 = 5,
1185 IB_RNR_TIMER_000_08 = 6,
1186 IB_RNR_TIMER_000_12 = 7,
1187 IB_RNR_TIMER_000_16 = 8,
1188 IB_RNR_TIMER_000_24 = 9,
1189 IB_RNR_TIMER_000_32 = 10,
1190 IB_RNR_TIMER_000_48 = 11,
1191 IB_RNR_TIMER_000_64 = 12,
1192 IB_RNR_TIMER_000_96 = 13,
1193 IB_RNR_TIMER_001_28 = 14,
1194 IB_RNR_TIMER_001_92 = 15,
1195 IB_RNR_TIMER_002_56 = 16,
1196 IB_RNR_TIMER_003_84 = 17,
1197 IB_RNR_TIMER_005_12 = 18,
1198 IB_RNR_TIMER_007_68 = 19,
1199 IB_RNR_TIMER_010_24 = 20,
1200 IB_RNR_TIMER_015_36 = 21,
1201 IB_RNR_TIMER_020_48 = 22,
1202 IB_RNR_TIMER_030_72 = 23,
1203 IB_RNR_TIMER_040_96 = 24,
1204 IB_RNR_TIMER_061_44 = 25,
1205 IB_RNR_TIMER_081_92 = 26,
1206 IB_RNR_TIMER_122_88 = 27,
1207 IB_RNR_TIMER_163_84 = 28,
1208 IB_RNR_TIMER_245_76 = 29,
1209 IB_RNR_TIMER_327_68 = 30,
1210 IB_RNR_TIMER_491_52 = 31
1211};
1212
1213enum ib_qp_attr_mask {
1214 IB_QP_STATE = 1,
1215 IB_QP_CUR_STATE = (1<<1),
1216 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1217 IB_QP_ACCESS_FLAGS = (1<<3),
1218 IB_QP_PKEY_INDEX = (1<<4),
1219 IB_QP_PORT = (1<<5),
1220 IB_QP_QKEY = (1<<6),
1221 IB_QP_AV = (1<<7),
1222 IB_QP_PATH_MTU = (1<<8),
1223 IB_QP_TIMEOUT = (1<<9),
1224 IB_QP_RETRY_CNT = (1<<10),
1225 IB_QP_RNR_RETRY = (1<<11),
1226 IB_QP_RQ_PSN = (1<<12),
1227 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1228 IB_QP_ALT_PATH = (1<<14),
1229 IB_QP_MIN_RNR_TIMER = (1<<15),
1230 IB_QP_SQ_PSN = (1<<16),
1231 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1232 IB_QP_PATH_MIG_STATE = (1<<18),
1233 IB_QP_CAP = (1<<19),
1234 IB_QP_DEST_QPN = (1<<20),
1235 IB_QP_RESERVED1 = (1<<21),
1236 IB_QP_RESERVED2 = (1<<22),
1237 IB_QP_RESERVED3 = (1<<23),
1238 IB_QP_RESERVED4 = (1<<24),
1239 IB_QP_RATE_LIMIT = (1<<25),
1240
1241 IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1242};
1243
1244enum ib_qp_state {
1245 IB_QPS_RESET,
1246 IB_QPS_INIT,
1247 IB_QPS_RTR,
1248 IB_QPS_RTS,
1249 IB_QPS_SQD,
1250 IB_QPS_SQE,
1251 IB_QPS_ERR
1252};
1253
1254enum ib_mig_state {
1255 IB_MIG_MIGRATED,
1256 IB_MIG_REARM,
1257 IB_MIG_ARMED
1258};
1259
1260enum ib_mw_type {
1261 IB_MW_TYPE_1 = 1,
1262 IB_MW_TYPE_2 = 2
1263};
1264
1265struct ib_qp_attr {
1266 enum ib_qp_state qp_state;
1267 enum ib_qp_state cur_qp_state;
1268 enum ib_mtu path_mtu;
1269 enum ib_mig_state path_mig_state;
1270 u32 qkey;
1271 u32 rq_psn;
1272 u32 sq_psn;
1273 u32 dest_qp_num;
1274 int qp_access_flags;
1275 struct ib_qp_cap cap;
1276 struct rdma_ah_attr ah_attr;
1277 struct rdma_ah_attr alt_ah_attr;
1278 u16 pkey_index;
1279 u16 alt_pkey_index;
1280 u8 en_sqd_async_notify;
1281 u8 sq_draining;
1282 u8 max_rd_atomic;
1283 u8 max_dest_rd_atomic;
1284 u8 min_rnr_timer;
1285 u32 port_num;
1286 u8 timeout;
1287 u8 retry_cnt;
1288 u8 rnr_retry;
1289 u32 alt_port_num;
1290 u8 alt_timeout;
1291 u32 rate_limit;
1292 struct net_device *xmit_slave;
1293};
1294
1295enum ib_wr_opcode {
1296
1297 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1298 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1299 IB_WR_SEND = IB_UVERBS_WR_SEND,
1300 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1301 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1302 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1303 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1304 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1305 IB_WR_LSO = IB_UVERBS_WR_TSO,
1306 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1307 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1308 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1309 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1310 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1311 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1312 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1313
1314
1315 IB_WR_REG_MR = 0x20,
1316 IB_WR_REG_MR_INTEGRITY,
1317
1318
1319
1320
1321 IB_WR_RESERVED1 = 0xf0,
1322 IB_WR_RESERVED2,
1323 IB_WR_RESERVED3,
1324 IB_WR_RESERVED4,
1325 IB_WR_RESERVED5,
1326 IB_WR_RESERVED6,
1327 IB_WR_RESERVED7,
1328 IB_WR_RESERVED8,
1329 IB_WR_RESERVED9,
1330 IB_WR_RESERVED10,
1331};
1332
1333enum ib_send_flags {
1334 IB_SEND_FENCE = 1,
1335 IB_SEND_SIGNALED = (1<<1),
1336 IB_SEND_SOLICITED = (1<<2),
1337 IB_SEND_INLINE = (1<<3),
1338 IB_SEND_IP_CSUM = (1<<4),
1339
1340
1341 IB_SEND_RESERVED_START = (1 << 26),
1342 IB_SEND_RESERVED_END = (1 << 31),
1343};
1344
1345struct ib_sge {
1346 u64 addr;
1347 u32 length;
1348 u32 lkey;
1349};
1350
1351struct ib_cqe {
1352 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1353};
1354
1355struct ib_send_wr {
1356 struct ib_send_wr *next;
1357 union {
1358 u64 wr_id;
1359 struct ib_cqe *wr_cqe;
1360 };
1361 struct ib_sge *sg_list;
1362 int num_sge;
1363 enum ib_wr_opcode opcode;
1364 int send_flags;
1365 union {
1366 __be32 imm_data;
1367 u32 invalidate_rkey;
1368 } ex;
1369};
1370
1371struct ib_rdma_wr {
1372 struct ib_send_wr wr;
1373 u64 remote_addr;
1374 u32 rkey;
1375};
1376
/* Upcast from the embedded ib_send_wr to its containing ib_rdma_wr. */
static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
1381
1382struct ib_atomic_wr {
1383 struct ib_send_wr wr;
1384 u64 remote_addr;
1385 u64 compare_add;
1386 u64 swap;
1387 u64 compare_add_mask;
1388 u64 swap_mask;
1389 u32 rkey;
1390};
1391
/* Upcast from the embedded ib_send_wr to its containing ib_atomic_wr. */
static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}
1396
/* UD (unreliable datagram) send work request. */
struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;	/* address handle for the destination */
	void			*header;	/* headers for LSO, presumably — see hlen/mss */
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index;	/* valid for GSI only */
	u32			port_num;	/* valid for DR SMPs on switch only */
};
1408
1409static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1410{
1411 return container_of(wr, struct ib_ud_wr, wr);
1412}
1413
/* Memory registration WR (IB_WR_REG_MR / IB_WR_REG_MR_INTEGRITY). */
struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;	/* key to assign to the MR */
	int			access;	/* enum ib_access_flags */
};
1420
1421static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1422{
1423 return container_of(wr, struct ib_reg_wr, wr);
1424}
1425
/* Receive work request; chained via ->next like ib_send_wr. */
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};
1435
/* MR/MW access flags; values mirror the uverbs ABI definitions 1:1. */
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	/* all mandatory bits up to HUGETLB plus the optional range */
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
1451
1452
1453
1454
1455
/* Which aspects of an MR a rereg_user_mr() call changes. */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};
1462
1463struct ib_umem;
1464
/*
 * Why a uverbs object is being torn down; consumed by the uobject
 * destruction machinery (NOTE(review): exact per-value semantics are
 * defined by the uverbs core, not visible here).
 */
enum rdma_remove_reason {
	/* Userspace requested uobject deletion or initial try_destroy */
	RDMA_REMOVE_DESTROY,
	/* Context is being cleaned-up, but commit was just completed */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};
1480
/* Per-object RDMA cgroup linkage; empty when cgroup support is off. */
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;	/* owner rdma cgroup */
#endif
};
1486
/* Per-open-file userspace verbs context. */
struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;	/* user mmap entries keyed by page offset */
};
1498
/* Kernel-side representation of one userspace verbs object handle. */
struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};
1515
/* User-space command buffers passed through a verbs call. */
struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};
1522
/* Protection domain. */
struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};
1538
/* XRC domain. */
struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;	/* protects tgt_qps */
	struct xarray		tgt_qps;
};
1546
/* Address handle. */
struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};
1554
/* CQ completion notification callback; cq_context is the user's opaque pointer. */
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1556
/* How completions of a CQ are processed. */
enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller polls explicitly, not pooled */
};
1565
/* Completion queue. */
struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;		/* total CQ entries */
	unsigned int		cqe_used;	/* entries in use (CQ pool) */
	atomic_t          	usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;	/* scratch WC array for ib_process_cq_direct-style polling */
	struct list_head        pool_entry;	/* membership in a device CQ pool */
	union {
		struct irq_poll		iop;	/* IB_POLL_SOFTIRQ */
		struct work_struct	work;	/* IB_POLL_WORKQUEUE* */
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;	/* dynamic interrupt moderation state */

	/* updated only by trace points */
	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1596
/* Shared receive queue. */
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;	/* for TM/XRC SRQ types */
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1621
/* Capabilities of raw-packet QPs/WQs as reported by the device. */
enum ib_raw_packet_caps {
	/*
	 * Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/*
	 * Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/*
	 * When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};
1637
/* WQ type; only receive queues exist today, value mirrors the uverbs ABI. */
enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

/* WQ state machine. */
enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};
1647
/* Work queue (receive side, used with RSS indirection tables). */
struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		    *wq_context;
	void		    (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32		wq_num;
	enum ib_wq_state       state;
	enum ib_wq_type	wq_type;
	atomic_t		usecnt;
};
1660
/* WQ creation flags; values mirror the uverbs ABI. */
enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};
1668
/* Attributes used to create a WQ. */
struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type	wq_type;
	u32		max_wr;		/* maximum outstanding WRs */
	u32		max_sge;	/* maximum SGEs per WR */
	struct	ib_cq	       *cq;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags; /* Use enum ib_wq_flags */
};

/* Which fields of ib_wq_attr a modify_wq() call applies. */
enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

/* Attributes for modify_wq(). */
struct ib_wq_attr {
	enum	ib_wq_state	wq_state;	/* desired state */
	enum	ib_wq_state	curr_wq_state;	/* assumed current state */
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};
1691
/* RSS receive-WQ indirection table. */
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32		ind_tbl_num;
	u32		log_ind_tbl_size;	/* table has 1 << log_ind_tbl_size entries */
	struct ib_wq	**ind_tbl;
};

/* Attributes used to create an indirection table. */
struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq	**ind_tbl;
};
1706
/* Tracking state of a (port, pkey) pair for QP security. */
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

/* One (port, pkey) association of a secured QP. */
struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u32			port_num;
	struct list_head	qp_list;	/* membership in per-pkey QP list */
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;		/* back-pointer to owning security struct */
};

/* Primary and alternate path (port, pkey) pairs of a QP. */
struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};
1728
/* LSM/pkey enforcement state attached to a QP. */
struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/*
	 * A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;	/* opaque LSM blob */
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
1745
1746
1747
1748
1749
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;	/* protects mrs_used/rdma_mrs/sig_mrs */
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;	/* handles sharing a real XRC QP */
	struct ib_qp           *real_qp; /* real QP */
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u32			port;

	bool			integrity_en;	/* T10-PI integrity enabled */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bind to */
	struct rdma_counter    *counter;
};
1790
/* Device memory region. */
struct ib_dm {
	struct ib_device  *device;
	u32		   length;	/* length in bytes */
	u32		   flags;
	struct ib_uobject *uobject;
	atomic_t	   usecnt;
};
1798
/* Memory region. */
struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	bool		   need_inval;	/* requires invalidation before reuse */
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm      *dm;		/* non-NULL for device-memory MRs */
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1821
/* Memory window. */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};
1829
1830
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};
1845
1846
/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP                = 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,	/* OR'd in to match inner headers */
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1870
/* Flow steering rule flags. */
enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};
1876
/*
 * Each flow filter below comes as a val/mask pair inside its spec struct;
 * real_sz is a flexible-array end marker so sizeof() yields the size of
 * the fixed match fields only.
 */
struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
				    last have this flag set */
};
1913
/* IPv4 match fields (val/mask pair in ib_flow_spec_ipv4). */
struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;	/* enum ib_ipv4_flags */
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

/* IPv6 match fields. */
struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

/* TCP/UDP port match fields. */
struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};
1963
/* Tunnel-id match (e.g. VXLAN VNI). */
struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32			      type;
	u16			      size;
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};

/* ESP SPI/sequence match fields. */
struct ib_flow_esp_filter {
	__be32	spi;
	__be32  seq;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_esp {
	u32                           type;
	u16			      size;
	struct ib_flow_esp_filter     val;
	struct ib_flow_esp_filter     mask;
};

/* GRE header match fields. */
struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;	/* C, K, S, reserved and version bits of the GRE header */
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_gre {
	u32                           type;
	u16			      size;
	struct ib_flow_gre_filter     val;
	struct ib_flow_gre_filter     mask;
};

/* MPLS label-stack-entry match field. */
struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8	real_sz[];
};

struct ib_flow_spec_mpls {
	u32                           type;
	u16			      size;
	struct ib_flow_mpls_filter     val;
	struct ib_flow_mpls_filter     mask;
};
2020
/* Action: tag matching packets with tag_id in the completion. */
struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;
	u16			      size;
	u32                           tag_id;
};

/* Action: drop matching packets. */
struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;
	u16			      size;
};

/* Action: apply a pre-created flow action object. */
struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_action	     *act;
};

/* Indices into the counters buffer returned by read_counters(). */
enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

/* Action: count matching packets into a counters object. */
struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};
2048
/*
 * One entry of a flow rule; the anonymous struct overlays the common
 * type/size header shared by every spec variant.
 */
union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6        ipv6;
	struct ib_flow_spec_tunnel      tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_gre		gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag  flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};
2068
/* A complete flow rule: header followed by num_of_specs variable specs. */
struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;	/* total size of the attr including specs */
	u16	     priority;
	u32	     flags;	/* enum ib_flow_flags */
	u8	     num_of_specs;
	u32	     port;
	union ib_flow_spec flows[];
};

/* An instantiated flow rule attached to a QP. */
struct ib_flow {
	struct ib_qp		*qp;
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};
2084
enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

/* ESP keying material, selected by the uverbs keymat protocol enum. */
struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

/* ESP replay-protection configuration. */
struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to have the user-space flags in the lower 32 bits
	 * and kernel-only flags starting from bit 32.
	 */

	/* Kernel flags */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

/* Singly linked list of flow specs (used for ESP encapsulation). */
struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};
2119
/* Full ESP flow-action configuration. */
struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats		*keymat;
	struct ib_flow_action_attrs_esp_replays		*replay;
	struct ib_flow_spec_list			*encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * Value of 0 is a valid value.
	 */
	u32						esn;
	u32						spi;
	u32						seq;
	u32						tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64						flags;
	u64						hard_limit_pkts;
};

/* An instantiated flow action object. */
struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};
2142
2143struct ib_mad;
2144
/* Flags passed to the driver's process_mad() hook. */
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

/* Bitmask result of process_mad(). */
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};
2157
/* Cached per-port attributes maintained by the core. */
struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	u8                     lmc;
	enum ib_port_state     port_state;
};

/* Port attributes fixed at registration time. */
struct ib_port_immutable {
	int                           pkey_tbl_len;
	int                           gid_tbl_len;
	u32                           core_cap_flags;
	u32                           max_mad_size;
};

/* All per-port state of an ib_device (one entry per physical port). */
struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;	/* protects pkey_list */

	spinlock_t netdev_lock;		/* protects netdev updates */

	struct list_head pkey_list;

	struct ib_port_cache cache;

	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct ib_port *sysfs;
};
2191
2192
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void              *clnt_priv;
	struct ib_device  *hca;
	u32		   port_num;
	int                mtu;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
	/* timeout */
	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};

/* Parameters a driver supplies for rdma netdev allocation. */
struct rdma_netdev_alloc_params {
	size_t sizeof_priv;
	unsigned int txqs;
	unsigned int rxqs;
	void *param;

	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
				      struct net_device *netdev, void *param);
};
2239
/* Per-device ODP (on-demand paging) event counters. */
struct ib_odp_counters {
	atomic64_t faults;
	atomic64_t invalidations;
	atomic64_t prefetch;
};

/* A counters object that flow rules can feed via ACTION_COUNT. */
struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t	usecnt;
};

/* Arguments for the read_counters() driver hook. */
struct ib_counters_read_attr {
	u64	*counters_buff;
	u32	ncounters;
	u32	flags; /* use enum ib_read_counters_flags */
};
2258
2259struct uverbs_attr_bundle;
2260struct iw_cm_id;
2261struct iw_cm_conn_param;
2262
/*
 * Fills a driver's .size_ib_xxx member with sizeof the driver struct and
 * statically verifies that the core ib_xxx struct is embedded as the
 * FIRST member (offset 0) and has the expected type.
 */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

/* Zero-allocate a driver object of the registered size, custom gfp flags. */
#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
					   gfp, false))

/* Same, but NUMA-aware when the driver implements get_numa_node(). */
#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
					   GFP_KERNEL, true))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

/* Declares the per-driver object size member used by the macros above. */
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2283
/* One user-mmap'able region tracked in ucontext->mmap_xa. */
struct rdma_user_mmap_entry {
	struct kref ref;
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;	/* first page offset in the xarray */
	size_t npages;
	bool driver_removed;		/* set once the driver revokes the entry */
};
2291
2292
2293static inline u64
2294rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2295{
2296 return (u64)entry->start_pgoff << PAGE_SHIFT;
2297}
2298
2299
2300
2301
2302
2303
/**
 * struct ib_device_ops - device operations table
 *
 * The vtable a hardware driver fills in when registering an ib_device.
 * Unimplemented optional callbacks are left NULL.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	u32 uverbs_abi_ver;
	unsigned int uverbs_no_driver_id_binding:1;

	/*
	 * NOTE: New drivers should not make use of device_group; instead new
	 * device parameter should be exposed via netlink command. This
	 * mechanism exists only for existing drivers.
	 */
	const struct attribute_group *device_group;
	const struct attribute_group **port_groups;

	/* Fast-path verbs. */
	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u32 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
			   size_t *out_mad_size, u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u32 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u32 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/*
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u32 port_num);
	/*
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u32 port_num);
	/*
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/*
	 * query_gid should be return GID value for @device, when @port_num
	 * link layer is either IB or iWarp. It is no-op if @port_num port
	 * is RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
			 union ib_gid *gid);
	/*
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver
	 * may allocate memory to contain this information and store it in
	 * @context when a new GID entry is written to. Params are consistent
	 * until the next call of add_gid or delete_gid. The function should
	 * return 0 on success or error otherwise. The function could be
	 * called concurrently for different ports. This function is only
	 * called when roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/*
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
			  u16 *pkey);
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	/*
	 * This will be called once refcount of an entry in mmap_xa reaches
	 * zero. The type of the memory that was mapped may differ between
	 * entries and is opaque to the rdma_user_mmap interface.
	 * Therefore needs to be implemented by the driver in mmap_free.
	 */
	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			 struct ib_udata *udata);
	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			      struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
			 struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
					    u64 length, u64 virt_addr, int fd,
					    int mr_access_flags,
					    struct ib_udata *udata);
	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
				       u64 length, u64 virt_addr,
				       int mr_access_flags, struct ib_pd *pd,
				       struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);

	/*
	 * Kernel users should universally support relaxed ordering (RO), as
	 * they are designed to read data only after observing the CQE and use
	 * the DMA API correctly.
	 *
	 * Some drivers implicitly enable RO if platform supports it.
	 */
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
			    struct ifla_vf_stats *stats);
	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
			   struct ifla_vf_guid *node_guid,
			   struct ifla_vf_guid *port_guid);
	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
				    struct ib_rwq_ind_table_init_attr *init_attr,
				    struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	int (*create_counters)(struct ib_counters *counters,
			       struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);

	/*
	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
	 *   fill in the driver's stat names/counters. The returned struct is
	 *   owned by the core after return.
	 */
	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
						     u32 port_num);
	/*
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u32 port, int index);

	/*
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);

	/* Device lifecycle control */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);

	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);

	/*
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/*
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/*
	 * counter_dealloc -De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/*
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 * the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/*
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);

	/*
	 * Allows rdma drivers to add their own restrack attributes
	 * dumped via 'rdma stat' iproute2 command.
	 */
	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);

	/* query driver for its ucontext properties */
	int (*query_ucontext)(struct ib_ucontext *context,
			      struct uverbs_attr_bundle *attrs);

	/*
	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
	 * Everyone else relies on Linux memory management model.
	 */
	int (*get_numa_node)(struct ib_device *dev);

	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_counters);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_mw);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_qp);
	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
2659
struct ib_core_device {
	/* device must be the first element in structure until,
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;	/* network namespace this compat device lives in */
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};
2670
2671struct rdma_restrack_root;
/* The core representation of an RDMA device. */
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device                *dma_device;
	struct ib_device_ops	     ops;
	char                          name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head              event_handler_list;
	/* Protects event_handler_list */
	struct rw_semaphore event_handler_rwsem;

	/* Protects QP's event_handler calls and open_list */
	spinlock_t qp_open_list_lock;

	struct rw_semaphore	      client_data_rwsem;
	struct xarray                 client_data;
	struct mutex                  unregistration_lock;

	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
	rwlock_t cache_lock;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int			      num_comp_vectors;

	union {
		struct device		dev;
		struct ib_core_device	coredev;
	};

	/*
	 * First group is for device attributes,
	 * Second group is for driver provided attributes (optional).
	 * Third group is for the hw_stats
	 * It is a NULL terminated array.
	 */
	const struct attribute_group	*groups[4];

	u64			     uverbs_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
	u16                          kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16                          use_cq_dim:1;
	u8                           node_type;
	u32			     phys_port_cnt;
	struct ib_device_attr        attrs;
	struct hw_stats_device_data *hw_stats_data;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;

	spinlock_t                   cq_pools_lock;	/* protects cq_pools */
	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];

	struct rdma_restrack_root *res;

	const struct uapi_definition   *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWarp CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
	u32 lag_flags;
};
2759
2760static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2761 gfp_t gfp, bool is_numa_aware)
2762{
2763 if (is_numa_aware && dev->ops.get_numa_node)
2764 return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2765
2766 return kzalloc(size, gfp);
2767}
2768
2769struct ib_client_nl_info;
2770struct ib_client {
2771 const char *name;
2772 int (*add)(struct ib_device *ibdev);
2773 void (*remove)(struct ib_device *, void *client_data);
2774 void (*rename)(struct ib_device *dev, void *client_data);
2775 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2776 struct ib_client_nl_info *res);
2777 int (*get_global_nl_info)(struct ib_client_nl_info *res);
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794 struct net_device *(*get_net_dev_by_params)(
2795 struct ib_device *dev,
2796 u32 port,
2797 u16 pkey,
2798 const union ib_gid *gid,
2799 const struct sockaddr *addr,
2800 void *client_data);
2801
2802 refcount_t uses;
2803 struct completion uses_zero;
2804 u32 client_id;
2805
2806
2807 u8 no_kverbs_req:1;
2808};
2809
2810
2811
2812
2813
2814
2815
/*
 * struct ib_block_iter - state for rdma_for_each_block(); treat all
 * double-underscore fields as internal to __rdma_block_iter_start/next().
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};
2824
struct ib_device *_ib_alloc_device(size_t size);
/*
 * ib_alloc_device - allocate a driver structure with an embedded ib_device.
 * @drv_struct: the driver's wrapper structure type
 * @member: name of the struct ib_device member inside @drv_struct
 *
 * The BUILD_BUG_ON_ZERO(offsetof(...)) term adds zero to the size but
 * fails the build unless @member is the FIRST field of @drv_struct,
 * which the core's alloc/free paths rely on.
 */
#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)
2831
2832void ib_dealloc_device(struct ib_device *device);
2833
2834void ib_get_device_fw_str(struct ib_device *device, char *str);
2835
2836int ib_register_device(struct ib_device *device, const char *name,
2837 struct device *dma_device);
2838void ib_unregister_device(struct ib_device *device);
2839void ib_unregister_driver(enum rdma_driver_id driver_id);
2840void ib_unregister_device_and_put(struct ib_device *device);
2841void ib_unregister_device_queued(struct ib_device *ib_dev);
2842
2843int ib_register_client (struct ib_client *client);
2844void ib_unregister_client(struct ib_client *client);
2845
2846void __rdma_block_iter_start(struct ib_block_iter *biter,
2847 struct scatterlist *sglist,
2848 unsigned int nents,
2849 unsigned long pgsz);
2850bool __rdma_block_iter_next(struct ib_block_iter *biter);
2851
2852
2853
2854
2855
2856
2857static inline dma_addr_t
2858rdma_block_iter_dma_address(struct ib_block_iter *biter)
2859{
2860 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2861}
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
/*
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's
 * aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
/*
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * Returns the client context data set with ib_set_client_data(), or NULL
 * if none was set.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
2893void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2894 void *data);
2895void ib_set_device_ops(struct ib_device *device,
2896 const struct ib_device_ops *ops);
2897
2898int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2899 unsigned long pfn, unsigned long size, pgprot_t prot,
2900 struct rdma_user_mmap_entry *entry);
2901int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2902 struct rdma_user_mmap_entry *entry,
2903 size_t length);
2904int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2905 struct rdma_user_mmap_entry *entry,
2906 size_t length, u32 min_pgoff,
2907 u32 max_pgoff);
2908
2909struct rdma_user_mmap_entry *
2910rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2911 unsigned long pgoff);
2912struct rdma_user_mmap_entry *
2913rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2914 struct vm_area_struct *vma);
2915void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2916
2917void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2918
2919static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2920{
2921 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2922}
2923
2924static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2925{
2926 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2927}
2928
/*
 * ib_is_buffer_cleared - check that a userspace buffer is all zero bytes.
 * @p: userspace pointer
 * @len: number of bytes to check
 *
 * Returns true only when the whole buffer is readable and zeroed; any
 * copy failure or oversized request is treated as "not cleared".
 */
static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	/* Bound the temporary kernel allocation below. */
	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	/* memchr_inv() returns NULL iff every byte matches 0. */
	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}
2946
/*
 * ib_is_udata_cleared - check that a slice of the udata input buffer is
 * zeroed; used to reject requests that set bits the kernel doesn't know.
 */
static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2970 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2971
2972void ib_register_event_handler(struct ib_event_handler *event_handler);
2973void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2974void ib_dispatch_event(const struct ib_event *event);
2975
2976int ib_query_port(struct ib_device *device,
2977 u32 port_num, struct ib_port_attr *port_attr);
2978
2979enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2980 u32 port_num);
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
/*
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004static inline u32 rdma_start_port(const struct ib_device *device)
3005{
3006 return rdma_cap_ib_switch(device) ? 0 : 1;
3007}
3008
3009
3010
3011
3012
3013
/*
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device
 * @iter: The unsigned int to store the port number
 *
 * The "device + BUILD_BUG_ON_ZERO(...)" term is a compile-time type check:
 * it adds zero to the pointer but fails the build unless @iter is a u32.
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device +				       \
				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
								   iter)));    \
	     iter <= rdma_end_port(device); iter++)
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028static inline u32 rdma_end_port(const struct ib_device *device)
3029{
3030 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3031}
3032
/* rdma_is_port_valid - nonzero iff @port lies in the device's valid
 * [rdma_start_port(), rdma_end_port()] range.
 */
static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	if (port < rdma_start_port(device))
		return 0;

	return port <= rdma_end_port(device);
}
3039
/* rdma_is_grh_required - port requires a GRH on every packet */
static inline bool rdma_is_grh_required(const struct ib_device *device,
					u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

/* rdma_protocol_ib - port speaks native InfiniBand */
static inline bool rdma_protocol_ib(const struct ib_device *device,
				    u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

/* rdma_protocol_roce - port speaks RoCE, either v1 (eth) or v2 (UDP) */
static inline bool rdma_protocol_roce(const struct ib_device *device,
				      u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

/* rdma_protocol_roce_udp_encap - port speaks RoCEv2 (UDP encapsulated) */
static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
						u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

/* rdma_protocol_roce_eth_encap - port speaks RoCEv1 (ethertype encapsulated) */
static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
						u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

/* rdma_protocol_iwarp - port speaks iWARP */
static inline bool rdma_protocol_iwarp(const struct ib_device *device,
				       u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

/* rdma_ib_or_roce - port speaks either IB or RoCE */
static inline bool rdma_ib_or_roce(const struct ib_device *device,
				   u32 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

/* rdma_protocol_raw_packet - port supports raw packet QPs */
static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
					    u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

/* rdma_protocol_usnic - port speaks the usNIC protocol */
static inline bool rdma_protocol_usnic(const struct ib_device *device,
				       u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
/*
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
/*
 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_MAD;
}
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
/*
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
/*
 * rdma_cap_ib_cm - Check if the port of a device supports the Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
/*
 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
/*
 * rdma_cap_ib_sa - Check if the port of a device supports the Infiniband
 * Subnet Administration interface.
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
/*
 * rdma_cap_ib_mcast - Check if the port of a device supports Infiniband
 * multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Multicast registration goes through the SA, so this is equivalent to
 * SA support.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
				     u32 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
/*
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address (AF_IB addressing).
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
/*
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle (address handles carry L2 ethernet info).
 * @device: Device to check
 * @port_num: Port number to check
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
/*
 * rdma_cap_opa_ah - Check if the port of device supports OPA extended
 * addressing in address handles.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * All OPA_AH bits must be set, hence the == comparison.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
/*
 * rdma_max_mad_size - Return the max MAD size required by this RDMA port.
 * @device: Device
 * @port_num: Port number
 *
 * Value is set by the driver in its immutable port data.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device,
				       u32 port_num)
{
	return device->port_data[port_num].immutable.max_mad_size;
}
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
/*
 * rdma_cap_roce_gid_table - Check if the port of device uses the core
 * roce_gid_table (i.e. it is RoCE and implements add_gid/del_gid).
 * @device: The device to check
 * @port_num: The port number to check
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u32 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->ops.add_gid && device->ops.del_gid;
}
3348
3349
3350
3351
/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
3360
3361
3362
3363
3364
3365
3366
3367
/*
 * rdma_core_cap_opa_port - Return whether the RDMA Port is an Intel OPA
 * port.  All INTEL_OPA bits must be set, hence the == comparison.
 * @device: Device
 * @port_num: Port number
 */
static inline bool rdma_core_cap_opa_port(struct ib_device *device,
					  u32 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
}
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3385 int mtu)
3386{
3387 if (rdma_core_cap_opa_port(device, port))
3388 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3389 else
3390 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3391}
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3402 struct ib_port_attr *attr)
3403{
3404 if (rdma_core_cap_opa_port(device, port))
3405 return attr->phys_mtu;
3406 else
3407 return ib_mtu_enum_to_int(attr->max_mtu);
3408}
3409
3410int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3411 int state);
3412int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3413 struct ifla_vf_info *info);
3414int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3415 struct ifla_vf_stats *stats);
3416int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3417 struct ifla_vf_guid *node_guid,
3418 struct ifla_vf_guid *port_guid);
3419int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3420 int type);
3421
3422int ib_query_pkey(struct ib_device *device,
3423 u32 port_num, u16 index, u16 *pkey);
3424
3425int ib_modify_device(struct ib_device *device,
3426 int device_modify_mask,
3427 struct ib_device_modify *device_modify);
3428
3429int ib_modify_port(struct ib_device *device,
3430 u32 port_num, int port_modify_mask,
3431 struct ib_port_modify *port_modify);
3432
3433int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3434 u32 *port_num, u16 *index);
3435
3436int ib_find_pkey(struct ib_device *device,
3437 u32 port_num, u16 pkey, u16 *index);
3438
enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * clients to create a memory window which can be used to access all
	 * memory without faulting, or to create a memory registration covering
	 * all memory in the system with any access rights.
	 *
	 * "Unsafe" because such an rkey defeats memory protection; only use
	 * it when absolutely required.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
};
3451
3452struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3453 const char *caller);
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
/*
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags (enum ib_pd_flags)
 *
 * Records the calling module name (KBUILD_MODNAME) for resource tracking.
 */
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3468
3469int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3470
3471
3472
3473
3474
3475
3476
/*
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user contexts use ib_dealloc_pd_user() with a valid udata.
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret = ib_dealloc_pd_user(pd, NULL);

	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
}
3483
enum rdma_create_ah_flags {
	/* In a sleepable context */
	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
};
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3499 u32 flags);
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3514 struct rdma_ah_attr *ah_attr,
3515 struct ib_udata *udata);
3516
3517
3518
3519
3520
3521
3522
3523
3524int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3525 enum rdma_network_type net_type,
3526 union ib_gid *sgid, union ib_gid *dgid);
3527
3528
3529
3530
3531
3532int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3553 const struct ib_wc *wc, const struct ib_grh *grh,
3554 struct rdma_ah_attr *ah_attr);
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3569 const struct ib_grh *grh, u32 port_num);
3570
3571
3572
3573
3574
3575
3576
3577
3578int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3579
3580
3581
3582
3583
3584
3585
3586
3587int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3588
enum rdma_destroy_ah_flags {
	/* In a sleepable context */
	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};
3593
3594
3595
3596
3597
3598
3599
3600int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3601
3602
3603
3604
3605
3606
3607
3608
/*
 * rdma_destroy_ah - Destroy a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user AHs use rdma_destroy_ah_user() with a valid udata.
 */
static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	int ret = rdma_destroy_ah_user(ah, flags, NULL);

	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
}
3615
3616struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3617 struct ib_srq_init_attr *srq_init_attr,
3618 struct ib_usrq_object *uobject,
3619 struct ib_udata *udata);
3620static inline struct ib_srq *
3621ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3622{
3623 if (!pd->device->ops.create_srq)
3624 return ERR_PTR(-EOPNOTSUPP);
3625
3626 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3627}
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641int ib_modify_srq(struct ib_srq *srq,
3642 struct ib_srq_attr *srq_attr,
3643 enum ib_srq_attr_mask srq_attr_mask);
3644
3645
3646
3647
3648
3649
3650
3651int ib_query_srq(struct ib_srq *srq,
3652 struct ib_srq_attr *srq_attr);
3653
3654
3655
3656
3657
3658
3659int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3660
3661
3662
3663
3664
3665
3666
3667static inline void ib_destroy_srq(struct ib_srq *srq)
3668{
3669 int ret = ib_destroy_srq_user(srq, NULL);
3670
3671 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3672}
3673
3674
3675
3676
3677
3678
3679
3680
/*
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.  May be NULL,
 *   in which case a local dummy slot absorbs the driver's write
 *   (the GNU "?:" keeps @bad_recv_wr when it is non-NULL).
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}
3690
3691struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3692 struct ib_qp_init_attr *qp_init_attr,
3693 const char *caller);
3694
3695
3696
3697
3698
3699
3700
3701
/*
 * ib_create_qp - Creates a kernel QP associated with the specific protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @init_attr: A list of initial attributes required to create the QP.
 *
 * Wrapper that records the calling module name for resource tracking.
 */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *init_attr)
{
	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
}
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719int ib_modify_qp_with_udata(struct ib_qp *qp,
3720 struct ib_qp_attr *attr,
3721 int attr_mask,
3722 struct ib_udata *udata);
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733int ib_modify_qp(struct ib_qp *qp,
3734 struct ib_qp_attr *qp_attr,
3735 int qp_attr_mask);
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748int ib_query_qp(struct ib_qp *qp,
3749 struct ib_qp_attr *qp_attr,
3750 int qp_attr_mask,
3751 struct ib_qp_init_attr *qp_init_attr);
3752
3753
3754
3755
3756
3757
3758int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3759
3760
3761
3762
3763
3764
3765
/*
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user QPs use ib_destroy_qp_user() with a valid udata.
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}
3770
3771
3772
3773
3774
3775
3776
3777
3778struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3779 struct ib_qp_open_attr *qp_open_attr);
3780
3781
3782
3783
3784
3785
3786
3787
3788int ib_close_qp(struct ib_qp *qp);
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803static inline int ib_post_send(struct ib_qp *qp,
3804 const struct ib_send_wr *send_wr,
3805 const struct ib_send_wr **bad_send_wr)
3806{
3807 const struct ib_send_wr *dummy;
3808
3809 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3810}
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820static inline int ib_post_recv(struct ib_qp *qp,
3821 const struct ib_recv_wr *recv_wr,
3822 const struct ib_recv_wr **bad_recv_wr)
3823{
3824 const struct ib_recv_wr *dummy;
3825
3826 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3827}
3828
3829struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3830 int comp_vector, enum ib_poll_context poll_ctx,
3831 const char *caller);
/*
 * ib_alloc_cq - allocate a completion queue on a given completion vector,
 * recording the calling module name for resource tracking.
 */
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
					int nr_cqe, int comp_vector,
					enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     KBUILD_MODNAME);
}
3839
3840struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3841 int nr_cqe, enum ib_poll_context poll_ctx,
3842 const char *caller);
3843
3844
3845
3846
3847
3848
3849
3850
/*
 * ib_alloc_cq_any: Allocate kernel CQ, letting the core pick the
 * completion vector.
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}
3858
3859void ib_free_cq(struct ib_cq *cq);
3860int ib_process_cq_direct(struct ib_cq *cq, int budget);
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875struct ib_cq *__ib_create_cq(struct ib_device *device,
3876 ib_comp_handler comp_handler,
3877 void (*event_handler)(struct ib_event *, void *),
3878 void *cq_context,
3879 const struct ib_cq_init_attr *cq_attr,
3880 const char *caller);
/* ib_create_cq - wrapper around __ib_create_cq() that records the calling
 * module name (KBUILD_MODNAME) for resource tracking.
 */
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3883
3884
3885
3886
3887
3888
3889
3890
3891int ib_resize_cq(struct ib_cq *cq, int cqe);
3892
3893
3894
3895
3896
3897
3898
3899
3900int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3901
3902
3903
3904
3905
3906
3907int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3908
3909
3910
3911
3912
3913
3914
3915static inline void ib_destroy_cq(struct ib_cq *cq)
3916{
3917 int ret = ib_destroy_cq_user(cq, NULL);
3918
3919 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3920}
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
/*
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
/*
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags: requested notification type (solicited/next completion) plus
 *   optionally IB_CQ_REPORT_MISSED_EVENTS; see enum ib_cq_notify_flags.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}
3972
3973struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3974 int comp_vector_hint,
3975 enum ib_poll_context poll_ctx);
3976
3977void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3978
3979
3980
3981
3982
3983
/*
 * ib_uses_virt_dma - Check if the device uses virtual DMA addresses
 * (software devices with no dma_device; kernel virtual addresses are
 * used directly instead of real DMA mappings).
 * @dev: device to check
 */
static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}
3988
3989
3990
3991
3992
3993
/*
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 *
 * Virtual-DMA devices never fail mapping, so always report success.
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (ib_uses_virt_dma(dev))
		return 0;
	return dma_mapping_error(dev->dma_device, dma_addr);
}
4000
4001
4002
4003
4004
4005
4006
4007
/*
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 *
 * Virtual-DMA devices simply use the kernel address itself.
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
4016
4017
4018
4019
4020
4021
4022
4023
/*
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the dma_addr was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	/* Nothing to undo for virtual DMA */
	if (!ib_uses_virt_dma(dev))
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
4031
4032
4033
4034
4035
4036
4037
4038
4039
/*
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 *
 * Virtual-DMA devices use the page's kernel virtual address directly.
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)(page_address(page) + offset);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}
4050
4051
4052
4053
4054
4055
4056
4057
/*
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the dma_addr was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	/* Nothing to undo for virtual DMA */
	if (!ib_uses_virt_dma(dev))
		dma_unmap_page(dev->dma_device, addr, size, direction);
}
4065
4066int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
/*
 * ib_dma_map_sg_attrs - Map a scatter/gather list to DMA addresses with
 * extra DMA attributes; virtual-DMA devices route through
 * ib_dma_virt_map_sg() instead.
 */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	if (ib_uses_virt_dma(dev))
		return ib_dma_virt_map_sg(dev, sg, nents);
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}
4077
/*
 * ib_dma_unmap_sg_attrs - Unmap a scatter/gather list mapped with
 * ib_dma_map_sg_attrs(); no-op for virtual-DMA devices.
 */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
				   dma_attrs);
}
4087
4088
4089
4090
4091
4092
4093
4094
4095static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4096 struct sg_table *sgt,
4097 enum dma_data_direction direction,
4098 unsigned long dma_attrs)
4099{
4100 if (ib_uses_virt_dma(dev)) {
4101 ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4102 return 0;
4103 }
4104 return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4105}
4106
/*
 * ib_dma_unmap_sgtable_attrs - Unmap a scatter/gather table mapped with
 * ib_dma_map_sgtable_attrs(); no-op for virtual-DMA devices.
 */
static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
					      struct sg_table *sgt,
					      enum dma_data_direction direction,
					      unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
}
4115
4116
4117
4118
4119
4120
4121
4122
/*
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}
4129
4130
4131
4132
4133
4134
4135
4136
/*
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
4143
4144
4145
4146
4147
4148
4149
/*
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * Virtual-DMA devices have no hardware segment limit.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	if (ib_uses_virt_dma(dev))
		return UINT_MAX;
	return dma_get_max_seg_size(dev->dma_device);
}
4156
4157
4158
4159
4160
4161
4162
4163
/*
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	/* Virtual DMA needs no cache maintenance */
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
4172
4173
4174
4175
4176
4177
4178
4179
/*
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	/* Virtual DMA needs no cache maintenance */
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
4188
4189
4190
4191
4192struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4193 u64 virt_addr, int mr_access_flags);
4194
4195
4196int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4197 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4198
4199
4200
4201
4202
4203
4204
4205
4206int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
/*
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * NOTE: for user MRs use ib_dereg_mr_user() with a valid udata.
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}
4221
4222struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4223 u32 max_num_sg);
4224
4225struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4226 u32 max_num_data_sg,
4227 u32 max_num_meta_sg);
4228
4229
4230
4231
4232
4233
4234
/*
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated
 * @newkey: new key to be used (the low 8 bits of both keys)
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
4240
4241
4242
4243
4244
4245
4246static inline u32 ib_inc_rkey(u32 rkey)
4247{
4248 const u32 mask = 0x000000ff;
4249 return ((rkey + 1) & mask) | (rkey & ~mask);
4250}
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4265
4266
4267
4268
4269
4270
4271
4272int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4273
4274struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4275 struct inode *inode, struct ib_udata *udata);
4276int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4277
/*
 * ib_check_mr_access - validate MR access flags against device support.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int ib_check_mr_access(struct ib_device *ib_dev,
				     unsigned int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	/* Reject any bits outside the supported set */
	if (flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;

	/* ODP requires device support for on-demand paging */
	if (flags & IB_ACCESS_ON_DEMAND &&
	    !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		return -EINVAL;
	return 0;
}
4297
/*
 * ib_access_writable - true when the access flags imply the MR's backing
 * memory may be written.
 */
static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set.  "Local write" and "remote write" obviously
	 * require write access.  "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4325 struct ib_mr_status *mr_status);
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
/*
 * ib_device_try_get - Hold a registration lock
 * @dev: The device to lock
 *
 * Returns true if a reference was obtained; fails once the refcount has
 * dropped to zero during unregistration.  Pair with ib_device_put().
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}
4344
4345void ib_device_put(struct ib_device *device);
4346struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4347 enum rdma_driver_id driver_id);
4348struct ib_device *ib_device_get_by_name(const char *name,
4349 enum rdma_driver_id driver_id);
4350struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4351 u16 pkey, const union ib_gid *gid,
4352 const struct sockaddr *addr);
4353int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4354 unsigned int port);
4355struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
4356
4357struct ib_wq *ib_create_wq(struct ib_pd *pd,
4358 struct ib_wq_init_attr *init_attr);
4359int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4360
4361int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4362 unsigned int *sg_offset, unsigned int page_size);
4363int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4364 int data_sg_nents, unsigned int *data_sg_offset,
4365 struct scatterlist *meta_sg, int meta_sg_nents,
4366 unsigned int *meta_sg_offset, unsigned int page_size);
4367
/*
 * ib_map_mr_sg_zbva - map an sg list to an MR using a zero-based virtual
 * address: the iova set by ib_map_mr_sg() is cleared afterwards.
 * Returns the value of ib_map_mr_sg() (number of mapped entries or error).
 */
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}
4379
4380int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4381 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4382
4383void ib_drain_rq(struct ib_qp *qp);
4384void ib_drain_sq(struct ib_qp *qp);
4385void ib_drain_qp(struct ib_qp *qp);
4386
4387int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4388 u8 *width);
4389
4390static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4391{
4392 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4393 return attr->roce.dmac;
4394 return NULL;
4395}
4396
4397static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4398{
4399 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4400 attr->ib.dlid = (u16)dlid;
4401 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4402 attr->opa.dlid = dlid;
4403}
4404
4405static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4406{
4407 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4408 return attr->ib.dlid;
4409 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4410 return attr->opa.dlid;
4411 return 0;
4412}
4413
/* Set the service level (SL) of an address handle attribute. */
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}
4418
/* Return the service level (SL) of an address handle attribute. */
static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}
4423
4424static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4425 u8 src_path_bits)
4426{
4427 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4428 attr->ib.src_path_bits = src_path_bits;
4429 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4430 attr->opa.src_path_bits = src_path_bits;
4431}
4432
4433static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4434{
4435 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4436 return attr->ib.src_path_bits;
4437 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4438 return attr->opa.src_path_bits;
4439 return 0;
4440}
4441
/* Set the OPA-specific "make_grd" flag; silently ignored for non-OPA
 * attribute types, which have no such field.
 */
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}
4448
4449static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4450{
4451 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4452 return attr->opa.make_grd;
4453 return false;
4454}
4455
/* Set the physical port number associated with an AH attribute. */
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
{
	attr->port_num = port_num;
}
4460
/* Return the physical port number associated with an AH attribute. */
static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}
4465
/* Set the static rate of an AH attribute. */
static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}
4471
/* Return the static rate of an AH attribute. */
static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}
4476
/* Overwrite (not OR in) the flags of an AH attribute, e.g. IB_AH_GRH. */
static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}
4482
/* Return the flags of an AH attribute. */
static inline enum ib_ah_flags
	rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}
4488
/* Return a read-only pointer to the GRH fields embedded in the AH
 * attribute.  Callers that need to modify the GRH use
 * rdma_ah_retrieve_grh() instead.
 */
static inline const struct ib_global_route
	*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}
4494
4495
/* Return a mutable pointer to the GRH fields embedded in the AH attribute. */
static inline struct ib_global_route
	*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}
4501
/* Copy a raw destination GID into the AH attribute's GRH.  @dgid must
 * point to at least sizeof(union ib_gid) bytes.
 */
static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}
4508
/* Set the subnet-prefix half of the destination GID in the AH's GRH.
 * @prefix is already in network byte order.
 */
static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}
4516
/* Set the interface-id half of the destination GID in the AH's GRH.
 * @if_id is already in network byte order.
 */
static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}
4524
/**
 * rdma_ah_set_grh - Initialize the GRH portion of an AH attribute
 * @attr: address handle attribute to fill in
 * @dgid: destination GID to copy; NULL leaves the existing dgid untouched
 * @flow_label: GRH flow label
 * @sgid_index: index of the source GID in the port's GID table
 * @hop_limit: GRH hop limit
 * @traffic_class: GRH traffic class
 *
 * Overwrites ah_flags with IB_AH_GRH (marking the GRH as valid) and resets
 * the resolved sgid_attr pointer to NULL; the attribute must be
 * re-resolved before use if a GID attribute reference is required.
 */
static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
4541
4542void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4543void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4544 u32 flow_label, u8 hop_limit, u8 traffic_class,
4545 const struct ib_gid_attr *sgid_attr);
4546void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4547 const struct rdma_ah_attr *src);
4548void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4549 const struct rdma_ah_attr *new);
4550void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4551
4552
4553
4554
4555
4556
4557
/**
 * rdma_ah_find_type - Return the address handle type for a device port
 * @dev: device to be checked
 * @port_num: port number
 *
 * RoCE ports yield ROCE; IB ports yield OPA when the port has OPA extended
 * addressing support, otherwise plain IB; anything else is UNDEFINED.
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u32 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *   In the current implementation the only way to
 *   get the 32bit lid is from other sources for OPA.
 *   For IB, lids will always be 16bits so cast the
 *   value accordingly.
 *
 * @lid: A 32bit LID; warns once if the upper 16 bits are set.
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}
4586
4587
4588
4589
4590
4591
/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID; warns once if the upper 16 bits are set, then
 *   truncates to 16 bits and converts to big-endian.
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608static inline const struct cpumask *
4609ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4610{
4611 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4612 !device->ops.get_vector_affinity)
4613 return NULL;
4614
4615 return device->ops.get_vector_affinity(device, comp_vector);
4616
4617}
4618
4619
4620
4621
4622
4623
4624
4625void rdma_roce_rescan_device(struct ib_device *ibdev);
4626
4627struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4628
4629int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4630
4631struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4632 enum rdma_netdev_t type, const char *name,
4633 unsigned char name_assign_type,
4634 void (*setup)(struct net_device *));
4635
4636int rdma_init_netdev(struct ib_device *device, u32 port_num,
4637 enum rdma_netdev_t type, const char *name,
4638 unsigned char name_assign_type,
4639 void (*setup)(struct net_device *),
4640 struct net_device *netdev);
4641
4642
4643
4644
4645
4646
4647
4648
4649
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 * @device: device pointer for which ib_device pointer to retrieve
 *
 * @device must be the &struct device embedded in an ib_core_device;
 * returns the owning ib_device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}
4657
4658
4659
4660
4661
4662static inline int ibdev_to_node(struct ib_device *ibdev)
4663{
4664 struct device *parent = ibdev->dev.parent;
4665
4666 if (!parent)
4667 return NUMA_NO_NODE;
4668 return dev_to_node(parent);
4669}
4670
4671
4672
4673
4674
4675
4676
4677
4678
/*
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *	ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers who have exposed sysfs entries using
 * ops->device_group.
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4681
4682bool rdma_dev_access_netns(const struct ib_device *device,
4683 const struct net *net);
4684
4685#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4686#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4687#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4688
4689
4690
4691
4692
4693
4694
4695
4696
/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *   on the flow label
 * @fl: flow label
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * convention.
 *
 * Return: A valid UDP src port (within IB_ROCE_UDP_ENCAP_VALID_PORT_MIN..MAX)
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	/* Fold the top 6 bits of the flow label into the low 14 bits. */
	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
/**
 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
 *   local and remote qpn values
 * @lqpn: local qp number
 * @rqpn: remote qp number
 *
 * This function folds the multiplication result of the two qpns and reduces
 * it to a 20 bit flow label.  Because multiplication is commutative, both
 * the requester and the responder compute the same flow_label for a given
 * connection, making the label symmetric.
 *
 * This helper function should be used by drivers in case the upper layer
 * provides a zero flow_label value.
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	/* xor-fold the 48-ish significant bits down before masking to 20. */
	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
4729
4730const struct ib_port_immutable*
4731ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4732#endif
4733