#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)			\
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))

#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif
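
/*
 * Usage sketch (illustrative, not part of this header's API): a driver can
 * report device-scoped events through the ibdev_* helpers above instead of
 * dev_* so that messages are prefixed with the IB device name. The function
 * name, the 'eqn' parameter and the message text below are hypothetical.
 *
 *	static void hypothetical_report_eq_overrun(struct ib_device *ibdev,
 *						   u32 eqn)
 *	{
 *		ibdev_warn_ratelimited(ibdev,
 *				       "EQ %u overrun, completions may be lost\n",
 *				       eqn);
 *		ibdev_dbg(ibdev, "scheduling EQ %u recovery\n", eqn);
 *	}
 */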

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u8			port_num;
};

enum {
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
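
/*
 * Usage sketch (illustrative): deciding whether a GID entry resolves to a
 * RoCEv2 (UDP encapsulated) destination. RoCEv2 GIDs map to RDMA_NETWORK_IPV4
 * or RDMA_NETWORK_IPV6, while IB and RoCEv1 keep their native headers. The
 * helper below is hypothetical and not part of this header.
 *
 *	static bool hypothetical_gid_is_udp_encap(const struct ib_gid_attr *attr)
 *	{
 *		enum rdma_network_type nt = rdma_gid_attr_network_type(attr);
 *
 *		return nt == RDMA_NETWORK_IPV4 || nt == RDMA_NETWORK_IPV6;
 *	}
 */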
218
219enum ib_device_cap_flags {
220 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
221 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
222 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
223 IB_DEVICE_RAW_MULTI = (1 << 3),
224 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
225 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
226 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
227 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
228 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
229
230 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
231 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
232 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
233 IB_DEVICE_SRQ_RESIZE = (1 << 13),
234 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
235
236
237
238
239
240
241
242
243 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
244
245 IB_DEVICE_MEM_WINDOW = (1 << 17),
246
247
248
249
250
251
252
253 IB_DEVICE_UD_IP_CSUM = (1 << 18),
254 IB_DEVICE_UD_TSO = (1 << 19),
255 IB_DEVICE_XRC = (1 << 20),
256
257
258
259
260
261
262
263
264
265
266 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
267 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
268 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
269 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
270 IB_DEVICE_RC_IP_CSUM = (1 << 25),
271
272 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
273
274
275
276
277
278
279 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
280 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
281 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
282 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
283 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
284 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
285
286 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
287 IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
288
289 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
290 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
291};
292
293enum ib_atomic_cap {
294 IB_ATOMIC_NONE,
295 IB_ATOMIC_HCA,
296 IB_ATOMIC_GLOB
297};
298
299enum ib_odp_general_cap_bits {
300 IB_ODP_SUPPORT = 1 << 0,
301 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
302};
303
304enum ib_odp_transport_cap_bits {
305 IB_ODP_SUPPORT_SEND = 1 << 0,
306 IB_ODP_SUPPORT_RECV = 1 << 1,
307 IB_ODP_SUPPORT_WRITE = 1 << 2,
308 IB_ODP_SUPPORT_READ = 1 << 3,
309 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
310 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
311};
312
313struct ib_odp_caps {
314 uint64_t general_caps;
315 struct {
316 uint32_t rc_odp_caps;
317 uint32_t uc_odp_caps;
318 uint32_t ud_odp_caps;
319 uint32_t xrc_odp_caps;
320 } per_transport_caps;
321};
322
323struct ib_rss_caps {
324
325
326
327
328 u32 supported_qpts;
329 u32 max_rwq_indirection_tables;
330 u32 max_rwq_indirection_table_size;
331};
332
333enum ib_tm_cap_flags {
334
335 IB_TM_CAP_RNDV_RC = 1 << 0,
336};
337
338struct ib_tm_caps {
339
340 u32 max_rndv_hdr_size;
341
342 u32 max_num_tags;
343
344 u32 flags;
345
346 u32 max_ops;
347
348 u32 max_sge;
349};
350
351struct ib_cq_init_attr {
352 unsigned int cqe;
353 u32 comp_vector;
354 u32 flags;
355};
356
357enum ib_cq_attr_mask {
358 IB_CQ_MODERATE = 1 << 0,
359};
360
361struct ib_cq_caps {
362 u16 max_cq_moderation_count;
363 u16 max_cq_moderation_period;
364};
365
366struct ib_dm_mr_attr {
367 u64 length;
368 u64 offset;
369 u32 access_flags;
370};
371
372struct ib_dm_alloc_attr {
373 u64 length;
374 u32 alignment;
375 u32 flags;
376};
377
378struct ib_device_attr {
379 u64 fw_ver;
380 __be64 sys_image_guid;
381 u64 max_mr_size;
382 u64 page_size_cap;
383 u32 vendor_id;
384 u32 vendor_part_id;
385 u32 hw_ver;
386 int max_qp;
387 int max_qp_wr;
388 u64 device_cap_flags;
389 int max_send_sge;
390 int max_recv_sge;
391 int max_sge_rd;
392 int max_cq;
393 int max_cqe;
394 int max_mr;
395 int max_pd;
396 int max_qp_rd_atom;
397 int max_ee_rd_atom;
398 int max_res_rd_atom;
399 int max_qp_init_rd_atom;
400 int max_ee_init_rd_atom;
401 enum ib_atomic_cap atomic_cap;
402 enum ib_atomic_cap masked_atomic_cap;
403 int max_ee;
404 int max_rdd;
405 int max_mw;
406 int max_raw_ipv6_qp;
407 int max_raw_ethy_qp;
408 int max_mcast_grp;
409 int max_mcast_qp_attach;
410 int max_total_mcast_qp_attach;
411 int max_ah;
412 int max_srq;
413 int max_srq_wr;
414 int max_srq_sge;
415 unsigned int max_fast_reg_page_list_len;
416 unsigned int max_pi_fast_reg_page_list_len;
417 u16 max_pkeys;
418 u8 local_ca_ack_delay;
419 int sig_prot_cap;
420 int sig_guard_cap;
421 struct ib_odp_caps odp_caps;
422 uint64_t timestamp_mask;
423 uint64_t hca_core_clock;
424 struct ib_rss_caps rss_caps;
425 u32 max_wq_type_rq;
426 u32 raw_packet_caps;
427 struct ib_tm_caps tm_caps;
428 struct ib_cq_caps cq_caps;
429 u64 max_dm_size;
430
431 u32 max_sgl_rd;
432};
433
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
}
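
/*
 * Usage sketch (illustrative): clamping a netdev MTU to the enum used for a
 * QP's path MTU. The value passed in is the IB payload MTU, so RoCE drivers
 * typically subtract their transport header overhead first; the netdev
 * pointer and the overhead constant below are hypothetical.
 *
 *	enum ib_mtu path_mtu =
 *		ib_mtu_int_to_enum(netdev->mtu - HYPOTHETICAL_ROCE_OVERHEAD);
 */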
494
495enum ib_port_state {
496 IB_PORT_NOP = 0,
497 IB_PORT_DOWN = 1,
498 IB_PORT_INIT = 2,
499 IB_PORT_ARMED = 3,
500 IB_PORT_ACTIVE = 4,
501 IB_PORT_ACTIVE_DEFER = 5
502};
503
504enum ib_port_phys_state {
505 IB_PORT_PHYS_STATE_SLEEP = 1,
506 IB_PORT_PHYS_STATE_POLLING = 2,
507 IB_PORT_PHYS_STATE_DISABLED = 3,
508 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
509 IB_PORT_PHYS_STATE_LINK_UP = 5,
510 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
511 IB_PORT_PHYS_STATE_PHY_TEST = 7,
512};
513
enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64,
	IB_SPEED_NDR	= 128,
};

/**
 * struct rdma_hw_stats - per-device or per-port hardware counters
 * @lock: protects @lifespan and the 64-bit @value array, which is not
 *	guaranteed to be updated atomically on 32-bit systems
 * @timestamp: jiffies of the last counter refresh, maintained by the core
 * @lifespan: minimum interval, in jiffies, between counter refreshes;
 *	defaults to RDMA_HW_STATS_DEFAULT_LIFESPAN milliseconds
 * @names: static array of counter names, at least @num_counters entries long
 * @num_counters: how many hardware counters the device exposes
 * @value: counter values filled in by the driver's get_hw_stats() callback
 */
struct rdma_hw_stats {
	struct mutex	lock;
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/*
 * rdma_alloc_hw_stats_struct - helper to allocate a struct rdma_hw_stats on
 *   behalf of a driver.
 * @names: array of static counter names, at least @num_counters entries long
 * @num_counters: how many counters the device exposes
 * @lifespan: minimum interval, in milliseconds, between counter refreshes
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
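
/*
 * Usage sketch (illustrative): a driver's .alloc_hw_stats callback usually
 * wraps the helper above around a static name table. All identifiers below
 * except rdma_alloc_hw_stats_struct() and RDMA_HW_STATS_DEFAULT_LIFESPAN are
 * hypothetical.
 *
 *	static const char * const hypothetical_counter_names[] = {
 *		"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
 *	};
 *
 *	static struct rdma_hw_stats *
 *	hypothetical_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(hypothetical_counter_names,
 *				ARRAY_SIZE(hypothetical_counter_names),
 *				RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */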

/*
 * Bits describing the management and protocol support the core can rely on
 * for a port; drivers report them through ib_port_immutable.core_cap_flags.
 */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
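
/*
 * Usage sketch (illustrative): a RoCEv2-capable driver's .get_port_immutable
 * callback typically advertises one of the RDMA_CORE_PORT_* bundles. The
 * function name below is hypothetical; IB_MGMT_MAD_SIZE comes from
 * <rdma/ib_mad.h>, and the table sizes are placeholders.
 *
 *	static int hypothetical_port_immutable(struct ib_device *ibdev,
 *					       u8 port_num,
 *					       struct ib_port_immutable *immutable)
 *	{
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 *		immutable->gid_tbl_len = 16;
 *		immutable->pkey_tbl_len = 1;
 *		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 *		return 0;
 *	}
 */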

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32			phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;

	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u16			active_speed;
	u8			phys_state;
	u16			port_cap_flags2;
};
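
/*
 * Usage sketch (illustrative): the user-visible link rate is the per-lane
 * speed scaled by the lane count, e.g. active_speed == IB_SPEED_EDR
 * (~25 Gb/s per lane) with active_width == IB_WIDTH_4X is roughly 100 Gb/s.
 * ib_query_port() is declared later in this header; 'ibdev' and 'port_num'
 * are assumed to exist in the caller.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(ibdev, port_num, &attr))
 *		pr_info("port %u: %dX, speed enum %u\n", port_num,
 *			ib_width_enum_to_int(attr.active_width),
 *			attr.active_speed);
 */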
680
681enum ib_device_modify_flags {
682 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
683 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
684};
685
686#define IB_DEVICE_NODE_DESC_MAX 64
687
688struct ib_device_modify {
689 u64 sys_image_guid;
690 char node_desc[IB_DEVICE_NODE_DESC_MAX];
691};
692
693enum ib_port_modify_flags {
694 IB_PORT_SHUTDOWN = 1,
695 IB_PORT_INIT_TYPE = (1<<2),
696 IB_PORT_RESET_QKEY_CNTR = (1<<3),
697 IB_PORT_OPA_MASK_CHG = (1<<4)
698};
699
700struct ib_port_modify {
701 u32 set_port_cap_mask;
702 u32 clr_port_cap_mask;
703 u8 init_type;
704};
705
706enum ib_event_type {
707 IB_EVENT_CQ_ERR,
708 IB_EVENT_QP_FATAL,
709 IB_EVENT_QP_REQ_ERR,
710 IB_EVENT_QP_ACCESS_ERR,
711 IB_EVENT_COMM_EST,
712 IB_EVENT_SQ_DRAINED,
713 IB_EVENT_PATH_MIG,
714 IB_EVENT_PATH_MIG_ERR,
715 IB_EVENT_DEVICE_FATAL,
716 IB_EVENT_PORT_ACTIVE,
717 IB_EVENT_PORT_ERR,
718 IB_EVENT_LID_CHANGE,
719 IB_EVENT_PKEY_CHANGE,
720 IB_EVENT_SM_CHANGE,
721 IB_EVENT_SRQ_ERR,
722 IB_EVENT_SRQ_LIMIT_REACHED,
723 IB_EVENT_QP_LAST_WQE_REACHED,
724 IB_EVENT_CLIENT_REREGISTER,
725 IB_EVENT_GID_CHANGE,
726 IB_EVENT_WQ_FATAL,
727};
728
729const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
730
731struct ib_event {
732 struct ib_device *device;
733 union {
734 struct ib_cq *cq;
735 struct ib_qp *qp;
736 struct ib_srq *srq;
737 struct ib_wq *wq;
738 u8 port_num;
739 } element;
740 enum ib_event_type event;
741};
742
struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
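
/*
 * Usage sketch (illustrative): ULPs fill an ib_event_handler with
 * INIT_IB_EVENT_HANDLER() and register it with ib_register_event_handler()
 * (declared later in this header). The handler and the surrounding client
 * structure below are hypothetical.
 *
 *	static void hypothetical_event(struct ib_event_handler *handler,
 *				       struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			pr_warn("port %u went down\n",
 *				event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&client->event_handler, ibdev,
 *			      hypothetical_event);
 *	ib_register_event_handler(&client->event_handler);
 */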
755
756struct ib_global_route {
757 const struct ib_gid_attr *sgid_attr;
758 union ib_gid dgid;
759 u32 flow_label;
760 u8 sgid_index;
761 u8 hop_limit;
762 u8 traffic_class;
763};
764
765struct ib_grh {
766 __be32 version_tclass_flow;
767 __be16 paylen;
768 u8 next_hdr;
769 u8 hop_limit;
770 union ib_gid sgid;
771 union ib_gid dgid;
772};
773
774union rdma_network_hdr {
775 struct ib_grh ibgrh;
776 struct {
777
778
779
780 u8 reserved[20];
781 struct iphdr roce4grh;
782 };
783};
784
785#define IB_QPN_MASK 0xFFFFFF
786
787enum {
788 IB_MULTICAST_QPN = 0xffffff
789};
790
791#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
792#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
793
794enum ib_ah_flags {
795 IB_AH_GRH = 1
796};
797
798enum ib_rate {
799 IB_RATE_PORT_CURRENT = 0,
800 IB_RATE_2_5_GBPS = 2,
801 IB_RATE_5_GBPS = 5,
802 IB_RATE_10_GBPS = 3,
803 IB_RATE_20_GBPS = 6,
804 IB_RATE_30_GBPS = 4,
805 IB_RATE_40_GBPS = 7,
806 IB_RATE_60_GBPS = 8,
807 IB_RATE_80_GBPS = 9,
808 IB_RATE_120_GBPS = 10,
809 IB_RATE_14_GBPS = 11,
810 IB_RATE_56_GBPS = 12,
811 IB_RATE_112_GBPS = 13,
812 IB_RATE_168_GBPS = 14,
813 IB_RATE_25_GBPS = 15,
814 IB_RATE_100_GBPS = 16,
815 IB_RATE_200_GBPS = 17,
816 IB_RATE_300_GBPS = 18,
817 IB_RATE_28_GBPS = 19,
818 IB_RATE_50_GBPS = 20,
819 IB_RATE_400_GBPS = 21,
820 IB_RATE_600_GBPS = 22,
821};
822
823
824
825
826
827
828
829__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
830
831
832
833
834
835
836__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856enum ib_mr_type {
857 IB_MR_TYPE_MEM_REG,
858 IB_MR_TYPE_SG_GAPS,
859 IB_MR_TYPE_DM,
860 IB_MR_TYPE_USER,
861 IB_MR_TYPE_DMA,
862 IB_MR_TYPE_INTEGRITY,
863};
864
865enum ib_mr_status_check {
866 IB_MR_CHECK_SIG_STATUS = 1,
867};
868
869
870
871
872
873
874
875
876
877struct ib_mr_status {
878 u32 fail_status;
879 struct ib_sig_err sig_err;
880};
881
882
883
884
885
886
887__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
888
889struct rdma_ah_init_attr {
890 struct rdma_ah_attr *ah_attr;
891 u32 flags;
892 struct net_device *xmit_slave;
893};
894
895enum rdma_ah_attr_type {
896 RDMA_AH_ATTR_TYPE_UNDEFINED,
897 RDMA_AH_ATTR_TYPE_IB,
898 RDMA_AH_ATTR_TYPE_ROCE,
899 RDMA_AH_ATTR_TYPE_OPA,
900};
901
902struct ib_ah_attr {
903 u16 dlid;
904 u8 src_path_bits;
905};
906
907struct roce_ah_attr {
908 u8 dmac[ETH_ALEN];
909};
910
911struct opa_ah_attr {
912 u32 dlid;
913 u8 src_path_bits;
914 bool make_grd;
915};
916
917struct rdma_ah_attr {
918 struct ib_global_route grh;
919 u8 sl;
920 u8 static_rate;
921 u8 port_num;
922 u8 ah_flags;
923 enum rdma_ah_attr_type type;
924 union {
925 struct ib_ah_attr ib;
926 struct roce_ah_attr roce;
927 struct opa_ah_attr opa;
928 };
929};
930
931enum ib_wc_status {
932 IB_WC_SUCCESS,
933 IB_WC_LOC_LEN_ERR,
934 IB_WC_LOC_QP_OP_ERR,
935 IB_WC_LOC_EEC_OP_ERR,
936 IB_WC_LOC_PROT_ERR,
937 IB_WC_WR_FLUSH_ERR,
938 IB_WC_MW_BIND_ERR,
939 IB_WC_BAD_RESP_ERR,
940 IB_WC_LOC_ACCESS_ERR,
941 IB_WC_REM_INV_REQ_ERR,
942 IB_WC_REM_ACCESS_ERR,
943 IB_WC_REM_OP_ERR,
944 IB_WC_RETRY_EXC_ERR,
945 IB_WC_RNR_RETRY_EXC_ERR,
946 IB_WC_LOC_RDD_VIOL_ERR,
947 IB_WC_REM_INV_RD_REQ_ERR,
948 IB_WC_REM_ABORT_ERR,
949 IB_WC_INV_EECN_ERR,
950 IB_WC_INV_EEC_STATE_ERR,
951 IB_WC_FATAL_ERR,
952 IB_WC_RESP_TIMEOUT_ERR,
953 IB_WC_GENERAL_ERR
954};
955
956const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
957
958enum ib_wc_opcode {
959 IB_WC_SEND = IB_UVERBS_WC_SEND,
960 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
961 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
962 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
963 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
964 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
965 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
966 IB_WC_LSO = IB_UVERBS_WC_TSO,
967 IB_WC_REG_MR,
968 IB_WC_MASKED_COMP_SWAP,
969 IB_WC_MASKED_FETCH_ADD,
970
971
972
973
974 IB_WC_RECV = 1 << 7,
975 IB_WC_RECV_RDMA_WITH_IMM
976};
977
978enum ib_wc_flags {
979 IB_WC_GRH = 1,
980 IB_WC_WITH_IMM = (1<<1),
981 IB_WC_WITH_INVALIDATE = (1<<2),
982 IB_WC_IP_CSUM_OK = (1<<3),
983 IB_WC_WITH_SMAC = (1<<4),
984 IB_WC_WITH_VLAN = (1<<5),
985 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
986};
987
988struct ib_wc {
989 union {
990 u64 wr_id;
991 struct ib_cqe *wr_cqe;
992 };
993 enum ib_wc_status status;
994 enum ib_wc_opcode opcode;
995 u32 vendor_err;
996 u32 byte_len;
997 struct ib_qp *qp;
998 union {
999 __be32 imm_data;
1000 u32 invalidate_rkey;
1001 } ex;
1002 u32 src_qp;
1003 u32 slid;
1004 int wc_flags;
1005 u16 pkey_index;
1006 u8 sl;
1007 u8 dlid_path_bits;
1008 u8 port_num;
1009 u8 smac[ETH_ALEN];
1010 u16 vlan_id;
1011 u8 network_hdr_type;
1012};
1013
1014enum ib_cq_notify_flags {
1015 IB_CQ_SOLICITED = 1 << 0,
1016 IB_CQ_NEXT_COMP = 1 << 1,
1017 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1018 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1019};
1020
1021enum ib_srq_type {
1022 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1023 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1024 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1025};
1026
1027static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1028{
1029 return srq_type == IB_SRQT_XRC ||
1030 srq_type == IB_SRQT_TM;
1031}
1032
1033enum ib_srq_attr_mask {
1034 IB_SRQ_MAX_WR = 1 << 0,
1035 IB_SRQ_LIMIT = 1 << 1,
1036};
1037
1038struct ib_srq_attr {
1039 u32 max_wr;
1040 u32 max_sge;
1041 u32 srq_limit;
1042};
1043
1044struct ib_srq_init_attr {
1045 void (*event_handler)(struct ib_event *, void *);
1046 void *srq_context;
1047 struct ib_srq_attr attr;
1048 enum ib_srq_type srq_type;
1049
1050 struct {
1051 struct ib_cq *cq;
1052 union {
1053 struct {
1054 struct ib_xrcd *xrcd;
1055 } xrc;
1056
1057 struct {
1058 u32 max_num_tags;
1059 } tag_matching;
1060 };
1061 } ext;
1062};
1063
1064struct ib_qp_cap {
1065 u32 max_send_wr;
1066 u32 max_recv_wr;
1067 u32 max_send_sge;
1068 u32 max_recv_sge;
1069 u32 max_inline_data;
1070
1071
1072
1073
1074
1075
1076 u32 max_rdma_ctxs;
1077};
1078
1079enum ib_sig_type {
1080 IB_SIGNAL_ALL_WR,
1081 IB_SIGNAL_REQ_WR
1082};
1083
1084enum ib_qp_type {
1085
1086
1087
1088
1089
1090 IB_QPT_SMI,
1091 IB_QPT_GSI,
1092
1093 IB_QPT_RC = IB_UVERBS_QPT_RC,
1094 IB_QPT_UC = IB_UVERBS_QPT_UC,
1095 IB_QPT_UD = IB_UVERBS_QPT_UD,
1096 IB_QPT_RAW_IPV6,
1097 IB_QPT_RAW_ETHERTYPE,
1098 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1099 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1100 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1101 IB_QPT_MAX,
1102 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1103
1104
1105
1106
1107 IB_QPT_RESERVED1 = 0x1000,
1108 IB_QPT_RESERVED2,
1109 IB_QPT_RESERVED3,
1110 IB_QPT_RESERVED4,
1111 IB_QPT_RESERVED5,
1112 IB_QPT_RESERVED6,
1113 IB_QPT_RESERVED7,
1114 IB_QPT_RESERVED8,
1115 IB_QPT_RESERVED9,
1116 IB_QPT_RESERVED10,
1117};
1118
1119enum ib_qp_create_flags {
1120 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1121 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1122 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1123 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1124 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1125 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1126 IB_QP_CREATE_NETIF_QP = 1 << 5,
1127 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1128 IB_QP_CREATE_NETDEV_USE = 1 << 7,
1129 IB_QP_CREATE_SCATTER_FCS =
1130 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1131 IB_QP_CREATE_CVLAN_STRIPPING =
1132 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1133 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1134 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1135 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1136
1137 IB_QP_CREATE_RESERVED_START = 1 << 26,
1138 IB_QP_CREATE_RESERVED_END = 1 << 31,
1139};
1140

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};
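
/*
 * Usage sketch (illustrative): the minimum attributes a ULP fills in before
 * calling ib_create_qp() (declared later in this header) for an RC QP. The
 * completion queues, capacities, context pointer and event handler below are
 * hypothetical.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = hypothetical_qp_event,
 *		.qp_context    = ctx,
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 128,
 *			.max_recv_wr  = 128,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */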
1167
1168struct ib_qp_open_attr {
1169 void (*event_handler)(struct ib_event *, void *);
1170 void *qp_context;
1171 u32 qp_num;
1172 enum ib_qp_type qp_type;
1173};
1174
1175enum ib_rnr_timeout {
1176 IB_RNR_TIMER_655_36 = 0,
1177 IB_RNR_TIMER_000_01 = 1,
1178 IB_RNR_TIMER_000_02 = 2,
1179 IB_RNR_TIMER_000_03 = 3,
1180 IB_RNR_TIMER_000_04 = 4,
1181 IB_RNR_TIMER_000_06 = 5,
1182 IB_RNR_TIMER_000_08 = 6,
1183 IB_RNR_TIMER_000_12 = 7,
1184 IB_RNR_TIMER_000_16 = 8,
1185 IB_RNR_TIMER_000_24 = 9,
1186 IB_RNR_TIMER_000_32 = 10,
1187 IB_RNR_TIMER_000_48 = 11,
1188 IB_RNR_TIMER_000_64 = 12,
1189 IB_RNR_TIMER_000_96 = 13,
1190 IB_RNR_TIMER_001_28 = 14,
1191 IB_RNR_TIMER_001_92 = 15,
1192 IB_RNR_TIMER_002_56 = 16,
1193 IB_RNR_TIMER_003_84 = 17,
1194 IB_RNR_TIMER_005_12 = 18,
1195 IB_RNR_TIMER_007_68 = 19,
1196 IB_RNR_TIMER_010_24 = 20,
1197 IB_RNR_TIMER_015_36 = 21,
1198 IB_RNR_TIMER_020_48 = 22,
1199 IB_RNR_TIMER_030_72 = 23,
1200 IB_RNR_TIMER_040_96 = 24,
1201 IB_RNR_TIMER_061_44 = 25,
1202 IB_RNR_TIMER_081_92 = 26,
1203 IB_RNR_TIMER_122_88 = 27,
1204 IB_RNR_TIMER_163_84 = 28,
1205 IB_RNR_TIMER_245_76 = 29,
1206 IB_RNR_TIMER_327_68 = 30,
1207 IB_RNR_TIMER_491_52 = 31
1208};
1209
1210enum ib_qp_attr_mask {
1211 IB_QP_STATE = 1,
1212 IB_QP_CUR_STATE = (1<<1),
1213 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1214 IB_QP_ACCESS_FLAGS = (1<<3),
1215 IB_QP_PKEY_INDEX = (1<<4),
1216 IB_QP_PORT = (1<<5),
1217 IB_QP_QKEY = (1<<6),
1218 IB_QP_AV = (1<<7),
1219 IB_QP_PATH_MTU = (1<<8),
1220 IB_QP_TIMEOUT = (1<<9),
1221 IB_QP_RETRY_CNT = (1<<10),
1222 IB_QP_RNR_RETRY = (1<<11),
1223 IB_QP_RQ_PSN = (1<<12),
1224 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1225 IB_QP_ALT_PATH = (1<<14),
1226 IB_QP_MIN_RNR_TIMER = (1<<15),
1227 IB_QP_SQ_PSN = (1<<16),
1228 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1229 IB_QP_PATH_MIG_STATE = (1<<18),
1230 IB_QP_CAP = (1<<19),
1231 IB_QP_DEST_QPN = (1<<20),
1232 IB_QP_RESERVED1 = (1<<21),
1233 IB_QP_RESERVED2 = (1<<22),
1234 IB_QP_RESERVED3 = (1<<23),
1235 IB_QP_RESERVED4 = (1<<24),
1236 IB_QP_RATE_LIMIT = (1<<25),
1237};
1238
1239enum ib_qp_state {
1240 IB_QPS_RESET,
1241 IB_QPS_INIT,
1242 IB_QPS_RTR,
1243 IB_QPS_RTS,
1244 IB_QPS_SQD,
1245 IB_QPS_SQE,
1246 IB_QPS_ERR
1247};
1248
1249enum ib_mig_state {
1250 IB_MIG_MIGRATED,
1251 IB_MIG_REARM,
1252 IB_MIG_ARMED
1253};
1254
1255enum ib_mw_type {
1256 IB_MW_TYPE_1 = 1,
1257 IB_MW_TYPE_2 = 2
1258};
1259
1260struct ib_qp_attr {
1261 enum ib_qp_state qp_state;
1262 enum ib_qp_state cur_qp_state;
1263 enum ib_mtu path_mtu;
1264 enum ib_mig_state path_mig_state;
1265 u32 qkey;
1266 u32 rq_psn;
1267 u32 sq_psn;
1268 u32 dest_qp_num;
1269 int qp_access_flags;
1270 struct ib_qp_cap cap;
1271 struct rdma_ah_attr ah_attr;
1272 struct rdma_ah_attr alt_ah_attr;
1273 u16 pkey_index;
1274 u16 alt_pkey_index;
1275 u8 en_sqd_async_notify;
1276 u8 sq_draining;
1277 u8 max_rd_atomic;
1278 u8 max_dest_rd_atomic;
1279 u8 min_rnr_timer;
1280 u8 port_num;
1281 u8 timeout;
1282 u8 retry_cnt;
1283 u8 rnr_retry;
1284 u8 alt_port_num;
1285 u8 alt_timeout;
1286 u32 rate_limit;
1287 struct net_device *xmit_slave;
1288};
1289
1290enum ib_wr_opcode {
1291
1292 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1293 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1294 IB_WR_SEND = IB_UVERBS_WR_SEND,
1295 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1296 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1297 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1298 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1299 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1300 IB_WR_LSO = IB_UVERBS_WR_TSO,
1301 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1302 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1303 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1304 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1305 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1306 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1307 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1308
1309
1310 IB_WR_REG_MR = 0x20,
1311 IB_WR_REG_MR_INTEGRITY,
1312
1313
1314
1315
1316 IB_WR_RESERVED1 = 0xf0,
1317 IB_WR_RESERVED2,
1318 IB_WR_RESERVED3,
1319 IB_WR_RESERVED4,
1320 IB_WR_RESERVED5,
1321 IB_WR_RESERVED6,
1322 IB_WR_RESERVED7,
1323 IB_WR_RESERVED8,
1324 IB_WR_RESERVED9,
1325 IB_WR_RESERVED10,
1326};
1327
1328enum ib_send_flags {
1329 IB_SEND_FENCE = 1,
1330 IB_SEND_SIGNALED = (1<<1),
1331 IB_SEND_SOLICITED = (1<<2),
1332 IB_SEND_INLINE = (1<<3),
1333 IB_SEND_IP_CSUM = (1<<4),
1334
1335
1336 IB_SEND_RESERVED_START = (1 << 26),
1337 IB_SEND_RESERVED_END = (1 << 31),
1338};
1339
struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index;
	u8			port_num;
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};
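
/*
 * Usage sketch (illustrative): building an RDMA WRITE work request and
 * posting it with ib_post_send() (declared later in this header). Everything
 * below except the verbs types and ib_post_send() is hypothetical, and the
 * sge is assumed to describe memory already registered with a valid lkey.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.wr_cqe     = &ctx->cqe,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = remote_rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */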

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
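
/*
 * Usage sketch (illustrative): access flags for an MR that a remote peer will
 * write into with RDMA WRITE. Enabling remote write (or remote atomics) also
 * requires local write permission on the same MR.
 *
 *	int access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 */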
1446
1447
1448
1449
1450
1451enum ib_mr_rereg_flags {
1452 IB_MR_REREG_TRANS = 1,
1453 IB_MR_REREG_PD = (1<<1),
1454 IB_MR_REREG_ACCESS = (1<<2),
1455 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1456};
1457
1458struct ib_umem;
1459
1460enum rdma_remove_reason {
1461
1462
1463
1464
1465 RDMA_REMOVE_DESTROY,
1466
1467 RDMA_REMOVE_CLOSE,
1468
1469 RDMA_REMOVE_DRIVER_REMOVE,
1470
1471 RDMA_REMOVE_ABORT,
1472};
1473
1474struct ib_rdmacg_object {
1475#ifdef CONFIG_CGROUP_RDMA
1476 struct rdma_cgroup *cg;
1477#endif
1478};
1479
1480struct ib_ucontext {
1481 struct ib_device *device;
1482 struct ib_uverbs_file *ufile;
1483
1484 bool cleanup_retryable;
1485
1486 struct ib_rdmacg_object cg_obj;
1487
1488
1489
1490 struct rdma_restrack_entry res;
1491 struct xarray mmap_xa;
1492};
1493
1494struct ib_uobject {
1495 u64 user_handle;
1496
1497 struct ib_uverbs_file *ufile;
1498
1499 struct ib_ucontext *context;
1500 void *object;
1501 struct list_head list;
1502 struct ib_rdmacg_object cg_obj;
1503 int id;
1504 struct kref ref;
1505 atomic_t usecnt;
1506 struct rcu_head rcu;
1507
1508 const struct uverbs_api_object *uapi_object;
1509};
1510
1511struct ib_udata {
1512 const void __user *inbuf;
1513 void __user *outbuf;
1514 size_t inlen;
1515 size_t outlen;
1516};
1517
1518struct ib_pd {
1519 u32 local_dma_lkey;
1520 u32 flags;
1521 struct ib_device *device;
1522 struct ib_uobject *uobject;
1523 atomic_t usecnt;
1524
1525 u32 unsafe_global_rkey;
1526
1527
1528
1529
1530 struct ib_mr *__internal_mr;
1531 struct rdma_restrack_entry res;
1532};
1533
1534struct ib_xrcd {
1535 struct ib_device *device;
1536 atomic_t usecnt;
1537 struct inode *inode;
1538 struct rw_semaphore tgt_qps_rwsem;
1539 struct xarray tgt_qps;
1540};
1541
1542struct ib_ah {
1543 struct ib_device *device;
1544 struct ib_pd *pd;
1545 struct ib_uobject *uobject;
1546 const struct ib_gid_attr *sgid_attr;
1547 enum rdma_ah_attr_type type;
1548};
1549
1550typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1551
1552enum ib_poll_context {
1553 IB_POLL_SOFTIRQ,
1554 IB_POLL_WORKQUEUE,
1555 IB_POLL_UNBOUND_WORKQUEUE,
1556 IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1557
1558 IB_POLL_DIRECT,
1559};
1560
1561struct ib_cq {
1562 struct ib_device *device;
1563 struct ib_ucq_object *uobject;
1564 ib_comp_handler comp_handler;
1565 void (*event_handler)(struct ib_event *, void *);
1566 void *cq_context;
1567 int cqe;
1568 unsigned int cqe_used;
1569 atomic_t usecnt;
1570 enum ib_poll_context poll_ctx;
1571 struct ib_wc *wc;
1572 struct list_head pool_entry;
1573 union {
1574 struct irq_poll iop;
1575 struct work_struct work;
1576 };
1577 struct workqueue_struct *comp_wq;
1578 struct dim *dim;
1579
1580
1581 ktime_t timestamp;
1582 u8 interrupt:1;
1583 u8 shared:1;
1584 unsigned int comp_vector;
1585
1586
1587
1588
1589 struct rdma_restrack_entry res;
1590};
1591
1592struct ib_srq {
1593 struct ib_device *device;
1594 struct ib_pd *pd;
1595 struct ib_usrq_object *uobject;
1596 void (*event_handler)(struct ib_event *, void *);
1597 void *srq_context;
1598 enum ib_srq_type srq_type;
1599 atomic_t usecnt;
1600
1601 struct {
1602 struct ib_cq *cq;
1603 union {
1604 struct {
1605 struct ib_xrcd *xrcd;
1606 u32 srq_num;
1607 } xrc;
1608 };
1609 } ext;
1610};
1611
1612enum ib_raw_packet_caps {
1613
1614
1615
1616 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1617
1618
1619 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1620
1621 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
1622
1623
1624
1625 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
1626};
1627
1628enum ib_wq_type {
1629 IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1630};
1631
1632enum ib_wq_state {
1633 IB_WQS_RESET,
1634 IB_WQS_RDY,
1635 IB_WQS_ERR
1636};
1637
1638struct ib_wq {
1639 struct ib_device *device;
1640 struct ib_uwq_object *uobject;
1641 void *wq_context;
1642 void (*event_handler)(struct ib_event *, void *);
1643 struct ib_pd *pd;
1644 struct ib_cq *cq;
1645 u32 wq_num;
1646 enum ib_wq_state state;
1647 enum ib_wq_type wq_type;
1648 atomic_t usecnt;
1649};
1650
1651enum ib_wq_flags {
1652 IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1653 IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1654 IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1655 IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1656 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1657};
1658
1659struct ib_wq_init_attr {
1660 void *wq_context;
1661 enum ib_wq_type wq_type;
1662 u32 max_wr;
1663 u32 max_sge;
1664 struct ib_cq *cq;
1665 void (*event_handler)(struct ib_event *, void *);
1666 u32 create_flags;
1667};
1668
1669enum ib_wq_attr_mask {
1670 IB_WQ_STATE = 1 << 0,
1671 IB_WQ_CUR_STATE = 1 << 1,
1672 IB_WQ_FLAGS = 1 << 2,
1673};
1674
1675struct ib_wq_attr {
1676 enum ib_wq_state wq_state;
1677 enum ib_wq_state curr_wq_state;
1678 u32 flags;
1679 u32 flags_mask;
1680};
1681
1682struct ib_rwq_ind_table {
1683 struct ib_device *device;
1684 struct ib_uobject *uobject;
1685 atomic_t usecnt;
1686 u32 ind_tbl_num;
1687 u32 log_ind_tbl_size;
1688 struct ib_wq **ind_tbl;
1689};
1690
1691struct ib_rwq_ind_table_init_attr {
1692 u32 log_ind_tbl_size;
1693
1694 struct ib_wq **ind_tbl;
1695};
1696
1697enum port_pkey_state {
1698 IB_PORT_PKEY_NOT_VALID = 0,
1699 IB_PORT_PKEY_VALID = 1,
1700 IB_PORT_PKEY_LISTED = 2,
1701};
1702
1703struct ib_qp_security;
1704
1705struct ib_port_pkey {
1706 enum port_pkey_state state;
1707 u16 pkey_index;
1708 u8 port_num;
1709 struct list_head qp_list;
1710 struct list_head to_error_list;
1711 struct ib_qp_security *sec;
1712};
1713
1714struct ib_ports_pkeys {
1715 struct ib_port_pkey main;
1716 struct ib_port_pkey alt;
1717};
1718
1719struct ib_qp_security {
1720 struct ib_qp *qp;
1721 struct ib_device *dev;
1722
1723 struct mutex mutex;
1724 struct ib_ports_pkeys *ports_pkeys;
1725
1726
1727
1728 struct list_head shared_qp_list;
1729 void *security;
1730 bool destroying;
1731 atomic_t error_list_count;
1732 struct completion error_complete;
1733 int error_comps_pending;
1734};
1735
1736
1737
1738
1739
1740struct ib_qp {
1741 struct ib_device *device;
1742 struct ib_pd *pd;
1743 struct ib_cq *send_cq;
1744 struct ib_cq *recv_cq;
1745 spinlock_t mr_lock;
1746 int mrs_used;
1747 struct list_head rdma_mrs;
1748 struct list_head sig_mrs;
1749 struct ib_srq *srq;
1750 struct ib_xrcd *xrcd;
1751 struct list_head xrcd_list;
1752
1753
1754 atomic_t usecnt;
1755 struct list_head open_list;
1756 struct ib_qp *real_qp;
1757 struct ib_uqp_object *uobject;
1758 void (*event_handler)(struct ib_event *, void *);
1759 void *qp_context;
1760
1761 const struct ib_gid_attr *av_sgid_attr;
1762 const struct ib_gid_attr *alt_path_sgid_attr;
1763 u32 qp_num;
1764 u32 max_write_sge;
1765 u32 max_read_sge;
1766 enum ib_qp_type qp_type;
1767 struct ib_rwq_ind_table *rwq_ind_tbl;
1768 struct ib_qp_security *qp_sec;
1769 u8 port;
1770
1771 bool integrity_en;
1772
1773
1774
1775 struct rdma_restrack_entry res;
1776
1777
1778 struct rdma_counter *counter;
1779};
1780
1781struct ib_dm {
1782 struct ib_device *device;
1783 u32 length;
1784 u32 flags;
1785 struct ib_uobject *uobject;
1786 atomic_t usecnt;
1787};
1788
1789struct ib_mr {
1790 struct ib_device *device;
1791 struct ib_pd *pd;
1792 u32 lkey;
1793 u32 rkey;
1794 u64 iova;
1795 u64 length;
1796 unsigned int page_size;
1797 enum ib_mr_type type;
1798 bool need_inval;
1799 union {
1800 struct ib_uobject *uobject;
1801 struct list_head qp_entry;
1802 };
1803
1804 struct ib_dm *dm;
1805 struct ib_sig_attrs *sig_attrs;
1806
1807
1808
1809 struct rdma_restrack_entry res;
1810};
1811
1812struct ib_mw {
1813 struct ib_device *device;
1814 struct ib_pd *pd;
1815 struct ib_uobject *uobject;
1816 u32 rkey;
1817 enum ib_mw_type type;
1818};
1819
1820
1821enum ib_flow_attr_type {
1822
1823 IB_FLOW_ATTR_NORMAL = 0x0,
1824
1825
1826
1827 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1828
1829
1830
1831 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1832
1833 IB_FLOW_ATTR_SNIFFER = 0x3
1834};
1835
1836
1837enum ib_flow_spec_type {
1838
1839 IB_FLOW_SPEC_ETH = 0x20,
1840 IB_FLOW_SPEC_IB = 0x22,
1841
1842 IB_FLOW_SPEC_IPV4 = 0x30,
1843 IB_FLOW_SPEC_IPV6 = 0x31,
1844 IB_FLOW_SPEC_ESP = 0x34,
1845
1846 IB_FLOW_SPEC_TCP = 0x40,
1847 IB_FLOW_SPEC_UDP = 0x41,
1848 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1849 IB_FLOW_SPEC_GRE = 0x51,
1850 IB_FLOW_SPEC_MPLS = 0x60,
1851 IB_FLOW_SPEC_INNER = 0x100,
1852
1853 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1854 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1855 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1856 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1857};
1858#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1859#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1860
1861enum ib_flow_flags {
1862 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
1863 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2,
1864 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3
1865};
1866
1867struct ib_flow_eth_filter {
1868 u8 dst_mac[6];
1869 u8 src_mac[6];
1870 __be16 ether_type;
1871 __be16 vlan_tag;
1872
1873 u8 real_sz[];
1874};
1875
1876struct ib_flow_spec_eth {
1877 u32 type;
1878 u16 size;
1879 struct ib_flow_eth_filter val;
1880 struct ib_flow_eth_filter mask;
1881};
1882
1883struct ib_flow_ib_filter {
1884 __be16 dlid;
1885 __u8 sl;
1886
1887 u8 real_sz[];
1888};
1889
1890struct ib_flow_spec_ib {
1891 u32 type;
1892 u16 size;
1893 struct ib_flow_ib_filter val;
1894 struct ib_flow_ib_filter mask;
1895};
1896
1897
1898enum ib_ipv4_flags {
1899 IB_IPV4_DONT_FRAG = 0x2,
1900 IB_IPV4_MORE_FRAG = 0X4
1901
1902};
1903
1904struct ib_flow_ipv4_filter {
1905 __be32 src_ip;
1906 __be32 dst_ip;
1907 u8 proto;
1908 u8 tos;
1909 u8 ttl;
1910 u8 flags;
1911
1912 u8 real_sz[];
1913};
1914
1915struct ib_flow_spec_ipv4 {
1916 u32 type;
1917 u16 size;
1918 struct ib_flow_ipv4_filter val;
1919 struct ib_flow_ipv4_filter mask;
1920};
1921
1922struct ib_flow_ipv6_filter {
1923 u8 src_ip[16];
1924 u8 dst_ip[16];
1925 __be32 flow_label;
1926 u8 next_hdr;
1927 u8 traffic_class;
1928 u8 hop_limit;
1929
1930 u8 real_sz[];
1931};
1932
1933struct ib_flow_spec_ipv6 {
1934 u32 type;
1935 u16 size;
1936 struct ib_flow_ipv6_filter val;
1937 struct ib_flow_ipv6_filter mask;
1938};
1939
1940struct ib_flow_tcp_udp_filter {
1941 __be16 dst_port;
1942 __be16 src_port;
1943
1944 u8 real_sz[];
1945};
1946
1947struct ib_flow_spec_tcp_udp {
1948 u32 type;
1949 u16 size;
1950 struct ib_flow_tcp_udp_filter val;
1951 struct ib_flow_tcp_udp_filter mask;
1952};
1953
1954struct ib_flow_tunnel_filter {
1955 __be32 tunnel_id;
1956 u8 real_sz[];
1957};
1958
1959
1960
1961
1962struct ib_flow_spec_tunnel {
1963 u32 type;
1964 u16 size;
1965 struct ib_flow_tunnel_filter val;
1966 struct ib_flow_tunnel_filter mask;
1967};
1968
1969struct ib_flow_esp_filter {
1970 __be32 spi;
1971 __be32 seq;
1972
1973 u8 real_sz[];
1974};
1975
1976struct ib_flow_spec_esp {
1977 u32 type;
1978 u16 size;
1979 struct ib_flow_esp_filter val;
1980 struct ib_flow_esp_filter mask;
1981};
1982
1983struct ib_flow_gre_filter {
1984 __be16 c_ks_res0_ver;
1985 __be16 protocol;
1986 __be32 key;
1987
1988 u8 real_sz[];
1989};
1990
1991struct ib_flow_spec_gre {
1992 u32 type;
1993 u16 size;
1994 struct ib_flow_gre_filter val;
1995 struct ib_flow_gre_filter mask;
1996};
1997
1998struct ib_flow_mpls_filter {
1999 __be32 tag;
2000
2001 u8 real_sz[];
2002};
2003
2004struct ib_flow_spec_mpls {
2005 u32 type;
2006 u16 size;
2007 struct ib_flow_mpls_filter val;
2008 struct ib_flow_mpls_filter mask;
2009};
2010
2011struct ib_flow_spec_action_tag {
2012 enum ib_flow_spec_type type;
2013 u16 size;
2014 u32 tag_id;
2015};
2016
2017struct ib_flow_spec_action_drop {
2018 enum ib_flow_spec_type type;
2019 u16 size;
2020};
2021
2022struct ib_flow_spec_action_handle {
2023 enum ib_flow_spec_type type;
2024 u16 size;
2025 struct ib_flow_action *act;
2026};
2027
2028enum ib_counters_description {
2029 IB_COUNTER_PACKETS,
2030 IB_COUNTER_BYTES,
2031};
2032
2033struct ib_flow_spec_action_count {
2034 enum ib_flow_spec_type type;
2035 u16 size;
2036 struct ib_counters *counters;
2037};
2038
2039union ib_flow_spec {
2040 struct {
2041 u32 type;
2042 u16 size;
2043 };
2044 struct ib_flow_spec_eth eth;
2045 struct ib_flow_spec_ib ib;
2046 struct ib_flow_spec_ipv4 ipv4;
2047 struct ib_flow_spec_tcp_udp tcp_udp;
2048 struct ib_flow_spec_ipv6 ipv6;
2049 struct ib_flow_spec_tunnel tunnel;
2050 struct ib_flow_spec_esp esp;
2051 struct ib_flow_spec_gre gre;
2052 struct ib_flow_spec_mpls mpls;
2053 struct ib_flow_spec_action_tag flow_tag;
2054 struct ib_flow_spec_action_drop drop;
2055 struct ib_flow_spec_action_handle action;
2056 struct ib_flow_spec_action_count flow_count;
2057};
2058
2059struct ib_flow_attr {
2060 enum ib_flow_attr_type type;
2061 u16 size;
2062 u16 priority;
2063 u32 flags;
2064 u8 num_of_specs;
2065 u8 port;
2066 union ib_flow_spec flows[];
2067};
2068
2069struct ib_flow {
2070 struct ib_qp *qp;
2071 struct ib_device *device;
2072 struct ib_uobject *uobject;
2073};
2074
2075enum ib_flow_action_type {
2076 IB_FLOW_ACTION_UNSPECIFIED,
2077 IB_FLOW_ACTION_ESP = 1,
2078};
2079
2080struct ib_flow_action_attrs_esp_keymats {
2081 enum ib_uverbs_flow_action_esp_keymat protocol;
2082 union {
2083 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2084 } keymat;
2085};
2086
2087struct ib_flow_action_attrs_esp_replays {
2088 enum ib_uverbs_flow_action_esp_replay protocol;
2089 union {
2090 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2091 } replay;
2092};
2093
2094enum ib_flow_action_attrs_esp_flags {
2095
2096
2097
2098
2099
2100
2101 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2102 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2103};
2104
2105struct ib_flow_spec_list {
2106 struct ib_flow_spec_list *next;
2107 union ib_flow_spec spec;
2108};
2109
2110struct ib_flow_action_attrs_esp {
2111 struct ib_flow_action_attrs_esp_keymats *keymat;
2112 struct ib_flow_action_attrs_esp_replays *replay;
2113 struct ib_flow_spec_list *encap;
2114
2115
2116
2117 u32 esn;
2118 u32 spi;
2119 u32 seq;
2120 u32 tfc_pad;
2121
2122 u64 flags;
2123 u64 hard_limit_pkts;
2124};
2125
2126struct ib_flow_action {
2127 struct ib_device *device;
2128 struct ib_uobject *uobject;
2129 enum ib_flow_action_type type;
2130 atomic_t usecnt;
2131};
2132
2133struct ib_mad;
2134struct ib_grh;
2135
2136enum ib_process_mad_flags {
2137 IB_MAD_IGNORE_MKEY = 1,
2138 IB_MAD_IGNORE_BKEY = 2,
2139 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2140};
2141
2142enum ib_mad_result {
2143 IB_MAD_RESULT_FAILURE = 0,
2144 IB_MAD_RESULT_SUCCESS = 1 << 0,
2145 IB_MAD_RESULT_REPLY = 1 << 1,
2146 IB_MAD_RESULT_CONSUMED = 1 << 2
2147};
2148
2149struct ib_port_cache {
2150 u64 subnet_prefix;
2151 struct ib_pkey_cache *pkey;
2152 struct ib_gid_table *gid;
2153 u8 lmc;
2154 enum ib_port_state port_state;
2155};
2156
2157struct ib_port_immutable {
2158 int pkey_tbl_len;
2159 int gid_tbl_len;
2160 u32 core_cap_flags;
2161 u32 max_mad_size;
2162};
2163
2164struct ib_port_data {
2165 struct ib_device *ib_dev;
2166
2167 struct ib_port_immutable immutable;
2168
2169 spinlock_t pkey_list_lock;
2170 struct list_head pkey_list;
2171
2172 struct ib_port_cache cache;
2173
2174 spinlock_t netdev_lock;
2175 struct net_device __rcu *netdev;
2176 struct hlist_node ndev_hash_link;
2177 struct rdma_port_counter port_counter;
2178 struct rdma_hw_stats *hw_stats;
2179};
2180
2181
2182enum rdma_netdev_t {
2183 RDMA_NETDEV_OPA_VNIC,
2184 RDMA_NETDEV_IPOIB,
2185};
2186
2187
2188
2189
2190
2191struct rdma_netdev {
2192 void *clnt_priv;
2193 struct ib_device *hca;
2194 u8 port_num;
2195 int mtu;
2196
2197
2198
2199
2200
2201
2202 void (*free_rdma_netdev)(struct net_device *netdev);
2203
2204
2205 void (*set_id)(struct net_device *netdev, int id);
2206
2207 int (*send)(struct net_device *dev, struct sk_buff *skb,
2208 struct ib_ah *address, u32 dqpn);
2209
2210 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2211 union ib_gid *gid, u16 mlid,
2212 int set_qkey, u32 qkey);
2213 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2214 union ib_gid *gid, u16 mlid);
2215};
2216
2217struct rdma_netdev_alloc_params {
2218 size_t sizeof_priv;
2219 unsigned int txqs;
2220 unsigned int rxqs;
2221 void *param;
2222
2223 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2224 struct net_device *netdev, void *param);
2225};
2226
2227struct ib_odp_counters {
2228 atomic64_t faults;
2229 atomic64_t invalidations;
2230 atomic64_t prefetch;
2231};
2232
2233struct ib_counters {
2234 struct ib_device *device;
2235 struct ib_uobject *uobject;
2236
2237 atomic_t usecnt;
2238};
2239
2240struct ib_counters_read_attr {
2241 u64 *counters_buff;
2242 u32 ncounters;
2243 u32 flags;
2244};
2245
struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;

#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

struct rdma_user_mmap_entry {
	struct kref ref;
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	size_t npages;
	bool driver_removed;
};

/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}
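
/*
 * Usage sketch (illustrative): a driver ties its private object sizes to the
 * core-allocated verbs objects in its struct ib_device_ops, and the core then
 * allocates them with rdma_zalloc_drv_obj(). INIT_RDMA_OBJ_SIZE() enforces
 * that the ib_* structure is the first member of the driver structure. The
 * driver structures and ops table below are hypothetical.
 *
 *	struct hypothetical_pd {
 *		struct ib_pd ibpd;	// must be the first member
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops hypothetical_dev_ops = {
 *		.alloc_pd = hypothetical_alloc_pd,
 *		.dealloc_pd = hypothetical_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, hypothetical_pd, ibpd),
 *	};
 */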
2280
2281
2282
2283
2284
2285
2286struct ib_device_ops {
2287 struct module *owner;
2288 enum rdma_driver_id driver_id;
2289 u32 uverbs_abi_ver;
2290 unsigned int uverbs_no_driver_id_binding:1;
2291
2292 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2293 const struct ib_send_wr **bad_send_wr);
2294 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2295 const struct ib_recv_wr **bad_recv_wr);
2296 void (*drain_rq)(struct ib_qp *qp);
2297 void (*drain_sq)(struct ib_qp *qp);
2298 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2299 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2300 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2301 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2302 int (*post_srq_recv)(struct ib_srq *srq,
2303 const struct ib_recv_wr *recv_wr,
2304 const struct ib_recv_wr **bad_recv_wr);
2305 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2306 u8 port_num, const struct ib_wc *in_wc,
2307 const struct ib_grh *in_grh,
2308 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2309 size_t *out_mad_size, u16 *out_mad_pkey_index);
2310 int (*query_device)(struct ib_device *device,
2311 struct ib_device_attr *device_attr,
2312 struct ib_udata *udata);
2313 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2314 struct ib_device_modify *device_modify);
2315 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2316 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2317 int comp_vector);
2318 int (*query_port)(struct ib_device *device, u8 port_num,
2319 struct ib_port_attr *port_attr);
2320 int (*modify_port)(struct ib_device *device, u8 port_num,
2321 int port_modify_mask,
2322 struct ib_port_modify *port_modify);
2323
2324
2325
2326
2327
2328
2329 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2330 struct ib_port_immutable *immutable);
2331 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2332 u8 port_num);
2333
2334
2335
2336
2337
2338
2339
2340
2341 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2342
2343
2344
2345
2346
2347
2348 struct net_device *(*alloc_rdma_netdev)(
2349 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2350 const char *name, unsigned char name_assign_type,
2351 void (*setup)(struct net_device *));
2352
2353 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2354 enum rdma_netdev_t type,
2355 struct rdma_netdev_alloc_params *params);
2356
2357
2358
2359
2360
2361 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2362 union ib_gid *gid);
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2377
2378
2379
2380
2381
2382
2383
2384
2385 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2386 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2387 u16 *pkey);
2388 int (*alloc_ucontext)(struct ib_ucontext *context,
2389 struct ib_udata *udata);
2390 void (*dealloc_ucontext)(struct ib_ucontext *context);
2391 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2392
2393
2394
2395
2396
2397
2398 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2399 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2400 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2401 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2402 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2403 struct ib_udata *udata);
2404 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2405 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2406 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2407 int (*create_srq)(struct ib_srq *srq,
2408 struct ib_srq_init_attr *srq_init_attr,
2409 struct ib_udata *udata);
2410 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2411 enum ib_srq_attr_mask srq_attr_mask,
2412 struct ib_udata *udata);
2413 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2414 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2415 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2416 struct ib_qp_init_attr *qp_init_attr,
2417 struct ib_udata *udata);
2418 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2419 int qp_attr_mask, struct ib_udata *udata);
2420 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2421 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2422 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2423 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2424 struct ib_udata *udata);
2425 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2426 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2427 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2428 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2429 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2430 u64 virt_addr, int mr_access_flags,
2431 struct ib_udata *udata);
2432 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2433 u64 virt_addr, int mr_access_flags,
2434 struct ib_pd *pd, struct ib_udata *udata);
2435 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2436 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2437 u32 max_num_sg);
2438 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2439 u32 max_num_data_sg,
2440 u32 max_num_meta_sg);
2441 int (*advise_mr)(struct ib_pd *pd,
2442 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2443 struct ib_sge *sg_list, u32 num_sge,
2444 struct uverbs_attr_bundle *attrs);
2445 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2446 unsigned int *sg_offset);
2447 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2448 struct ib_mr_status *mr_status);
2449 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2450 int (*dealloc_mw)(struct ib_mw *mw);
2451 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2452 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2453 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2454 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2455 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2456 struct ib_flow_attr *flow_attr,
2457 struct ib_udata *udata);
2458 int (*destroy_flow)(struct ib_flow *flow_id);
2459 struct ib_flow_action *(*create_flow_action_esp)(
2460 struct ib_device *device,
2461 const struct ib_flow_action_attrs_esp *attr,
2462 struct uverbs_attr_bundle *attrs);
2463 int (*destroy_flow_action)(struct ib_flow_action *action);
2464 int (*modify_flow_action_esp)(
2465 struct ib_flow_action *action,
2466 const struct ib_flow_action_attrs_esp *attr,
2467 struct uverbs_attr_bundle *attrs);
2468 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2469 int state);
2470 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2471 struct ifla_vf_info *ivf);
2472 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2473 struct ifla_vf_stats *stats);
2474 int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2475 struct ifla_vf_guid *node_guid,
2476 struct ifla_vf_guid *port_guid);
2477 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2478 int type);
2479 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2480 struct ib_wq_init_attr *init_attr,
2481 struct ib_udata *udata);
2482 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2483 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2484 u32 wq_attr_mask, struct ib_udata *udata);
2485 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2486 struct ib_rwq_ind_table_init_attr *init_attr,
2487 struct ib_udata *udata);
2488 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2489 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2490 struct ib_ucontext *context,
2491 struct ib_dm_alloc_attr *attr,
2492 struct uverbs_attr_bundle *attrs);
2493 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2494 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2495 struct ib_dm_mr_attr *attr,
2496 struct uverbs_attr_bundle *attrs);
2497 int (*create_counters)(struct ib_counters *counters,
2498 struct uverbs_attr_bundle *attrs);
2499 int (*destroy_counters)(struct ib_counters *counters);
2500 int (*read_counters)(struct ib_counters *counters,
2501 struct ib_counters_read_attr *counters_read_attr,
2502 struct uverbs_attr_bundle *attrs);
2503 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2504 int data_sg_nents, unsigned int *data_sg_offset,
2505 struct scatterlist *meta_sg, int meta_sg_nents,
2506 unsigned int *meta_sg_offset);
2507
	/*
	 * alloc_hw_stats() - Allocate a struct rdma_hw_stats and fill in the
	 * driver-initialized data (counter names, number of counters and
	 * lifespan). The structure is allocated per port, or once for the
	 * whole device when @port_num is 0.
	 */
2514 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2515 u8 port_num);
2516
	/*
	 * get_hw_stats() - Fill in the counter value(s) in @stats.
	 * @index: the index in the value array to update, or num_counters to
	 *	refresh all counters.
	 * Returns a negative errno on failure, @index when the single counter
	 * it points to was updated, or num_counters when all counters were
	 * updated (which also resets the timestamp and suppresses further
	 * calls until the lifespan expires, unless lifespan is 0).
	 */
2528 int (*get_hw_stats)(struct ib_device *device,
2529 struct rdma_hw_stats *stats, u8 port, int index);
2530
	/*
	 * init_port() is called once for each port when the IB device is
	 * registered, letting the driver add port-specific sysfs attributes
	 * under @port_sysfs.
	 */
2534 int (*init_port)(struct ib_device *device, u8 port_num,
2535 struct kobject *port_sysfs);
2536
	/* Allows rdma drivers to add their own restrack attributes. */
2539 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2540 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2541 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2542 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2543 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2544 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2545 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2546
	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached.
	 */
2552 int (*enable_driver)(struct ib_device *dev);
2553
	/*
	 * This is called as part of ib_dealloc_device().
	 */
2556 void (*dealloc_driver)(struct ib_device *dev);
2557
	/* iWarp CM callbacks */
2559 void (*iw_add_ref)(struct ib_qp *qp);
2560 void (*iw_rem_ref)(struct ib_qp *qp);
2561 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2562 int (*iw_connect)(struct iw_cm_id *cm_id,
2563 struct iw_cm_conn_param *conn_param);
2564 int (*iw_accept)(struct iw_cm_id *cm_id,
2565 struct iw_cm_conn_param *conn_param);
2566 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2567 u8 pdata_len);
2568 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2569 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2570
	/*
	 * counter_bind_qp() - Bind a QP to a counter. If counter->id is zero
	 * the driver must allocate a new counter and set counter->id.
	 */
2575 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2576
	/*
	 * counter_unbind_qp() - Unbind the QP from its dynamically-allocated
	 * counter.
	 */
2580 int (*counter_unbind_qp)(struct ib_qp *qp);
2581
	/* counter_dealloc() - Deallocate the hardware counter. */
2584 int (*counter_dealloc)(struct rdma_counter *counter);
2585
	/*
	 * counter_alloc_stats() - Allocate a struct rdma_hw_stats and fill in
	 * the driver-initialized data.
	 */
2589 struct rdma_hw_stats *(*counter_alloc_stats)(
2590 struct rdma_counter *counter);
2591
	/* counter_update_stats() - Query the current value of this counter. */
2594 int (*counter_update_stats)(struct rdma_counter *counter);
2595
	/*
	 * Allows rdma drivers to add their own per-MR statistics attributes
	 * to the restrack/netlink dump.
	 */
2600 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2601
	/* query driver for its ucontext properties */
2603 int (*query_ucontext)(struct ib_ucontext *context,
2604 struct uverbs_attr_bundle *attrs);
2605
2606 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2607 DECLARE_RDMA_OBJ_SIZE(ib_counters);
2608 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2609 DECLARE_RDMA_OBJ_SIZE(ib_mw);
2610 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2611 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2612 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2613 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2614 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2615};
2616
2617struct ib_core_device {
	/*
	 * @dev must remain the first member of this structure as long as
	 * struct ib_device keeps a union of ib_core_device and device.
	 */
2621 struct device dev;
2622 possible_net_t rdma_net;
2623 struct kobject *ports_kobj;
2624 struct list_head port_list;
2625 struct ib_device *owner;
2626};
2627
2628struct rdma_restrack_root;
2629struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2631 struct device *dma_device;
2632 struct ib_device_ops ops;
2633 char name[IB_DEVICE_NAME_MAX];
2634 struct rcu_head rcu_head;
2635
2636 struct list_head event_handler_list;
	/* Protects event_handler_list */
2638 struct rw_semaphore event_handler_rwsem;
2639
	/* Protects QP's qp_open_list */
2641 spinlock_t qp_open_list_lock;
2642
2643 struct rw_semaphore client_data_rwsem;
2644 struct xarray client_data;
2645 struct mutex unregistration_lock;
2646
	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2648 rwlock_t cache_lock;
2649
2650
2651
2652 struct ib_port_data *port_data;
2653
2654 int num_comp_vectors;
2655
2656 union {
2657 struct device dev;
2658 struct ib_core_device coredev;
2659 };
2660
	/*
	 * The first group is for device attributes; the second group is for
	 * driver-provided attributes (optional). The array is NULL
	 * terminated.
	 */
2665 const struct attribute_group *groups[3];
2666
2667 u64 uverbs_cmd_mask;
2668 u64 uverbs_ex_cmd_mask;
2669
2670 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2671 __be64 node_guid;
2672 u32 local_dma_lkey;
2673 u16 is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
2675 u16 kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
2677 u16 use_cq_dim:1;
2678 u8 node_type;
2679 u8 phys_port_cnt;
2680 struct ib_device_attr attrs;
2681 struct attribute_group *hw_stats_ag;
2682 struct rdma_hw_stats *hw_stats;
2683
2684#ifdef CONFIG_CGROUP_RDMA
2685 struct rdmacg_device cg_device;
2686#endif
2687
2688 u32 index;
2689
2690 spinlock_t cq_pools_lock;
2691 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2692
2693 struct rdma_restrack_root *res;
2694
2695 const struct uapi_definition *driver_def;
2696
	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
2701 refcount_t refcount;
2702 struct completion unreg_completion;
2703 struct work_struct unregistration_work;
2704
2705 const struct rdma_link_ops *link_ops;
2706
	/* Protects compat_devs xarray modifications */
2708 struct mutex compat_devs_mutex;
2709
2710 struct xarray compat_devs;
2711
2712
2713 char iw_ifname[IFNAMSIZ];
2714 u32 iw_driver_flags;
2715 u32 lag_flags;
2716};
2717
2718struct ib_client_nl_info;
2719struct ib_client {
2720 const char *name;
2721 int (*add)(struct ib_device *ibdev);
2722 void (*remove)(struct ib_device *, void *client_data);
2723 void (*rename)(struct ib_device *dev, void *client_data);
2724 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2725 struct ib_client_nl_info *res);
2726 int (*get_global_nl_info)(struct ib_client_nl_info *res);
2727
	/*
	 * Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev.
	 */
2743 struct net_device *(*get_net_dev_by_params)(
2744 struct ib_device *dev,
2745 u8 port,
2746 u16 pkey,
2747 const union ib_gid *gid,
2748 const struct sockaddr *addr,
2749 void *client_data);
2750
2751 refcount_t uses;
2752 struct completion uses_zero;
2753 u32 client_id;
2754
	/* kverbs are not required by the client */
2756 u8 no_kverbs_req:1;
2757};
2758
/*
 * struct ib_block_iter - SG block iterator, used to walk a scatterlist in
 * contiguous memory blocks aligned to a HW supported page size.
 */
2765struct ib_block_iter {
	/* internal states */
2767 struct scatterlist *__sg;
2768 dma_addr_t __dma_addr;
2769 unsigned int __sg_nents;
2770 unsigned int __sg_advance;
2771 unsigned int __pg_bit;
2772};
2773
2774struct ib_device *_ib_alloc_device(size_t size);
2775#define ib_alloc_device(drv_struct, member) \
2776 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2777 BUILD_BUG_ON_ZERO(offsetof( \
2778 struct drv_struct, member))), \
2779 struct drv_struct, member)
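/*
 * Usage sketch (illustrative only; the "mydrv" names are hypothetical): a
 * provider embeds struct ib_device as the first member of its private device
 * structure (which the BUILD_BUG_ON_ZERO(offsetof(...)) above enforces) and
 * allocates it with ib_alloc_device():
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;
 *		unsigned long    flags;
 *	};
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&dev->ibdev);
 */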
2780
2781void ib_dealloc_device(struct ib_device *device);
2782
2783void ib_get_device_fw_str(struct ib_device *device, char *str);
2784
2785int ib_register_device(struct ib_device *device, const char *name,
2786 struct device *dma_device);
2787void ib_unregister_device(struct ib_device *device);
2788void ib_unregister_driver(enum rdma_driver_id driver_id);
2789void ib_unregister_device_and_put(struct ib_device *device);
2790void ib_unregister_device_queued(struct ib_device *ib_dev);
2791
int ib_register_client(struct ib_client *client);
2793void ib_unregister_client(struct ib_client *client);
2794
2795void __rdma_block_iter_start(struct ib_block_iter *biter,
2796 struct scatterlist *sglist,
2797 unsigned int nents,
2798 unsigned long pgsz);
2799bool __rdma_block_iter_next(struct ib_block_iter *biter);
2800
/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
2806static inline dma_addr_t
2807rdma_block_iter_dma_address(struct ib_block_iter *biter)
2808{
2809 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2810}
2811
/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's aligned
 * DMA address.
 */
2822#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2823 for (__rdma_block_iter_start(biter, sglist, nents, \
2824 pgsz); \
2825 __rdma_block_iter_next(biter);)
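/*
 * Usage sketch (illustrative only; "pas" and "i" are hypothetical locals):
 * walk an already DMA-mapped scatterlist in pgsz-aligned blocks, for example
 * when filling a device page array:
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, PAGE_SIZE)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */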
2826
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This may only be called while the client is
 * registered with the device; once the client's remove() callback has
 * returned it must not be called.
 */
2837static inline void *ib_get_client_data(struct ib_device *device,
2838 struct ib_client *client)
2839{
2840 return xa_load(&device->client_data, client->client_id);
2841}
2842void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2843 void *data);
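/*
 * Usage sketch (illustrative only; the "myclient" names are hypothetical): an
 * IB client registers add()/remove() callbacks and usually stashes per-device
 * state with ib_set_client_data() so it can later be looked up with
 * ib_get_client_data():
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, md);
 *		return 0;
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name	= "myclient",
 *		.add	= myclient_add,
 *		.remove	= myclient_remove,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */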
2844void ib_set_device_ops(struct ib_device *device,
2845 const struct ib_device_ops *ops);
2846
2847int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2848 unsigned long pfn, unsigned long size, pgprot_t prot,
2849 struct rdma_user_mmap_entry *entry);
2850int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2851 struct rdma_user_mmap_entry *entry,
2852 size_t length);
2853int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2854 struct rdma_user_mmap_entry *entry,
2855 size_t length, u32 min_pgoff,
2856 u32 max_pgoff);
2857
2858struct rdma_user_mmap_entry *
2859rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2860 unsigned long pgoff);
2861struct rdma_user_mmap_entry *
2862rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2863 struct vm_area_struct *vma);
2864void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2865
2866void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2867
2868static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2869{
2870 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2871}
2872
2873static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2874{
2875 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2876}
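/*
 * Usage sketch (illustrative only; "struct mydrv_create_resp" and its "cqn"
 * field are hypothetical driver ABI): drivers parse the user command buffer
 * with ib_copy_from_udata() and write their response back with
 * ib_copy_to_udata(), bounded by what userspace provided:
 *
 *	struct mydrv_create_resp resp = {};
 *
 *	resp.cqn = cqn;
 *	if (ib_copy_to_udata(udata, &resp,
 *			     min(sizeof(resp), udata->outlen)))
 *		return -EFAULT;
 */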
2877
2878static inline bool ib_is_buffer_cleared(const void __user *p,
2879 size_t len)
2880{
2881 bool ret;
2882 u8 *buf;
2883
2884 if (len > USHRT_MAX)
2885 return false;
2886
2887 buf = memdup_user(p, len);
2888 if (IS_ERR(buf))
2889 return false;
2890
2891 ret = !memchr_inv(buf, 0, len);
2892 kfree(buf);
2893 return ret;
2894}
2895
2896static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2897 size_t offset,
2898 size_t len)
2899{
2900 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2901}
2902
/**
 * ib_is_destroy_retryable - Check whether the uobject destruction is
 *   retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This helper lets the IB core and low-level drivers decide whether a failed
 * destroy may be retried: it is retryable when the original return code was
 * non-zero and either the removal was an explicit destroy request or the
 * owning ucontext allows retryable cleanup.
 */
2918static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2919 struct ib_uobject *uobj)
2920{
2921 return ret && (why == RDMA_REMOVE_DESTROY ||
2922 uobj->context->cleanup_retryable);
2923}
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934static inline int ib_destroy_usecnt(atomic_t *usecnt,
2935 enum rdma_remove_reason why,
2936 struct ib_uobject *uobj)
2937{
2938 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2939 return -EBUSY;
2940 return 0;
2941}
2942
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask contains all
 *   required attributes and no attributes not allowed for the given QP state
 *   transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This helper is meant for a low-level driver's modify_qp method to validate
 * the consumer's input: it checks that cur_state and next_state are valid QP
 * states, that the transition is allowed by the IB spec, and that the
 * supplied attribute mask is allowed for the transition.
 */
2958bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2959 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2960
2961void ib_register_event_handler(struct ib_event_handler *event_handler);
2962void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2963void ib_dispatch_event(const struct ib_event *event);
2964
2965int ib_query_port(struct ib_device *device,
2966 u8 port_num, struct ib_port_attr *port_attr);
2967
2968enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2969 u8 port_num);
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2981{
2982 return device->is_switch;
2983}
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993static inline u8 rdma_start_port(const struct ib_device *device)
2994{
2995 return rdma_cap_ib_switch(device) ? 0 : 1;
2996}
2997
/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device * to iterate over
 * @iter: The unsigned int to store the port number
 */
3003#define rdma_for_each_port(device, iter) \
3004 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
3005 unsigned int, iter))); \
3006 iter <= rdma_end_port(device); (iter)++)
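/*
 * Usage sketch (illustrative only): iterate the valid port numbers of a
 * device and query each port's attributes. The iterator must be an
 * unsigned int, as enforced by the __same_type() check above:
 *
 *	unsigned int port;
 *	struct ib_port_attr attr;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (ib_query_port(ibdev, port, &attr))
 *			continue;
 *		...
 *	}
 */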
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016static inline u8 rdma_end_port(const struct ib_device *device)
3017{
3018 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3019}
3020
3021static inline int rdma_is_port_valid(const struct ib_device *device,
3022 unsigned int port)
3023{
3024 return (port >= rdma_start_port(device) &&
3025 port <= rdma_end_port(device));
3026}
3027
3028static inline bool rdma_is_grh_required(const struct ib_device *device,
3029 u8 port_num)
3030{
3031 return device->port_data[port_num].immutable.core_cap_flags &
3032 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3033}
3034
3035static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
3036{
3037 return device->port_data[port_num].immutable.core_cap_flags &
3038 RDMA_CORE_CAP_PROT_IB;
3039}
3040
3041static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
3042{
3043 return device->port_data[port_num].immutable.core_cap_flags &
3044 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3045}
3046
3047static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3048{
3049 return device->port_data[port_num].immutable.core_cap_flags &
3050 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3051}
3052
3053static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3054{
3055 return device->port_data[port_num].immutable.core_cap_flags &
3056 RDMA_CORE_CAP_PROT_ROCE;
3057}
3058
3059static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3060{
3061 return device->port_data[port_num].immutable.core_cap_flags &
3062 RDMA_CORE_CAP_PROT_IWARP;
3063}
3064
3065static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3066{
3067 return rdma_protocol_ib(device, port_num) ||
3068 rdma_protocol_roce(device, port_num);
3069}
3070
3071static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3072{
3073 return device->port_data[port_num].immutable.core_cap_flags &
3074 RDMA_CORE_CAP_PROT_RAW_PACKET;
3075}
3076
3077static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3078{
3079 return device->port_data[port_num].immutable.core_cap_flags &
3080 RDMA_CORE_CAP_PROT_USNIC;
3081}
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3096{
3097 return device->port_data[port_num].immutable.core_cap_flags &
3098 RDMA_CORE_CAP_IB_MAD;
3099}
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3121{
3122 return device->port_data[port_num].immutable.core_cap_flags &
3123 RDMA_CORE_CAP_OPA_MAD;
3124}
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3147{
3148 return device->port_data[port_num].immutable.core_cap_flags &
3149 RDMA_CORE_CAP_IB_SMI;
3150}
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3168{
3169 return device->port_data[port_num].immutable.core_cap_flags &
3170 RDMA_CORE_CAP_IB_CM;
3171}
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3186{
3187 return device->port_data[port_num].immutable.core_cap_flags &
3188 RDMA_CORE_CAP_IW_CM;
3189}
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3207{
3208 return device->port_data[port_num].immutable.core_cap_flags &
3209 RDMA_CORE_CAP_IB_SA;
3210}
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3230{
3231 return rdma_cap_ib_sa(device, port_num);
3232}
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3248{
3249 return device->port_data[port_num].immutable.core_cap_flags &
3250 RDMA_CORE_CAP_AF_IB;
3251}
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3270{
3271 return device->port_data[port_num].immutable.core_cap_flags &
3272 RDMA_CORE_CAP_ETH_AH;
3273}
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3285{
3286 return (device->port_data[port_num].immutable.core_cap_flags &
3287 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3288}
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3303{
3304 return device->port_data[port_num].immutable.max_mad_size;
3305}
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3321 u8 port_num)
3322{
3323 return rdma_protocol_roce(device, port_num) &&
3324 device->ops.add_gid && device->ops.del_gid;
3325}
3326
3327
3328
3329
3330static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3331{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
	 * has support for it yet.
	 */
3336 return rdma_protocol_iwarp(dev, port_num);
3337}
3338
3339
3340
3341
3342
3343
3344
3345
3346static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3347 u32 port_num)
3348{
3349 return (device->port_data[port_num].immutable.core_cap_flags &
3350 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3351}
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3363 int mtu)
3364{
3365 if (rdma_core_cap_opa_port(device, port))
3366 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3367 else
3368 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3369}
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3380 struct ib_port_attr *attr)
3381{
3382 if (rdma_core_cap_opa_port(device, port))
3383 return attr->phys_mtu;
3384 else
3385 return ib_mtu_enum_to_int(attr->max_mtu);
3386}
3387
3388int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3389 int state);
3390int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3391 struct ifla_vf_info *info);
3392int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3393 struct ifla_vf_stats *stats);
3394int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3395 struct ifla_vf_guid *node_guid,
3396 struct ifla_vf_guid *port_guid);
3397int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3398 int type);
3399
3400int ib_query_pkey(struct ib_device *device,
3401 u8 port_num, u16 index, u16 *pkey);
3402
3403int ib_modify_device(struct ib_device *device,
3404 int device_modify_mask,
3405 struct ib_device_modify *device_modify);
3406
3407int ib_modify_port(struct ib_device *device,
3408 u8 port_num, int port_modify_mask,
3409 struct ib_port_modify *port_modify);
3410
3411int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3412 u8 *port_num, u16 *index);
3413
3414int ib_find_pkey(struct ib_device *device,
3415 u8 port_num, u16 pkey, u16 *index);
3416
3417enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey. This can be used by
	 * kernel clients to avoid registering memory.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments. Every use of it will log a warning
	 * in the kernel log.
	 */
3427 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3428};
3429
3430struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3431 const char *caller);
3432
3433#define ib_alloc_pd(device, flags) \
3434 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3435
3436int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3437
3438
3439
3440
3441
3442
3443
3444static inline void ib_dealloc_pd(struct ib_pd *pd)
3445{
3446 int ret = ib_dealloc_pd_user(pd, NULL);
3447
3448 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3449}
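/*
 * Usage sketch (illustrative only): kernel ULPs allocate a PD against a
 * device and release it again with ib_dealloc_pd() once all objects created
 * on it are gone:
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */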
3450
3451enum rdma_create_ah_flags {
3452
3453 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3454};
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3466 u32 flags);
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3481 struct rdma_ah_attr *ah_attr,
3482 struct ib_udata *udata);
3483
3484
3485
3486
3487
3488
3489
3490
3491int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3492 enum rdma_network_type net_type,
3493 union ib_gid *sgid, union ib_gid *dgid);
3494
3495
3496
3497
3498
3499int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3520 const struct ib_wc *wc, const struct ib_grh *grh,
3521 struct rdma_ah_attr *ah_attr);
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3536 const struct ib_grh *grh, u8 port_num);
3537
3538
3539
3540
3541
3542
3543
3544
3545int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3546
3547
3548
3549
3550
3551
3552
3553
3554int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3555
3556enum rdma_destroy_ah_flags {
3557
3558 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3559};
3560
3561
3562
3563
3564
3565
3566
3567int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3568
3569
3570
3571
3572
3573
3574
3575
3576static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3577{
3578 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3579
3580 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3581}
3582
3583struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3584 struct ib_srq_init_attr *srq_init_attr,
3585 struct ib_usrq_object *uobject,
3586 struct ib_udata *udata);
3587static inline struct ib_srq *
3588ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3589{
3590 if (!pd->device->ops.create_srq)
3591 return ERR_PTR(-EOPNOTSUPP);
3592
3593 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3594}
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608int ib_modify_srq(struct ib_srq *srq,
3609 struct ib_srq_attr *srq_attr,
3610 enum ib_srq_attr_mask srq_attr_mask);
3611
3612
3613
3614
3615
3616
3617
3618int ib_query_srq(struct ib_srq *srq,
3619 struct ib_srq_attr *srq_attr);
3620
3621
3622
3623
3624
3625
3626int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3627
3628
3629
3630
3631
3632
3633
3634static inline void ib_destroy_srq(struct ib_srq *srq)
3635{
3636 int ret = ib_destroy_srq_user(srq, NULL);
3637
3638 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3639}
3640
3641
3642
3643
3644
3645
3646
3647
3648static inline int ib_post_srq_recv(struct ib_srq *srq,
3649 const struct ib_recv_wr *recv_wr,
3650 const struct ib_recv_wr **bad_recv_wr)
3651{
3652 const struct ib_recv_wr *dummy;
3653
3654 return srq->device->ops.post_srq_recv(srq, recv_wr,
3655 bad_recv_wr ? : &dummy);
3656}
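/*
 * Usage sketch (illustrative only; "ctx" and "sge" are assumed to be set up
 * as for ib_post_recv()): replenish an SRQ with one receive buffer:
 *
 *	struct ib_recv_wr wr = {
 *		.wr_cqe  = &ctx->cqe,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	ret = ib_post_srq_recv(srq, &wr, NULL);
 */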
3657
3658struct ib_qp *ib_create_qp(struct ib_pd *pd,
3659 struct ib_qp_init_attr *qp_init_attr);
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672int ib_modify_qp_with_udata(struct ib_qp *qp,
3673 struct ib_qp_attr *attr,
3674 int attr_mask,
3675 struct ib_udata *udata);
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686int ib_modify_qp(struct ib_qp *qp,
3687 struct ib_qp_attr *qp_attr,
3688 int qp_attr_mask);
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701int ib_query_qp(struct ib_qp *qp,
3702 struct ib_qp_attr *qp_attr,
3703 int qp_attr_mask,
3704 struct ib_qp_init_attr *qp_init_attr);
3705
3706
3707
3708
3709
3710
3711int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3712
3713
3714
3715
3716
3717
3718
3719static inline int ib_destroy_qp(struct ib_qp *qp)
3720{
3721 return ib_destroy_qp_user(qp, NULL);
3722}
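/*
 * Usage sketch (illustrative only; "pd" and "cq" are assumed to exist): a
 * kernel consumer creates an RC QP, moves it through its states with
 * ib_modify_qp() (typically via rdma_cm helpers) and finally destroys it:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	  = cq,
 *		.recv_cq	  = cq,
 *		.qp_type	  = IB_QPT_RC,
 *		.cap.max_send_wr  = 16,
 *		.cap.max_recv_wr  = 16,
 *		.cap.max_send_sge = 1,
 *		.cap.max_recv_sge = 1,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_destroy_qp(qp);
 */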
3723
3724
3725
3726
3727
3728
3729
3730
3731struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3732 struct ib_qp_open_attr *qp_open_attr);
3733
3734
3735
3736
3737
3738
3739
3740
3741int ib_close_qp(struct ib_qp *qp);
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756static inline int ib_post_send(struct ib_qp *qp,
3757 const struct ib_send_wr *send_wr,
3758 const struct ib_send_wr **bad_send_wr)
3759{
3760 const struct ib_send_wr *dummy;
3761
3762 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3763}
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773static inline int ib_post_recv(struct ib_qp *qp,
3774 const struct ib_recv_wr *recv_wr,
3775 const struct ib_recv_wr **bad_recv_wr)
3776{
3777 const struct ib_recv_wr *dummy;
3778
3779 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3780}
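/*
 * Usage sketch (illustrative only; "dma_addr", "len" and "ctx->cqe" are
 * assumed to come from prior mapping/setup): post one signaled SEND using the
 * PD's local DMA lkey:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_cqe	    = &ctx->cqe,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */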
3781
3782struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3783 int comp_vector, enum ib_poll_context poll_ctx,
3784 const char *caller);
3785static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3786 int nr_cqe, int comp_vector,
3787 enum ib_poll_context poll_ctx)
3788{
3789 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3790 KBUILD_MODNAME);
3791}
3792
3793struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3794 int nr_cqe, enum ib_poll_context poll_ctx,
3795 const char *caller);
3796
3797
3798
3799
3800
3801
3802
3803
3804static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3805 void *private, int nr_cqe,
3806 enum ib_poll_context poll_ctx)
3807{
3808 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3809 KBUILD_MODNAME);
3810}
3811
3812void ib_free_cq(struct ib_cq *cq);
3813int ib_process_cq_direct(struct ib_cq *cq, int budget);
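/*
 * Usage sketch (illustrative only; "ctx" is a hypothetical private pointer):
 * most kernel ULPs use the CQ API above instead of open-coding ib_poll_cq();
 * the core polls the CQ in the requested context and calls wc->wr_cqe->done()
 * for each completion:
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(ibdev, ctx, 128, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */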
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828struct ib_cq *__ib_create_cq(struct ib_device *device,
3829 ib_comp_handler comp_handler,
3830 void (*event_handler)(struct ib_event *, void *),
3831 void *cq_context,
3832 const struct ib_cq_init_attr *cq_attr,
3833 const char *caller);
3834#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3835 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3836
3837
3838
3839
3840
3841
3842
3843
3844int ib_resize_cq(struct ib_cq *cq, int cqe);
3845
3846
3847
3848
3849
3850
3851
3852
3853int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3854
3855
3856
3857
3858
3859
3860int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3861
3862
3863
3864
3865
3866
3867
3868static inline void ib_destroy_cq(struct ib_cq *cq)
3869{
3870 int ret = ib_destroy_cq_user(cq, NULL);
3871
3872 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3873}
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3888 struct ib_wc *wc)
3889{
3890 return cq->device->ops.poll_cq(cq, num_entries, wc);
3891}
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920static inline int ib_req_notify_cq(struct ib_cq *cq,
3921 enum ib_cq_notify_flags flags)
3922{
3923 return cq->device->ops.req_notify_cq(cq, flags);
3924}
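/*
 * Usage sketch (illustrative only; handle_completion() is a hypothetical
 * handler): consumers that drive a CQ by hand re-arm it and then drain it;
 * re-polling after arming (or using IB_CQ_REPORT_MISSED_EVENTS) closes the
 * race with completions that arrive while arming:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);
 */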
3925
3926struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3927 int comp_vector_hint,
3928 enum ib_poll_context poll_ctx);
3929
3930void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3931
3932
3933
3934
3935
3936
3937
3938
3939static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3940{
3941 return cq->device->ops.req_ncomp_notif ?
3942 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3943 -ENOSYS;
3944}
3945
3946
3947
3948
3949
3950
3951static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3952{
3953 return dma_mapping_error(dev->dma_device, dma_addr);
3954}
3955
3956
3957
3958
3959
3960
3961
3962
3963static inline u64 ib_dma_map_single(struct ib_device *dev,
3964 void *cpu_addr, size_t size,
3965 enum dma_data_direction direction)
3966{
3967 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3968}
3969
3970
3971
3972
3973
3974
3975
3976
3977static inline void ib_dma_unmap_single(struct ib_device *dev,
3978 u64 addr, size_t size,
3979 enum dma_data_direction direction)
3980{
3981 dma_unmap_single(dev->dma_device, addr, size, direction);
3982}
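/*
 * Usage sketch (illustrative only; "buf" and "len" are a hypothetical kernel
 * buffer): map memory for device access through the ib_dma_* wrappers and
 * always check the result with ib_dma_mapping_error():
 *
 *	u64 dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ibdev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
 */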
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992static inline u64 ib_dma_map_page(struct ib_device *dev,
3993 struct page *page,
3994 unsigned long offset,
3995 size_t size,
3996 enum dma_data_direction direction)
3997{
3998 return dma_map_page(dev->dma_device, page, offset, size, direction);
3999}
4000
4001
4002
4003
4004
4005
4006
4007
4008static inline void ib_dma_unmap_page(struct ib_device *dev,
4009 u64 addr, size_t size,
4010 enum dma_data_direction direction)
4011{
4012 dma_unmap_page(dev->dma_device, addr, size, direction);
4013}
4014
4015
4016
4017
4018
4019
4020
4021
4022static inline int ib_dma_map_sg(struct ib_device *dev,
4023 struct scatterlist *sg, int nents,
4024 enum dma_data_direction direction)
4025{
4026 return dma_map_sg(dev->dma_device, sg, nents, direction);
4027}
4028
4029
4030
4031
4032
4033
4034
4035
4036static inline void ib_dma_unmap_sg(struct ib_device *dev,
4037 struct scatterlist *sg, int nents,
4038 enum dma_data_direction direction)
4039{
4040 dma_unmap_sg(dev->dma_device, sg, nents, direction);
4041}
4042
4043static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4044 struct scatterlist *sg, int nents,
4045 enum dma_data_direction direction,
4046 unsigned long dma_attrs)
4047{
4048 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4049 dma_attrs);
4050}
4051
4052static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4053 struct scatterlist *sg, int nents,
4054 enum dma_data_direction direction,
4055 unsigned long dma_attrs)
4056{
4057 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4058}
4059
4060
4061
4062
4063
4064
4065
4066static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4067{
4068 return dma_get_max_seg_size(dev->dma_device);
4069}
4070
4071
4072
4073
4074
4075
4076
4077
4078static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4079 u64 addr,
4080 size_t size,
4081 enum dma_data_direction dir)
4082{
4083 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4084}
4085
4086
4087
4088
4089
4090
4091
4092
4093static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4094 u64 addr,
4095 size_t size,
4096 enum dma_data_direction dir)
4097{
4098 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4099}
4100
4101
4102
4103
4104
4105
4106
4107
4108static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4109 size_t size,
4110 dma_addr_t *dma_handle,
4111 gfp_t flag)
4112{
4113 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4114}
4115
4116
4117
4118
4119
4120
4121
4122
4123static inline void ib_dma_free_coherent(struct ib_device *dev,
4124 size_t size, void *cpu_addr,
4125 dma_addr_t dma_handle)
4126{
4127 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4128}
4129
4130
4131
4132
4133struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4134 u64 virt_addr, int mr_access_flags);
4135
4136
4137int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4138 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4139
4140
4141
4142
4143
4144
4145
4146
4147int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158static inline int ib_dereg_mr(struct ib_mr *mr)
4159{
4160 return ib_dereg_mr_user(mr, NULL);
4161}
4162
4163struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4164 u32 max_num_sg);
4165
4166struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4167 u32 max_num_data_sg,
4168 u32 max_num_meta_sg);
4169
4170
4171
4172
4173
4174
4175
4176static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4177{
4178 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4179 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4180}
4181
4182
4183
4184
4185
4186
4187static inline u32 ib_inc_rkey(u32 rkey)
4188{
4189 const u32 mask = 0x000000ff;
4190 return ((rkey + 1) & mask) | (rkey & ~mask);
4191}
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4206
4207
4208
4209
4210
4211
4212
4213int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4214
4215struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4216 struct inode *inode, struct ib_udata *udata);
4217int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4218
4219static inline int ib_check_mr_access(int flags)
4220{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
4225 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4226 !(flags & IB_ACCESS_LOCAL_WRITE))
4227 return -EINVAL;
4228
4229 if (flags & ~IB_ACCESS_SUPPORTED)
4230 return -EINVAL;
4231
4232 return 0;
4233}
4234
4235static inline bool ib_access_writable(int access_flags)
4236{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set. "Local write" and "remote write" obviously
	 * require write access. "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
4244 return access_flags &
4245 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4246 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4247}
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4262 struct ib_mr_status *mr_status);
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277static inline bool ib_device_try_get(struct ib_device *dev)
4278{
4279 return refcount_inc_not_zero(&dev->refcount);
4280}
4281
4282void ib_device_put(struct ib_device *device);
4283struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4284 enum rdma_driver_id driver_id);
4285struct ib_device *ib_device_get_by_name(const char *name,
4286 enum rdma_driver_id driver_id);
4287struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4288 u16 pkey, const union ib_gid *gid,
4289 const struct sockaddr *addr);
4290int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4291 unsigned int port);
4292struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4293
4294struct ib_wq *ib_create_wq(struct ib_pd *pd,
4295 struct ib_wq_init_attr *init_attr);
4296int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4297int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4298 u32 wq_attr_mask);
4299
4300int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4301 unsigned int *sg_offset, unsigned int page_size);
4302int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4303 int data_sg_nents, unsigned int *data_sg_offset,
4304 struct scatterlist *meta_sg, int meta_sg_nents,
4305 unsigned int *meta_sg_offset, unsigned int page_size);
4306
4307static inline int
4308ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4309 unsigned int *sg_offset, unsigned int page_size)
4310{
4311 int n;
4312
4313 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4314 mr->iova = 0;
4315
4316 return n;
4317}
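/*
 * Usage sketch (illustrative only; "sg"/"nents" describe a mapped
 * scatterlist): a fast-registration MR is allocated once; before each
 * registration the consumer bumps the key and maps the scatterlist into the
 * MR, then registers it by posting an IB_WR_REG_MR work request:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 */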
4318
4319int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4320 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4321
4322void ib_drain_rq(struct ib_qp *qp);
4323void ib_drain_sq(struct ib_qp *qp);
4324void ib_drain_qp(struct ib_qp *qp);
4325
4326int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width);
4327
4328static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4329{
4330 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4331 return attr->roce.dmac;
4332 return NULL;
4333}
4334
4335static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4336{
4337 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4338 attr->ib.dlid = (u16)dlid;
4339 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4340 attr->opa.dlid = dlid;
4341}
4342
4343static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4344{
4345 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4346 return attr->ib.dlid;
4347 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4348 return attr->opa.dlid;
4349 return 0;
4350}
4351
4352static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4353{
4354 attr->sl = sl;
4355}
4356
4357static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4358{
4359 return attr->sl;
4360}
4361
4362static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4363 u8 src_path_bits)
4364{
4365 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4366 attr->ib.src_path_bits = src_path_bits;
4367 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4368 attr->opa.src_path_bits = src_path_bits;
4369}
4370
4371static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4372{
4373 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4374 return attr->ib.src_path_bits;
4375 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4376 return attr->opa.src_path_bits;
4377 return 0;
4378}
4379
4380static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4381 bool make_grd)
4382{
4383 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4384 attr->opa.make_grd = make_grd;
4385}
4386
4387static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4388{
4389 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4390 return attr->opa.make_grd;
4391 return false;
4392}
4393
4394static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4395{
4396 attr->port_num = port_num;
4397}
4398
4399static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4400{
4401 return attr->port_num;
4402}
4403
4404static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4405 u8 static_rate)
4406{
4407 attr->static_rate = static_rate;
4408}
4409
4410static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4411{
4412 return attr->static_rate;
4413}
4414
4415static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4416 enum ib_ah_flags flag)
4417{
4418 attr->ah_flags = flag;
4419}
4420
4421static inline enum ib_ah_flags
4422 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4423{
4424 return attr->ah_flags;
4425}
4426
4427static inline const struct ib_global_route
4428 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4429{
4430 return &attr->grh;
4431}
4432
/* To retrieve and modify the grh */
4434static inline struct ib_global_route
4435 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4436{
4437 return &attr->grh;
4438}
4439
4440static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4441{
4442 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4443
4444 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4445}
4446
4447static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4448 __be64 prefix)
4449{
4450 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4451
4452 grh->dgid.global.subnet_prefix = prefix;
4453}
4454
4455static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4456 __be64 if_id)
4457{
4458 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4459
4460 grh->dgid.global.interface_id = if_id;
4461}
4462
4463static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4464 union ib_gid *dgid, u32 flow_label,
4465 u8 sgid_index, u8 hop_limit,
4466 u8 traffic_class)
4467{
4468 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4469
4470 attr->ah_flags = IB_AH_GRH;
4471 if (dgid)
4472 grh->dgid = *dgid;
4473 grh->flow_label = flow_label;
4474 grh->sgid_index = sgid_index;
4475 grh->hop_limit = hop_limit;
4476 grh->traffic_class = traffic_class;
4477 grh->sgid_attr = NULL;
4478}
4479
4480void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4481void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4482 u32 flow_label, u8 hop_limit, u8 traffic_class,
4483 const struct ib_gid_attr *sgid_attr);
4484void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4485 const struct rdma_ah_attr *src);
4486void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4487 const struct rdma_ah_attr *new);
4488void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4489
4490
4491
4492
4493
4494
4495
4496static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4497 u8 port_num)
4498{
4499 if (rdma_protocol_roce(dev, port_num))
4500 return RDMA_AH_ATTR_TYPE_ROCE;
4501 if (rdma_protocol_ib(dev, port_num)) {
4502 if (rdma_cap_opa_ah(dev, port_num))
4503 return RDMA_AH_ATTR_TYPE_OPA;
4504 return RDMA_AH_ATTR_TYPE_IB;
4505 }
4506
4507 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4508}
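/*
 * Usage sketch (illustrative only; "dlid" and "port_num" are assumed inputs):
 * build an address handle attribute with the accessors above and create a
 * kernel AH from it:
 *
 *	struct rdma_ah_attr ah_attr = {};
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = rdma_ah_find_type(ibdev, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 */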
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519static inline u16 ib_lid_cpu16(u32 lid)
4520{
4521 WARN_ON_ONCE(lid & 0xFFFF0000);
4522 return (u16)lid;
4523}
4524
4525
4526
4527
4528
4529
4530static inline __be16 ib_lid_be16(u32 lid)
4531{
4532 WARN_ON_ONCE(lid & 0xFFFF0000);
4533 return cpu_to_be16((u16)lid);
4534}
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546static inline const struct cpumask *
4547ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4548{
4549 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4550 !device->ops.get_vector_affinity)
4551 return NULL;
4552
4553 return device->ops.get_vector_affinity(device, comp_vector);
4555}
4556
4557
4558
4559
4560
4561
4562
4563void rdma_roce_rescan_device(struct ib_device *ibdev);
4564
4565struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4566
4567int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4568
4569struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4570 enum rdma_netdev_t type, const char *name,
4571 unsigned char name_assign_type,
4572 void (*setup)(struct net_device *));
4573
4574int rdma_init_netdev(struct ib_device *device, u8 port_num,
4575 enum rdma_netdev_t type, const char *name,
4576 unsigned char name_assign_type,
4577 void (*setup)(struct net_device *),
4578 struct net_device *netdev);
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595static inline void
4596rdma_set_device_sysfs_group(struct ib_device *dev,
4597 const struct attribute_group *group)
4598{
4599 dev->groups[1] = group;
4600}
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4611{
4612 struct ib_core_device *coredev =
4613 container_of(device, struct ib_core_device, dev);
4614
4615 return coredev->owner;
4616}
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4627 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4628
4629bool rdma_dev_access_netns(const struct ib_device *device,
4630 const struct net *net);
4631
4632#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4633#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4634#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4645{
4646 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4647
4648 fl_low ^= fl_high >> 14;
4649 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4650}
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4668{
4669 u64 v = (u64)lqpn * rqpn;
4670
4671 v ^= v >> 20;
4672 v ^= v >> 40;
4673
4674 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4675}
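/*
 * Usage sketch (illustrative only; "remote_qpn" is an assumed input): derive
 * a symmetric flow label from the local and remote QPNs and fold it into the
 * RoCE v2 UDP source port, so both sides hash onto the same network path:
 *
 *	u32 fl = rdma_calc_flow_label(qp->qp_num, remote_qpn);
 *	u16 sport = rdma_flow_label_to_udp_sport(fl);
 */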
4676#endif
4677