#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

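/*
 * printk-style logging helpers that prefix the message with the name of the
 * IB device. A typical call site looks like:
 *
 *	ibdev_err(ibdev, "failed to create CQ: %d\n", ret);
 */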
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))

#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

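/* A 128-bit InfiniBand global identifier (GID), stored in network byte order. */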
union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

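/*
 * IB_GID_TYPE_IB and IB_GID_TYPE_ROCE share the value 0: when the link layer
 * is Ethernet, a GID of this type is a RoCE v1 GID, which is laid out the
 * same way as a native IB GID.
 */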
enum ib_gid_type {
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu *ndev;
	struct ib_device *device;
	union ib_gid gid;
	enum ib_gid_type gid_type;
	u16 index;
	u8 port_num;
};

enum {
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),

	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),

	IB_DEVICE_MEM_WINDOW = (1 << 17),

	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),

	IB_DEVICE_RAW_IP_CSUM = (1 << 26),

	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),

	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),

	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};

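/*
 * The corresponding bit in supported_qpts is set if the QP type from
 * enum ib_qp_type is supported, e.g. supported_qpts |= 1 << IB_QPT_UD.
 */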
struct ib_rss_caps {
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

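/*
 * Tag-matching capabilities: max_rndv_hdr_size is the maximum size of the
 * rendezvous request header, max_num_tags the maximum number of entries in
 * the tag matching list, flags holds enum ib_tm_cap_flags, max_ops the
 * maximum number of outstanding list operations, and max_sge the maximum
 * number of SGEs per tag matching entry.
 */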
struct ib_tm_caps {
	u32 max_rndv_hdr_size;
	u32 max_num_tags;
	u32 flags;
	u32 max_ops;
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	u32 comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64 length;
	u64 offset;
	u32 access_flags;
};

struct ib_dm_alloc_attr {
	u64 length;
	u32 alignment;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	unsigned int max_pi_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock;
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps;
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;

	u32 max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_2X = 16,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_2X:  return 2;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

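/**
 * struct rdma_hw_stats - container for driver-provided hardware counters
 * @lock: protects parallel updates of @lifespan and the 64-bit @value
 *	array, which is not guaranteed to be written atomically on 32-bit
 *	systems
 * @timestamp: jiffies of the last counter update
 * @lifespan: minimum age (in jiffies) the counters may reach before being
 *	updated again; defaults to RDMA_HW_STATS_DEFAULT_LIFESPAN
 *	milliseconds, drivers can override it at allocation time
 * @names: static names for the counters, indexed as in @value
 * @num_counters: how many counters are in @names and @value
 * @value: the counter values, filled in by the driver's get_hw_stats routine
 */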
struct rdma_hw_stats {
	struct mutex lock;
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

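/**
 * rdma_alloc_hw_stats_struct - allocate a struct rdma_hw_stats for a driver
 * @names: array of static counter names
 * @num_counters: how many elements are in @names
 * @lifespan: time between automatic updates, in milliseconds
 *
 * A driver would typically call this from its alloc_hw_stats() method, e.g.
 * rdma_alloc_hw_stats_struct(my_counter_names, ARRAY_SIZE(my_counter_names),
 * RDMA_HW_STATS_DEFAULT_LIFESPAN), where my_counter_names stands in for the
 * driver's own name table.
 */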
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}

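/*
 * Define bits for the various functionality this port needs to be supported
 * by the core: management bits occupy 0x00000FFF, address-format bits
 * 0x000FF000 and protocol bits 0xFFF00000.
 */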
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB		(RDMA_CORE_CAP_PROT_IB \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_IB_SA \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE		(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP		(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	u32 phys_mtu;
	int gid_tbl_len;
	unsigned int ip_gids:1;

	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	u16 port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device  = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS = 19,
	IB_RATE_50_GBPS = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

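/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */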
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

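/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */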
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

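/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG: memory region that is used for normal registration
 * @IB_MR_TYPE_SG_GAPS: memory region that is capable of registering
 *	arbitrary sg lists without the normal mr constraints (see
 *	ib_map_mr_sg)
 * @IB_MR_TYPE_DM: memory region that is used for device memory registration
 * @IB_MR_TYPE_USER: memory region that is used for a user-space application
 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
 * @IB_MR_TYPE_INTEGRITY: memory region that is used for signature operations
 */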
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

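/**
 * struct ib_mr_status - Memory region status container
 * @fail_status: Bitmask of MR checks status. For each failed check a
 *	corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS failure.
 */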
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

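/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
 * @mult: multiple to convert.
 */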
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
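	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */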
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

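	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */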
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
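	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order), since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */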
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,

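	/*
	 * Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer.
	 */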
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
	IB_QP_CREATE_NETDEV_USE = 1 << 7,
	IB_QP_CREATE_SCATTER_FCS =
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING =
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN = 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING =
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,

	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

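/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the
 * event_handler callback to destroy the passed in qp.
 */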
struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);

	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u32 create_flags;

	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
	struct net_device *xmit_slave;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
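
/*
 * The typed work-request structures embed a struct ib_send_wr as their first
 * member, so a posted WR can be recovered from the generic pointer via
 * container_of() using these helpers. A minimal sketch of posting an RDMA
 * WRITE (rkey, remote_addr and sge are placeholder local variables):
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */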

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index;
	u8 port_num;
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
	RDMA_REMOVE_DESTROY,
	RDMA_REMOVE_CLOSE,
	RDMA_REMOVE_DRIVER_REMOVE,
	RDMA_REMOVE_ABORT,
	RDMA_REMOVE_ABORT_HWOBJ,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg;
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;

	bool closing;

	bool cleanup_retryable;

	struct ib_rdmacg_object cg_obj;

	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64 user_handle;
	struct ib_uverbs_file *ufile;
	struct ib_ucontext *context;
	void *object;
	struct list_head list;
	struct ib_rdmacg_object cg_obj;
	int id;
	struct kref ref;
	atomic_t usecnt;
	struct rcu_head rcu;

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;

	u32 unsafe_global_rkey;

	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt;
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,
	IB_POLL_WORKQUEUE,
	IB_POLL_UNBOUND_WORKQUEUE,
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,
};

struct ib_cq {
	struct ib_device *device;
	struct ib_ucq_object *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	unsigned int cqe_used;
	atomic_t usecnt;
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	struct list_head pool_entry;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;

	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_usrq_object *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
	IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
	IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uwq_object *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
		IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags;
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
	IB_WQ_FLAGS = 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags;
	u32 flags_mask;
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;

	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;

	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;

	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	struct list_head xrcd_list;

	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uqp_object *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;

	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	bool integrity_en;

	struct rdma_restrack_entry res;

	struct rdma_counter *counter;
};

struct ib_dm {
	struct ib_device *device;
	u32 length;
	u32 flags;
	struct ib_uobject *uobject;
	atomic_t usecnt;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	enum ib_mr_type type;
	bool need_inval;
	union {
		struct ib_uobject *uobject;
		struct list_head qp_entry;
	};

	struct ib_dm *dm;
	struct ib_sig_attrs *sig_attrs;

	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

enum ib_flow_attr_type {
	IB_FLOW_ATTR_NORMAL = 0x0,
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	IB_FLOW_ATTR_SNIFFER = 0x3
};

enum ib_flow_spec_type {
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	IB_FLOW_SPEC_ESP = 0x34,
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_GRE = 0x51,
	IB_FLOW_SPEC_MPLS = 0x60,
	IB_FLOW_SPEC_INNER = 0x100,
	IB_FLOW_SPEC_ACTION_TAG = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2,
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	u8 real_sz[];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	u8 real_sz[];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2,
	IB_IPV4_MORE_FRAG = 0x4
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	u8 real_sz[];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	u8 real_sz[];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	u8 real_sz[];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[];
};

struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32 spi;
	__be32 seq;
	u8 real_sz[];
};

struct ib_flow_spec_esp {
	u32 type;
	u16 size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	u8 real_sz[];
};

struct ib_flow_spec_gre {
	u32 type;
	u16 size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	u8 real_sz[];
};

struct ib_flow_spec_mpls {
	u32 type;
	u16 size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_flow_action *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_esp esp;
	struct ib_flow_spec_gre gre;
	struct ib_flow_spec_mpls mpls;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_device *device;
	struct ib_uobject *uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list *next;
	union ib_flow_spec spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats *keymat;
	struct ib_flow_action_attrs_esp_replays *replay;
	struct ib_flow_spec_list *encap;

	u32 esn;
	u32 spi;
	u32 seq;
	u32 tfc_pad;

	u64 flags;
	u64 hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device *device;
	struct ib_uobject *uobject;
	enum ib_flow_action_type type;
	atomic_t usecnt;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,
	IB_MAD_RESULT_SUCCESS = 1 << 0,
	IB_MAD_RESULT_REPLY = 1 << 1,
	IB_MAD_RESULT_CONSUMED = 1 << 2
};

struct ib_port_cache {
	u64 subnet_prefix;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table *gid;
	u8 lmc;
	enum ib_port_state port_state;
};

struct ib_port_immutable {
	int pkey_tbl_len;
	int gid_tbl_len;
	u32 core_cap_flags;
	u32 max_mad_size;
};

struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;
	struct list_head pkey_list;

	struct ib_port_cache cache;

	spinlock_t netdev_lock;
	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct rdma_hw_stats *hw_stats;
};

enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

struct rdma_netdev {
	void *clnt_priv;
	struct ib_device *hca;
	u8 port_num;
	int mtu;

	void (*free_rdma_netdev)(struct net_device *netdev);

	void (*set_id)(struct net_device *netdev, int id);

	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);

	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct rdma_netdev_alloc_params {
	size_t sizeof_priv;
	unsigned int txqs;
	unsigned int rxqs;
	void *param;

	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
				      struct net_device *netdev, void *param);
};

struct ib_odp_counters {
	atomic64_t faults;
	atomic64_t invalidations;
};

struct ib_counters {
	struct ib_device *device;
	struct ib_uobject *uobject;

	atomic_t usecnt;
};

struct ib_counters_read_attr {
	u64 *counters_buff;
	u32 ncounters;
	u32 flags;
};

struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;

#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))
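
/*
 * INIT_RDMA_OBJ_SIZE records, in the driver's struct ib_device_ops, how much
 * memory the core must allocate for an object so that the driver's wrapper
 * struct fits; the BUILD_BUG_ON_ZERO() checks enforce that the ib_* struct
 * is the first member of the wrapper. A driver would typically use it as,
 * e.g. (my_dev_ops, my_drv_pd and its ibpd member are illustrative names):
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_drv_pd, ibpd),
 *	};
 */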

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

struct rdma_user_mmap_entry {
	struct kref ref;
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	size_t npages;
	bool driver_removed;
};

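/* Return the offset (in bytes) the user should pass to libc's mmap() */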
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}

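/**
 * struct ib_device_ops - InfiniBand device operations
 *
 * This structure defines all the InfiniBand device operations; providers
 * will need to define the supported operations, otherwise they will be set
 * to null.
 */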
2328struct ib_device_ops {
2329 struct module *owner;
2330 enum rdma_driver_id driver_id;
2331 u32 uverbs_abi_ver;
2332 unsigned int uverbs_no_driver_id_binding:1;
2333
2334 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2335 const struct ib_send_wr **bad_send_wr);
2336 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2337 const struct ib_recv_wr **bad_recv_wr);
2338 void (*drain_rq)(struct ib_qp *qp);
2339 void (*drain_sq)(struct ib_qp *qp);
2340 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2341 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2342 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2343 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2344 int (*post_srq_recv)(struct ib_srq *srq,
2345 const struct ib_recv_wr *recv_wr,
2346 const struct ib_recv_wr **bad_recv_wr);
2347 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2348 u8 port_num, const struct ib_wc *in_wc,
2349 const struct ib_grh *in_grh,
2350 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2351 size_t *out_mad_size, u16 *out_mad_pkey_index);
2352 int (*query_device)(struct ib_device *device,
2353 struct ib_device_attr *device_attr,
2354 struct ib_udata *udata);
2355 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2356 struct ib_device_modify *device_modify);
2357 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2358 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2359 int comp_vector);
2360 int (*query_port)(struct ib_device *device, u8 port_num,
2361 struct ib_port_attr *port_attr);
2362 int (*modify_port)(struct ib_device *device, u8 port_num,
2363 int port_modify_mask,
2364 struct ib_port_modify *port_modify);
2365
2366
2367
2368
2369
2370
2371 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2372 struct ib_port_immutable *immutable);
2373 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2374 u8 port_num);
2375
2376
2377
2378
2379
2380
2381
2382
2383 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2384
2385
2386
2387
2388
2389
2390 struct net_device *(*alloc_rdma_netdev)(
2391 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2392 const char *name, unsigned char name_assign_type,
2393 void (*setup)(struct net_device *));
2394
2395 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2396 enum rdma_netdev_t type,
2397 struct rdma_netdev_alloc_params *params);
2398
2399
2400
2401
2402
2403 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2404 union ib_gid *gid);
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2419
2420
2421
2422
2423
2424
2425
2426
2427 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2428 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2429 u16 *pkey);
2430 int (*alloc_ucontext)(struct ib_ucontext *context,
2431 struct ib_udata *udata);
2432 void (*dealloc_ucontext)(struct ib_ucontext *context);
2433 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2434
2435
2436
2437
2438
2439
2440 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags,
			     struct ib_pd *pd, struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       int domain, struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
			   struct ifla_vf_guid *node_guid,
			   struct ifla_vf_guid *port_guid);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
		struct ib_device *device,
		struct ib_rwq_ind_table_init_attr *init_attr,
		struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	struct ib_counters *(*create_counters)(
		struct ib_device *device, struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);
	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls if lifespan is set)
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when a ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_entry)(struct sk_buff *msg,
			      struct rdma_restrack_entry *entry);

	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached.
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);

	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 * the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);
	/**
	 * Allows rdma drivers to add their own restrack attributes
	 * dedicated to the stats entry.
	 */
	int (*fill_stat_entry)(struct sk_buff *msg,
			       struct rdma_restrack_entry *entry);

	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};
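
/*
 * Usage sketch (illustrative, not part of this header): a provider driver
 * typically fills a const ops table, sizes the core-allocated objects with
 * INIT_RDMA_OBJ_SIZE(), and installs it with ib_set_device_ops() before
 * ib_register_device(). All "mydrv_*" names below are placeholders.
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.alloc_pd = mydrv_alloc_pd,
 *		.dealloc_pd = mydrv_dealloc_pd,
 *		.create_qp = mydrv_create_qp,
 *		.destroy_qp = mydrv_destroy_qp,
 *		.query_pkey = mydrv_query_pkey,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 */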

struct ib_core_device {
	/* device must be the first element in structure until,
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};

struct rdma_restrack_root;
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;
	struct ib_device_ops ops;
	char name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head event_handler_list;
	/* Protects event_handler_list */
	struct rw_semaphore event_handler_rwsem;

	/* Protects QP's event_handler calls and open_qp list */
	spinlock_t qp_open_list_lock;

	struct rw_semaphore client_data_rwsem;
	struct xarray client_data;
	struct mutex unregistration_lock;

	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
	rwlock_t cache_lock;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int num_comp_vectors;

	union {
		struct device dev;
		struct ib_core_device coredev;
	};

	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is a NULL terminated array.
	 */
	const struct attribute_group *groups[3];

	u64 uverbs_cmd_mask;
	u64 uverbs_ex_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64 node_guid;
	u32 local_dma_lkey;
	u16 is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
	u16 kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16 use_cq_dim:1;
	u8 node_type;
	u8 phys_port_cnt;
	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device cg_device;
#endif

	u32 index;

	spinlock_t cq_pools_lock;
	struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];

	struct rdma_restrack_root *res;

	const struct uapi_definition *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWarp CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
	u32 lag_flags;
};

struct ib_client_nl_info;
struct ib_client {
	const char *name;
	int (*add)(struct ib_device *ibdev);
	void (*remove)(struct ib_device *, void *client_data);
	void (*rename)(struct ib_device *dev, void *client_data);
	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
			   struct ib_client_nl_info *res);
	int (*get_global_nl_info)(struct ib_client_nl_info *res);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);

	refcount_t uses;
	struct completion uses_zero;
	u32 client_id;

	/* kverbs are not required by the client */
	u8 no_kverbs_req:1;
};
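
/*
 * Usage sketch (illustrative): a minimal client that attaches per-device
 * state; "myclient*" names are placeholders. The core calls add() for every
 * registered device and remove() on unregistration.
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, st);
 *		return 0;
 *	}
 *
 *	static void myclient_remove(struct ib_device *ibdev, void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name	= "myclient",
 *		.add	= myclient_add,
 *		.remove	= myclient_remove,
 *	};
 *
 *	ib_register_client(&myclient);
 */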

/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +             \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)
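
/*
 * Usage sketch (illustrative): the driver structure must embed ib_device as
 * its first member (offsetof() == 0 is enforced above); "mydrv_device" is a
 * placeholder.
 *
 *	struct mydrv_device {
 *		struct ib_device ibdev;	// must be first
 *		void __iomem *regs;
 *	};
 *
 *	struct mydrv_device *mdev = ib_alloc_device(mydrv_device, ibdev);
 *
 *	if (!mdev)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&mdev->ibdev);
 */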

void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device, const char *name);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
void ib_unregister_device_queued(struct ib_device *ib_dev);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * block's aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
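
/*
 * Usage sketch (illustrative): walking a DMA-mapped SGL in pgsz-aligned
 * blocks, e.g. to program an MR page table; "set_page" stands in for
 * driver-specific logic.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, pgsz)
 *		set_page(rdma_block_iter_dma_address(&biter));
 */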

/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This can only be called while the client is
 * registered to the device; once the remove() callback returns it
 * cannot be called.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
2874void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2875 void *data);
2876void ib_set_device_ops(struct ib_device *device,
2877 const struct ib_device_ops *ops);
2878
2879int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2880 unsigned long pfn, unsigned long size, pgprot_t prot,
2881 struct rdma_user_mmap_entry *entry);
2882int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2883 struct rdma_user_mmap_entry *entry,
2884 size_t length);
2885int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2886 struct rdma_user_mmap_entry *entry,
2887 size_t length, u32 min_pgoff,
2888 u32 max_pgoff);
2889
2890struct rdma_user_mmap_entry *
2891rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2892 unsigned long pgoff);
2893struct rdma_user_mmap_entry *
2894rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2895 struct vm_area_struct *vma);
2896void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2897
2898void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
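
/*
 * Usage sketch (illustrative): a verb handler copying a fixed-size command
 * and insisting that any trailing bytes from a newer userspace ABI are
 * zeroed; "struct mydrv_create_cmd" is a placeholder.
 *
 *	struct mydrv_create_cmd cmd;
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	if (!ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */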

/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This function is a helper function that IB layer and low-level drivers
 * can use to consider whether the destruction of the given uobject is
 * retryable.
 * It checks the original return code; if it wasn't success, the destruction
 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
 * the remove reason.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered
 * by a ucontext cleanup.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);
2992
2993void ib_register_event_handler(struct ib_event_handler *event_handler);
2994void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2995void ib_dispatch_event(const struct ib_event *event);
2996
2997int ib_query_port(struct ib_device *device,
2998 u8 port_num, struct ib_port_attr *port_attr);
2999
3000enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3001 u8 port_num);
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3013{
3014 return device->is_switch;
3015}
3016

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device
 * @iter: The unsigned int to store the port number
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(  \
		     unsigned int, iter)));                                    \
	     iter <= rdma_end_port(device); (iter)++)
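
/*
 * Usage sketch (illustrative): switches expose only port 0 and HCAs ports
 * 1..phys_port_cnt, so the iterator variable must be an unsigned int;
 * "handle_roce_port" is a placeholder.
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_protocol_roce(ibdev, port))
 *			handle_roce_port(ibdev, port);
 *	}
 */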

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
3052
3053static inline int rdma_is_port_valid(const struct ib_device *device,
3054 unsigned int port)
3055{
3056 return (port >= rdma_start_port(device) &&
3057 port <= rdma_end_port(device));
3058}
3059
3060static inline bool rdma_is_grh_required(const struct ib_device *device,
3061 u8 port_num)
3062{
3063 return device->port_data[port_num].immutable.core_cap_flags &
3064 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3065}
3066
3067static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
3068{
3069 return device->port_data[port_num].immutable.core_cap_flags &
3070 RDMA_CORE_CAP_PROT_IB;
3071}
3072
3073static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
3074{
3075 return device->port_data[port_num].immutable.core_cap_flags &
3076 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3077}
3078
3079static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3080{
3081 return device->port_data[port_num].immutable.core_cap_flags &
3082 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3083}
3084
3085static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3086{
3087 return device->port_data[port_num].immutable.core_cap_flags &
3088 RDMA_CORE_CAP_PROT_ROCE;
3089}
3090
3091static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3092{
3093 return device->port_data[port_num].immutable.core_cap_flags &
3094 RDMA_CORE_CAP_PROT_IWARP;
3095}
3096
3097static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3098{
3099 return rdma_protocol_ib(device, port_num) ||
3100 rdma_protocol_roce(device, port_num);
3101}
3102
3103static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3104{
3105 return device->port_data[port_num].immutable.core_cap_flags &
3106 RDMA_CORE_CAP_PROT_RAW_PACKET;
3107}
3108
3109static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3110{
3111 return device->port_data[port_num].immutable.core_cap_flags &
3112 RDMA_CORE_CAP_PROT_USNIC;
3113}
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3128{
3129 return device->port_data[port_num].immutable.core_cap_flags &
3130 RDMA_CORE_CAP_IB_MAD;
3131}
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3153{
3154 return device->port_data[port_num].immutable.core_cap_flags &
3155 RDMA_CORE_CAP_OPA_MAD;
3156}
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3179{
3180 return device->port_data[port_num].immutable.core_cap_flags &
3181 RDMA_CORE_CAP_IB_SMI;
3182}
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3200{
3201 return device->port_data[port_num].immutable.core_cap_flags &
3202 RDMA_CORE_CAP_IB_CM;
3203}
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3218{
3219 return device->port_data[port_num].immutable.core_cap_flags &
3220 RDMA_CORE_CAP_IW_CM;
3221}
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3239{
3240 return device->port_data[port_num].immutable.core_cap_flags &
3241 RDMA_CORE_CAP_IB_SA;
3242}
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3262{
3263 return rdma_cap_ib_sa(device, port_num);
3264}
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3280{
3281 return device->port_data[port_num].immutable.core_cap_flags &
3282 RDMA_CORE_CAP_AF_IB;
3283}
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3302{
3303 return device->port_data[port_num].immutable.core_cap_flags &
3304 RDMA_CORE_CAP_ETH_AH;
3305}
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3317{
3318 return (device->port_data[port_num].immutable.core_cap_flags &
3319 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3320}
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3335{
3336 return device->port_data[port_num].immutable.max_mad_size;
3337}
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3353 u8 port_num)
3354{
3355 return rdma_protocol_roce(device, port_num) &&
3356 device->ops.add_gid && device->ops.del_gid;
3357}
3358

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

/**
 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
 *
 * @addr: address
 * @pgsz_bitmap: bitmap of HW supported page sizes
 */
static inline unsigned int rdma_find_pg_bit(unsigned long addr,
					    unsigned long pgsz_bitmap)
{
	unsigned long align;
	unsigned long pgsz;

	align = addr & -addr;

	/* Find page bit such that addr is aligned to the highest supported
	 * HW page size
	 */
	pgsz = pgsz_bitmap & ~(-align << 1);
	if (!pgsz)
		return __ffs(pgsz_bitmap);

	return __fls(pgsz);
}
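
/*
 * Worked example: addr = 0x2200000 is naturally aligned to 0x200000 (2M),
 * so align = 0x200000 and ~(-align << 1) keeps bits 0..21. With
 * pgsz_bitmap = SZ_4K | SZ_2M | SZ_1G, pgsz = SZ_4K | SZ_2M and __fls()
 * returns 21, i.e. a 2M page size is chosen.
 */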
3394
3395
3396
3397
3398
3399
3400
3401
3402static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3403 u32 port_num)
3404{
3405 return (device->port_data[port_num].immutable.core_cap_flags &
3406 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3407}
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3419 int mtu)
3420{
3421 if (rdma_core_cap_opa_port(device, port))
3422 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3423 else
3424 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3425}
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3436 struct ib_port_attr *attr)
3437{
3438 if (rdma_core_cap_opa_port(device, port))
3439 return attr->phys_mtu;
3440 else
3441 return ib_mtu_enum_to_int(attr->max_mtu);
3442}
3443
3444int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3445 int state);
3446int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3447 struct ifla_vf_info *info);
3448int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3449 struct ifla_vf_stats *stats);
3450int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3451 struct ifla_vf_guid *node_guid,
3452 struct ifla_vf_guid *port_guid);
3453int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3454 int type);
3455
3456int ib_query_pkey(struct ib_device *device,
3457 u8 port_num, u16 index, u16 *pkey);
3458
3459int ib_modify_device(struct ib_device *device,
3460 int device_modify_mask,
3461 struct ib_device_modify *device_modify);
3462
3463int ib_modify_port(struct ib_device *device,
3464 u8 port_num, int port_modify_mask,
3465 struct ib_port_modify *port_modify);
3466
3467int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3468 u8 *port_num, u16 *index);
3469
3470int ib_find_pkey(struct ib_device *device,
3471 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * clients to create a kind of global "DMA MR" which can be used to do
	 * memory registration for the whole system, with no need to
	 * register any memory.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);

#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)

/**
 * ib_dealloc_pd_user - Deallocate kernel/user PD
 * @pd: The protection domain
 * @udata: Valid user data or NULL for kernel objects
 */
void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);

/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	ib_dealloc_pd_user(pd, NULL);
}
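
/*
 * Usage sketch (illustrative): PD lifecycle for a kernel ULP.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */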
3509
3510enum rdma_create_ah_flags {
3511
3512 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3513};
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3525 u32 flags);
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3540 struct rdma_ah_attr *ah_attr,
3541 struct ib_udata *udata);
3542
3543
3544
3545
3546
3547
3548
3549
3550int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3551 enum rdma_network_type net_type,
3552 union ib_gid *sgid, union ib_gid *dgid);
3553
3554
3555
3556
3557
3558int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3579 const struct ib_wc *wc, const struct ib_grh *grh,
3580 struct rdma_ah_attr *ah_attr);
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3595 const struct ib_grh *grh, u8 port_num);
3596
3597
3598
3599
3600
3601
3602
3603
3604int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3605
3606
3607
3608
3609
3610
3611
3612
3613int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3614
3615enum rdma_destroy_ah_flags {
3616
3617 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3618};
3619
3620
3621
3622
3623
3624
3625
3626int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3627
3628
3629
3630
3631
3632
3633
3634
3635static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3636{
3637 return rdma_destroy_ah_user(ah, flags, NULL);
3638}

struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata);
static inline struct ib_srq *
ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
{
	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
}
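
/*
 * Usage sketch (illustrative): a kernel SRQ with room for 128 one-SGE
 * receives; the numbers are arbitrary.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.srq_type = IB_SRQT_BASIC,
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */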
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665int ib_modify_srq(struct ib_srq *srq,
3666 struct ib_srq_attr *srq_attr,
3667 enum ib_srq_attr_mask srq_attr_mask);
3668
3669
3670
3671
3672
3673
3674
3675int ib_query_srq(struct ib_srq *srq,
3676 struct ib_srq_attr *srq_attr);
3677
3678
3679
3680
3681
3682
3683int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3684
3685
3686
3687
3688
3689
3690
3691static inline int ib_destroy_srq(struct ib_srq *srq)
3692{
3693 return ib_destroy_srq_user(srq, NULL);
3694}
3695
3696
3697
3698
3699
3700
3701
3702
3703static inline int ib_post_srq_recv(struct ib_srq *srq,
3704 const struct ib_recv_wr *recv_wr,
3705 const struct ib_recv_wr **bad_recv_wr)
3706{
3707 const struct ib_recv_wr *dummy;
3708
3709 return srq->device->ops.post_srq_recv(srq, recv_wr,
3710 bad_recv_wr ? : &dummy);
3711}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
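
/*
 * Usage sketch (illustrative): a kernel RC QP bound to existing CQs; the
 * capacities are arbitrary.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr = 64, .max_recv_wr = 64,
 *			.max_send_sge = 1, .max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */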
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727int ib_modify_qp_with_udata(struct ib_qp *qp,
3728 struct ib_qp_attr *attr,
3729 int attr_mask,
3730 struct ib_udata *udata);
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741int ib_modify_qp(struct ib_qp *qp,
3742 struct ib_qp_attr *qp_attr,
3743 int qp_attr_mask);
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756int ib_query_qp(struct ib_qp *qp,
3757 struct ib_qp_attr *qp_attr,
3758 int qp_attr_mask,
3759 struct ib_qp_init_attr *qp_init_attr);
3760
3761
3762
3763
3764
3765
3766int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3767
3768
3769
3770
3771
3772
3773
3774static inline int ib_destroy_qp(struct ib_qp *qp)
3775{
3776 return ib_destroy_qp_user(qp, NULL);
3777}
3778
3779
3780
3781
3782
3783
3784
3785
3786struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3787 struct ib_qp_open_attr *qp_open_attr);
3788
3789
3790
3791
3792
3793
3794
3795
3796int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
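
/*
 * Usage sketch (illustrative): posting one signalled SEND of a pre-mapped
 * buffer; "dma_addr" and "len" are assumed to come from ib_dma_map_single().
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len,
 *		.lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND, .send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge, .num_sge = 1,
 *	};
 *
 *	int ret = ib_post_send(qp, &wr, NULL);
 */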

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
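
/*
 * Usage sketch (illustrative): replenishing the receive queue with one
 * buffer, mirroring the send-side example above.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len,
 *		.lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };
 *
 *	int ret = ib_post_recv(qp, &wr, NULL);
 */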
3836
3837struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3838 int nr_cqe, int comp_vector,
3839 enum ib_poll_context poll_ctx,
3840 const char *caller, struct ib_udata *udata);
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3852 void *private, int nr_cqe,
3853 int comp_vector,
3854 enum ib_poll_context poll_ctx,
3855 struct ib_udata *udata)
3856{
3857 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3858 KBUILD_MODNAME, udata);
3859}
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3872 int nr_cqe, int comp_vector,
3873 enum ib_poll_context poll_ctx)
3874{
3875 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3876 NULL);
3877}

struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller);

/**
 * ib_alloc_cq_any: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}
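
/*
 * Usage sketch (illustrative): letting the core pick the completion vector;
 * completions are then dispatched through wc->wr_cqe->done() by the CQ API.
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(dev, NULL, 256, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */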
3897
3898
3899
3900
3901
3902
3903
3904
3905void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3906
3907
3908
3909
3910
3911
3912
3913static inline void ib_free_cq(struct ib_cq *cq)
3914{
3915 ib_free_cq_user(cq, NULL);
3916}
3917
3918int ib_process_cq_direct(struct ib_cq *cq, int budget);
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933struct ib_cq *__ib_create_cq(struct ib_device *device,
3934 ib_comp_handler comp_handler,
3935 void (*event_handler)(struct ib_event *, void *),
3936 void *cq_context,
3937 const struct ib_cq_init_attr *cq_attr,
3938 const char *caller);
3939#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3940 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3941
3942
3943
3944
3945
3946
3947
3948
3949int ib_resize_cq(struct ib_cq *cq, int cqe);
3950
3951
3952
3953
3954
3955
3956
3957
3958int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3959
3960
3961
3962
3963
3964
3965int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3966
3967
3968
3969
3970
3971
3972
3973static inline void ib_destroy_cq(struct ib_cq *cq)
3974{
3975 ib_destroy_cq_user(cq, NULL);
3976}
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3991 struct ib_wc *wc)
3992{
3993 return cq->device->ops.poll_cq(cq, num_entries, wc);
3994}

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only relevant if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}
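
/*
 * Usage sketch (illustrative): the canonical poll/re-arm loop for consumers
 * that drive the CQ themselves; re-polling while ib_req_notify_cq() returns
 * a positive value closes the race described above. "handle_wc" is a
 * placeholder.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */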
4028
4029struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4030 int comp_vector_hint,
4031 enum ib_poll_context poll_ctx);
4032
4033void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4034
4035
4036
4037
4038
4039
4040
4041
4042static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
4043{
4044 return cq->device->ops.req_ncomp_notif ?
4045 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
4046 -ENOSYS;
4047}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
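
/*
 * Usage sketch (illustrative): mapping a kernel buffer for a send and
 * checking the result before use.
 *
 *	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */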
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095static inline u64 ib_dma_map_page(struct ib_device *dev,
4096 struct page *page,
4097 unsigned long offset,
4098 size_t size,
4099 enum dma_data_direction direction)
4100{
4101 return dma_map_page(dev->dma_device, page, offset, size, direction);
4102}
4103
4104
4105
4106
4107
4108
4109
4110
4111static inline void ib_dma_unmap_page(struct ib_device *dev,
4112 u64 addr, size_t size,
4113 enum dma_data_direction direction)
4114{
4115 dma_unmap_page(dev->dma_device, addr, size, direction);
4116}
4117
4118
4119
4120
4121
4122
4123
4124
4125static inline int ib_dma_map_sg(struct ib_device *dev,
4126 struct scatterlist *sg, int nents,
4127 enum dma_data_direction direction)
4128{
4129 return dma_map_sg(dev->dma_device, sg, nents, direction);
4130}
4131
4132
4133
4134
4135
4136
4137
4138
4139static inline void ib_dma_unmap_sg(struct ib_device *dev,
4140 struct scatterlist *sg, int nents,
4141 enum dma_data_direction direction)
4142{
4143 dma_unmap_sg(dev->dma_device, sg, nents, direction);
4144}
4145
4146static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4147 struct scatterlist *sg, int nents,
4148 enum dma_data_direction direction,
4149 unsigned long dma_attrs)
4150{
4151 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4152 dma_attrs);
4153}
4154
4155static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4156 struct scatterlist *sg, int nents,
4157 enum dma_data_direction direction,
4158 unsigned long dma_attrs)
4159{
4160 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4161}
4162
4163
4164
4165
4166
4167
4168
4169static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4170{
4171 return dma_get_max_seg_size(dev->dma_device);
4172}
4173
4174
4175
4176
4177
4178
4179
4180
4181static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4182 u64 addr,
4183 size_t size,
4184 enum dma_data_direction dir)
4185{
4186 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4187}
4188
4189
4190
4191
4192
4193
4194
4195
4196static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4197 u64 addr,
4198 size_t size,
4199 enum dma_data_direction dir)
4200{
4201 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4202}
4203
4204
4205
4206
4207
4208
4209
4210
4211static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4212 size_t size,
4213 dma_addr_t *dma_handle,
4214 gfp_t flag)
4215{
4216 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4217}
4218
4219
4220
4221
4222
4223
4224
4225
4226static inline void ib_dma_free_coherent(struct ib_device *dev,
4227 size_t size, void *cpu_addr,
4228 dma_addr_t dma_handle)
4229{
4230 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4231}
4232
4233
4234
4235
4236struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4237 u64 virt_addr, int mr_access_flags);
4238
4239
4240int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4241 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4242
4243
4244
4245
4246
4247
4248
4249
4250int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261static inline int ib_dereg_mr(struct ib_mr *mr)
4262{
4263 return ib_dereg_mr_user(mr, NULL);
4264}
4265
4266struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4267 u32 max_num_sg, struct ib_udata *udata);
4268
4269static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4270 enum ib_mr_type mr_type, u32 max_num_sg)
4271{
4272 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4273}
4274
4275struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4276 u32 max_num_data_sg,
4277 u32 max_num_meta_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
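
/*
 * Usage sketch (illustrative): producing a fresh key before re-registering
 * a fast-reg MR, so that stale remote accesses using the old rkey fail:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */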
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4315
4316
4317
4318
4319
4320
4321
4322int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4323
4324
4325
4326
4327
4328
4329struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4330#define ib_alloc_xrcd(device) \
4331 __ib_alloc_xrcd((device), KBUILD_MODNAME)
4332
4333
4334
4335
4336
4337
4338int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4339
4340static inline int ib_check_mr_access(int flags)
4341{
4342
4343
4344
4345
4346 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4347 !(flags & IB_ACCESS_LOCAL_WRITE))
4348 return -EINVAL;
4349
4350 if (flags & ~IB_ACCESS_SUPPORTED)
4351 return -EINVAL;
4352
4353 return 0;
4354}
4355
4356static inline bool ib_access_writable(int access_flags)
4357{
4358
4359
4360
4361
4362
4363
4364
4365 return access_flags &
4366 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4367 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4368}
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4383 struct ib_mr_status *mr_status);
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398static inline bool ib_device_try_get(struct ib_device *dev)
4399{
4400 return refcount_inc_not_zero(&dev->refcount);
4401}
4402
4403void ib_device_put(struct ib_device *device);
4404struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4405 enum rdma_driver_id driver_id);
4406struct ib_device *ib_device_get_by_name(const char *name,
4407 enum rdma_driver_id driver_id);
4408struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4409 u16 pkey, const union ib_gid *gid,
4410 const struct sockaddr *addr);
4411int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4412 unsigned int port);
4413struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4414
4415struct ib_wq *ib_create_wq(struct ib_pd *pd,
4416 struct ib_wq_init_attr *init_attr);
4417int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4418int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4419 u32 wq_attr_mask);
4420struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4421 struct ib_rwq_ind_table_init_attr*
4422 wq_ind_table_init_attr);
4423int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
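
/*
 * Usage sketch (illustrative): mapping an SGL into an MR allocated with
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ...) and posting the resulting
 * fast-registration work request.
 *
 *	struct ib_reg_wr reg_wr;
 *	int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *
 *	if (n < 0)
 *		return n;
 *	reg_wr = (struct ib_reg_wr){
 *		.wr = { .opcode = IB_WR_REG_MR },
 *		.mr = mr,
 *		.key = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE,
 *	};
 *	ret = ib_post_send(qp, &reg_wr.wr, NULL);
 */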
4446
4447void ib_drain_rq(struct ib_qp *qp);
4448void ib_drain_sq(struct ib_qp *qp);
4449void ib_drain_qp(struct ib_qp *qp);
4450
4451int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4452
4453static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4454{
4455 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4456 return attr->roce.dmac;
4457 return NULL;
4458}
4459
4460static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4461{
4462 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4463 attr->ib.dlid = (u16)dlid;
4464 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4465 attr->opa.dlid = dlid;
4466}
4467
4468static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4469{
4470 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4471 return attr->ib.dlid;
4472 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4473 return attr->opa.dlid;
4474 return 0;
4475}
4476
4477static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4478{
4479 attr->sl = sl;
4480}
4481
4482static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4483{
4484 return attr->sl;
4485}
4486
4487static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4488 u8 src_path_bits)
4489{
4490 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4491 attr->ib.src_path_bits = src_path_bits;
4492 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4493 attr->opa.src_path_bits = src_path_bits;
4494}
4495
4496static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4497{
4498 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4499 return attr->ib.src_path_bits;
4500 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4501 return attr->opa.src_path_bits;
4502 return 0;
4503}
4504
4505static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4506 bool make_grd)
4507{
4508 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4509 attr->opa.make_grd = make_grd;
4510}
4511
4512static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4513{
4514 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4515 return attr->opa.make_grd;
4516 return false;
4517}
4518
4519static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4520{
4521 attr->port_num = port_num;
4522}
4523
4524static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4525{
4526 return attr->port_num;
4527}
4528
4529static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4530 u8 static_rate)
4531{
4532 attr->static_rate = static_rate;
4533}
4534
4535static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4536{
4537 return attr->static_rate;
4538}
4539
4540static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4541 enum ib_ah_flags flag)
4542{
4543 attr->ah_flags = flag;
4544}
4545
4546static inline enum ib_ah_flags
4547 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4548{
4549 return attr->ah_flags;
4550}
4551
4552static inline const struct ib_global_route
4553 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4554{
4555 return &attr->grh;
4556}
4557
4558
4559static inline struct ib_global_route
4560 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4561{
4562 return &attr->grh;
4563}
4564
4565static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4566{
4567 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4568
4569 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4570}
4571
4572static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4573 __be64 prefix)
4574{
4575 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4576
4577 grh->dgid.global.subnet_prefix = prefix;
4578}
4579
4580static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4581 __be64 if_id)
4582{
4583 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4584
4585 grh->dgid.global.interface_id = if_id;
4586}
4587
4588static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4589 union ib_gid *dgid, u32 flow_label,
4590 u8 sgid_index, u8 hop_limit,
4591 u8 traffic_class)
4592{
4593 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4594
4595 attr->ah_flags = IB_AH_GRH;
4596 if (dgid)
4597 grh->dgid = *dgid;
4598 grh->flow_label = flow_label;
4599 grh->sgid_index = sgid_index;
4600 grh->hop_limit = hop_limit;
4601 grh->traffic_class = traffic_class;
4602 grh->sgid_attr = NULL;
4603}
4604
4605void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4606void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4607 u32 flow_label, u8 hop_limit, u8 traffic_class,
4608 const struct ib_gid_attr *sgid_attr);
4609void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4610 const struct rdma_ah_attr *src);
4611void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4612 const struct rdma_ah_attr *new);
4613void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4614
4615
4616
4617
4618
4619
4620
4621static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4622 u8 port_num)
4623{
4624 if (rdma_protocol_roce(dev, port_num))
4625 return RDMA_AH_ATTR_TYPE_ROCE;
4626 if (rdma_protocol_ib(dev, port_num)) {
4627 if (rdma_cap_opa_ah(dev, port_num))
4628 return RDMA_AH_ATTR_TYPE_OPA;
4629 return RDMA_AH_ATTR_TYPE_IB;
4630 }
4631
4632 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4633}
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644static inline u16 ib_lid_cpu16(u32 lid)
4645{
4646 WARN_ON_ONCE(lid & 0xFFFF0000);
4647 return (u16)lid;
4648}
4649
4650
4651
4652
4653
4654
4655static inline __be16 ib_lid_be16(u32 lid)
4656{
4657 WARN_ON_ONCE(lid & 0xFFFF0000);
4658 return cpu_to_be16((u16)lid);
4659}
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}
4681
4682
4683
4684
4685
4686
4687
4688void rdma_roce_rescan_device(struct ib_device *ibdev);
4689
4690struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4691
4692int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4693
4694struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4695 enum rdma_netdev_t type, const char *name,
4696 unsigned char name_assign_type,
4697 void (*setup)(struct net_device *));
4698
4699int rdma_init_netdev(struct ib_device *device, u8 port_num,
4700 enum rdma_netdev_t type, const char *name,
4701 unsigned char name_assign_type,
4702 void (*setup)(struct net_device *),
4703 struct net_device *netdev);
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720static inline void
4721rdma_set_device_sysfs_group(struct ib_device *dev,
4722 const struct attribute_group *group)
4723{
4724 dev->groups[1] = group;
4725}
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4736{
4737 struct ib_core_device *coredev =
4738 container_of(device, struct ib_core_device, dev);
4739
4740 return coredev->owner;
4741}
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4752 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4753
4754bool rdma_dev_access_netns(const struct ib_device *device,
4755 const struct net *net);

#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)

/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *                                on the flow_label
 *
 * This function converts the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * convention.
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	u32 fl_low = fl & 0x03FFF, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}

/**
 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
 *                        local and remote qpn values
 *
 * This function folds the multiplication result of the two 24 bit qpn
 * fields down to a 20 bit result.
 *
 * Because the value is symmetric in the local and remote qpn, both the
 * requester and the responder calculate the same flow_label for a given
 * connection.
 *
 * Drivers should use this helper when the upper layer provides an invalid
 * flow_label value, together with rdma_flow_label_to_udp_sport().
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
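
/*
 * Worked example: lqpn = 0x12 and rqpn = 0x34 give v = 0x3a8; both folding
 * shifts leave it unchanged, so the flow label is 0x3a8. Feeding that into
 * rdma_flow_label_to_udp_sport() yields fl_low = 0x3a8, fl_high = 0 and a
 * UDP source port of 0x3a8 | 0xc000 = 0xc3a8.
 */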
4800#endif
4801