39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/dma-mapping.h>
45#include <linux/kref.h>
46#include <linux/list.h>
47#include <linux/rwsem.h>
48#include <linux/workqueue.h>
49#include <linux/irq_poll.h>
50#include <uapi/linux/if_ether.h>
51#include <net/ipv6.h>
52#include <net/ip.h>
53#include <linux/string.h>
54#include <linux/slab.h>
55#include <linux/netdevice.h>
56#include <linux/refcount.h>
57#include <linux/if_link.h>
58#include <linux/atomic.h>
59#include <linux/mmu_notifier.h>
60#include <linux/uaccess.h>
61#include <linux/cgroup_rdma.h>
62#include <linux/irqflags.h>
63#include <linux/preempt.h>
64#include <linux/dim.h>
65#include <uapi/rdma/ib_user_verbs.h>
66#include <rdma/rdma_counter.h>
67#include <rdma/restrack.h>
68#include <rdma/signature.h>
69#include <uapi/rdma/rdma_user_ioctl.h>
70#include <uapi/rdma/ib_user_ioctl_verbs.h>
71
72#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
73
74struct ib_umem_odp;
75
76extern struct workqueue_struct *ib_wq;
77extern struct workqueue_struct *ib_comp_wq;
78extern struct workqueue_struct *ib_comp_unbound_wq;
79
80__printf(3, 4) __cold
81void ibdev_printk(const char *level, const struct ib_device *ibdev,
82 const char *format, ...);
83__printf(2, 3) __cold
84void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
85__printf(2, 3) __cold
86void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
87__printf(2, 3) __cold
88void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
89__printf(2, 3) __cold
90void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
91__printf(2, 3) __cold
92void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
93__printf(2, 3) __cold
94void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
95__printf(2, 3) __cold
96void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
97
98#if defined(CONFIG_DYNAMIC_DEBUG)
99#define ibdev_dbg(__dev, format, args...) \
100 dynamic_ibdev_dbg(__dev, format, ##args)
101#else
102__printf(2, 3) __cold
103static inline
104void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
105#endif
106
107#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
108do { \
109 static DEFINE_RATELIMIT_STATE(_rs, \
110 DEFAULT_RATELIMIT_INTERVAL, \
111 DEFAULT_RATELIMIT_BURST); \
112 if (__ratelimit(&_rs)) \
113 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
114} while (0)
115
116#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
117 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
118#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
119 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
120#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
121 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
122#define ibdev_err_ratelimited(ibdev, fmt, ...) \
123 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
124#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
125 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
126#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
127 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
128#define ibdev_info_ratelimited(ibdev, fmt, ...) \
129 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
130
131#if defined(CONFIG_DYNAMIC_DEBUG)
132
133#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
134do { \
135 static DEFINE_RATELIMIT_STATE(_rs, \
136 DEFAULT_RATELIMIT_INTERVAL, \
137 DEFAULT_RATELIMIT_BURST); \
138 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
139 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
140 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
141 ##__VA_ARGS__); \
142} while (0)
143#else
144__printf(2, 3) __cold
145static inline
146void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
147#endif
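
/*
 * Example (illustrative only, not part of this header): how a driver might
 * use the ibdev_*() helpers above.  "struct my_hca" and "my_hca_probe_port"
 * are hypothetical names.
 *
 *	static int my_hca_probe_port(struct my_hca *hca, u8 port)
 *	{
 *		if (port > hca->num_ports) {
 *			ibdev_err(&hca->ibdev, "invalid port %u\n", port);
 *			return -EINVAL;
 *		}
 *		ibdev_dbg(&hca->ibdev, "probing port %u\n", port);
 *		// In hot paths, prefer the rate-limited variants:
 *		ibdev_warn_ratelimited(&hca->ibdev, "port %u flapping\n", port);
 *		return 0;
 *	}
 */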
148
149union ib_gid {
150 u8 raw[16];
151 struct {
152 __be64 subnet_prefix;
153 __be64 interface_id;
154 } global;
155};
156
157extern union ib_gid zgid;
158
159enum ib_gid_type {
 /* If the link layer is Ethernet, GID type 0 denotes a RoCE v1 GID */
161 IB_GID_TYPE_IB = 0,
162 IB_GID_TYPE_ROCE = 0,
163 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
164 IB_GID_TYPE_SIZE
165};
166
167#define ROCE_V2_UDP_DPORT 4791
168struct ib_gid_attr {
169 struct net_device __rcu *ndev;
170 struct ib_device *device;
171 union ib_gid gid;
172 enum ib_gid_type gid_type;
173 u16 index;
174 u8 port_num;
175};
176
177enum {
178
179 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
180};
181
182enum rdma_transport_type {
183 RDMA_TRANSPORT_IB,
184 RDMA_TRANSPORT_IWARP,
185 RDMA_TRANSPORT_USNIC,
186 RDMA_TRANSPORT_USNIC_UDP,
187 RDMA_TRANSPORT_UNSPECIFIED,
188};
189
190enum rdma_protocol_type {
191 RDMA_PROTOCOL_IB,
192 RDMA_PROTOCOL_IBOE,
193 RDMA_PROTOCOL_IWARP,
194 RDMA_PROTOCOL_USNIC_UDP
195};
196
197__attribute_const__ enum rdma_transport_type
198rdma_node_get_transport(unsigned int node_type);
199
200enum rdma_network_type {
201 RDMA_NETWORK_IB,
202 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
203 RDMA_NETWORK_IPV4,
204 RDMA_NETWORK_IPV6
205};
206
207static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
208{
209 if (network_type == RDMA_NETWORK_IPV4 ||
210 network_type == RDMA_NETWORK_IPV6)
211 return IB_GID_TYPE_ROCE_UDP_ENCAP;
212
213
214 return IB_GID_TYPE_IB;
215}
216
217static inline enum rdma_network_type
218rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
219{
220 if (attr->gid_type == IB_GID_TYPE_IB)
221 return RDMA_NETWORK_IB;
222
223 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
224 return RDMA_NETWORK_IPV4;
225 else
226 return RDMA_NETWORK_IPV6;
227}
228
229enum rdma_link_layer {
230 IB_LINK_LAYER_UNSPECIFIED,
231 IB_LINK_LAYER_INFINIBAND,
232 IB_LINK_LAYER_ETHERNET,
233};
234
235enum ib_device_cap_flags {
236 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
237 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
238 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
239 IB_DEVICE_RAW_MULTI = (1 << 3),
240 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
241 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
242 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
243 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
244 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
245
246 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
247 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
248 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
249 IB_DEVICE_SRQ_RESIZE = (1 << 13),
250 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

 /*
  * The device supports a device-wide lkey usable for local access without
  * an explicit memory registration.  Consumers should use the
  * local_dma_lkey exposed through struct ib_pd rather than testing this
  * flag directly.
  */
259 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
260
261 IB_DEVICE_MEM_WINDOW = (1 << 17),

 /*
  * The device can insert UDP/TCP checksums on outgoing UD (IPoIB) packets
  * and verify them on received ones.
  */
269 IB_DEVICE_UD_IP_CSUM = (1 << 18),
270 IB_DEVICE_UD_TSO = (1 << 19),
271 IB_DEVICE_XRC = (1 << 20),

 /*
  * The device supports the IB memory management extensions: fast
  * registration work requests (IB_WR_REG_MR), local invalidate and
  * send with invalidate.
  */
282 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
283 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
284 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
285 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
286 IB_DEVICE_RC_IP_CSUM = (1 << 25),
287
288 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
295 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
296 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
297 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
298 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
299 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
300 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
301
302 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
303 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
304
305 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
306 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
307};
308
309enum ib_atomic_cap {
310 IB_ATOMIC_NONE,
311 IB_ATOMIC_HCA,
312 IB_ATOMIC_GLOB
313};
314
315enum ib_odp_general_cap_bits {
316 IB_ODP_SUPPORT = 1 << 0,
317 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
318};
319
320enum ib_odp_transport_cap_bits {
321 IB_ODP_SUPPORT_SEND = 1 << 0,
322 IB_ODP_SUPPORT_RECV = 1 << 1,
323 IB_ODP_SUPPORT_WRITE = 1 << 2,
324 IB_ODP_SUPPORT_READ = 1 << 3,
325 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
326 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
327};
328
329struct ib_odp_caps {
330 uint64_t general_caps;
331 struct {
332 uint32_t rc_odp_caps;
333 uint32_t uc_odp_caps;
334 uint32_t ud_odp_caps;
335 uint32_t xrc_odp_caps;
336 } per_transport_caps;
337};
338
339struct ib_rss_caps {
340
341
342
343
344 u32 supported_qpts;
345 u32 max_rwq_indirection_tables;
346 u32 max_rwq_indirection_table_size;
347};
348
349enum ib_tm_cap_flags {
350
351 IB_TM_CAP_RNDV_RC = 1 << 0,
352};
353
354struct ib_tm_caps {
355
356 u32 max_rndv_hdr_size;
357
358 u32 max_num_tags;
359
360 u32 flags;
361
362 u32 max_ops;
363
364 u32 max_sge;
365};
366
367struct ib_cq_init_attr {
368 unsigned int cqe;
369 u32 comp_vector;
370 u32 flags;
371};
372
373enum ib_cq_attr_mask {
374 IB_CQ_MODERATE = 1 << 0,
375};
376
377struct ib_cq_caps {
378 u16 max_cq_moderation_count;
379 u16 max_cq_moderation_period;
380};
381
382struct ib_dm_mr_attr {
383 u64 length;
384 u64 offset;
385 u32 access_flags;
386};
387
388struct ib_dm_alloc_attr {
389 u64 length;
390 u32 alignment;
391 u32 flags;
392};
393
394struct ib_device_attr {
395 u64 fw_ver;
396 __be64 sys_image_guid;
397 u64 max_mr_size;
398 u64 page_size_cap;
399 u32 vendor_id;
400 u32 vendor_part_id;
401 u32 hw_ver;
402 int max_qp;
403 int max_qp_wr;
404 u64 device_cap_flags;
405 int max_send_sge;
406 int max_recv_sge;
407 int max_sge_rd;
408 int max_cq;
409 int max_cqe;
410 int max_mr;
411 int max_pd;
412 int max_qp_rd_atom;
413 int max_ee_rd_atom;
414 int max_res_rd_atom;
415 int max_qp_init_rd_atom;
416 int max_ee_init_rd_atom;
417 enum ib_atomic_cap atomic_cap;
418 enum ib_atomic_cap masked_atomic_cap;
419 int max_ee;
420 int max_rdd;
421 int max_mw;
422 int max_raw_ipv6_qp;
423 int max_raw_ethy_qp;
424 int max_mcast_grp;
425 int max_mcast_qp_attach;
426 int max_total_mcast_qp_attach;
427 int max_ah;
428 int max_fmr;
429 int max_map_per_fmr;
430 int max_srq;
431 int max_srq_wr;
432 int max_srq_sge;
433 unsigned int max_fast_reg_page_list_len;
434 unsigned int max_pi_fast_reg_page_list_len;
435 u16 max_pkeys;
436 u8 local_ca_ack_delay;
437 int sig_prot_cap;
438 int sig_guard_cap;
439 struct ib_odp_caps odp_caps;
440 uint64_t timestamp_mask;
441 uint64_t hca_core_clock;
442 struct ib_rss_caps rss_caps;
443 u32 max_wq_type_rq;
444 u32 raw_packet_caps;
445 struct ib_tm_caps tm_caps;
446 struct ib_cq_caps cq_caps;
447 u64 max_dm_size;
448};
449
450enum ib_mtu {
451 IB_MTU_256 = 1,
452 IB_MTU_512 = 2,
453 IB_MTU_1024 = 3,
454 IB_MTU_2048 = 4,
455 IB_MTU_4096 = 5
456};
457
458static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
459{
460 switch (mtu) {
461 case IB_MTU_256: return 256;
462 case IB_MTU_512: return 512;
463 case IB_MTU_1024: return 1024;
464 case IB_MTU_2048: return 2048;
465 case IB_MTU_4096: return 4096;
466 default: return -1;
467 }
468}
469
470static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
471{
472 if (mtu >= 4096)
473 return IB_MTU_4096;
474 else if (mtu >= 2048)
475 return IB_MTU_2048;
476 else if (mtu >= 1024)
477 return IB_MTU_1024;
478 else if (mtu >= 512)
479 return IB_MTU_512;
480 else
481 return IB_MTU_256;
482}
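
/*
 * Example (illustrative): converting between a byte-count MTU and the IB
 * enum with the two helpers above.  ib_mtu_int_to_enum() rounds *down* to
 * the nearest supported IB MTU, so a 1500-byte Ethernet MTU maps to
 * IB_MTU_1024:
 *
 *	enum ib_mtu mtu = ib_mtu_int_to_enum(1500);	// IB_MTU_1024
 *	int bytes = ib_mtu_enum_to_int(mtu);		// 1024
 */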
483
484enum ib_port_state {
485 IB_PORT_NOP = 0,
486 IB_PORT_DOWN = 1,
487 IB_PORT_INIT = 2,
488 IB_PORT_ARMED = 3,
489 IB_PORT_ACTIVE = 4,
490 IB_PORT_ACTIVE_DEFER = 5
491};
492
493enum ib_port_phys_state {
494 IB_PORT_PHYS_STATE_SLEEP = 1,
495 IB_PORT_PHYS_STATE_POLLING = 2,
496 IB_PORT_PHYS_STATE_DISABLED = 3,
497 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
498 IB_PORT_PHYS_STATE_LINK_UP = 5,
499 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
500 IB_PORT_PHYS_STATE_PHY_TEST = 7,
501};
502
503enum ib_port_width {
504 IB_WIDTH_1X = 1,
505 IB_WIDTH_2X = 16,
506 IB_WIDTH_4X = 2,
507 IB_WIDTH_8X = 4,
508 IB_WIDTH_12X = 8
509};
510
511static inline int ib_width_enum_to_int(enum ib_port_width width)
512{
513 switch (width) {
514 case IB_WIDTH_1X: return 1;
515 case IB_WIDTH_2X: return 2;
516 case IB_WIDTH_4X: return 4;
517 case IB_WIDTH_8X: return 8;
518 case IB_WIDTH_12X: return 12;
519 default: return -1;
520 }
521}
522
523enum ib_port_speed {
524 IB_SPEED_SDR = 1,
525 IB_SPEED_DDR = 2,
526 IB_SPEED_QDR = 4,
527 IB_SPEED_FDR10 = 8,
528 IB_SPEED_FDR = 16,
529 IB_SPEED_EDR = 32,
530 IB_SPEED_HDR = 64
531};
532
/*
 * struct rdma_hw_stats - container for driver-supplied hardware counters
 * @lock:	  protects concurrent updates of the counter values
 * @timestamp:	  jiffies of the last counter refresh
 * @lifespan:	  minimum interval, in jiffies, between refreshes
 * @names:	  array of counter names, one per counter
 * @num_counters: number of entries in @names and @value
 * @value:	  the counter values themselves
 */
552struct rdma_hw_stats {
553 struct mutex lock;
554 unsigned long timestamp;
555 unsigned long lifespan;
556 const char * const *names;
557 int num_counters;
558 u64 value[];
559};
560
561#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/*
 * rdma_alloc_hw_stats_struct - allocate and initialise a struct
 * rdma_hw_stats for @num_counters counters named by @names.  @lifespan is
 * given in milliseconds and converted to jiffies.  Returns NULL on
 * allocation failure.
 */
569static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
570 const char * const *names, int num_counters,
571 unsigned long lifespan)
572{
573 struct rdma_hw_stats *stats;
574
575 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
576 GFP_KERNEL);
577 if (!stats)
578 return NULL;
579 stats->names = names;
580 stats->num_counters = num_counters;
581 stats->lifespan = msecs_to_jiffies(lifespan);
582
583 return stats;
584}
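
/*
 * Example (illustrative): a driver's alloc_hw_stats callback built on the
 * helper above.  The counter names and the callback name are hypothetical.
 *
 *	static const char * const my_counter_names[] = {
 *		"rx_packets", "tx_packets", "rx_errors",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_stats(struct ib_device *ibdev,
 *						       u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_counter_names,
 *						  ARRAY_SIZE(my_counter_names),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */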
585
/*
 * Per-port core capability bits.  The RDMA_CORE_CAP_* flags describe which
 * core services (MAD, SMI, CM, SA, ...) and which protocol a port supports;
 * the RDMA_CORE_PORT_* masks below combine them per link technology.
 */
591#define RDMA_CORE_CAP_IB_MAD 0x00000001
592#define RDMA_CORE_CAP_IB_SMI 0x00000002
593#define RDMA_CORE_CAP_IB_CM 0x00000004
594#define RDMA_CORE_CAP_IW_CM 0x00000008
595#define RDMA_CORE_CAP_IB_SA 0x00000010
596#define RDMA_CORE_CAP_OPA_MAD 0x00000020
597
598
599#define RDMA_CORE_CAP_AF_IB 0x00001000
600#define RDMA_CORE_CAP_ETH_AH 0x00002000
601#define RDMA_CORE_CAP_OPA_AH 0x00004000
602#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
603
604
605#define RDMA_CORE_CAP_PROT_IB 0x00100000
606#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
607#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
608#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
609#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
610#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
611
612#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
613 | RDMA_CORE_CAP_PROT_ROCE \
614 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
615
616#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
617 | RDMA_CORE_CAP_IB_MAD \
618 | RDMA_CORE_CAP_IB_SMI \
619 | RDMA_CORE_CAP_IB_CM \
620 | RDMA_CORE_CAP_IB_SA \
621 | RDMA_CORE_CAP_AF_IB)
622#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
623 | RDMA_CORE_CAP_IB_MAD \
624 | RDMA_CORE_CAP_IB_CM \
625 | RDMA_CORE_CAP_AF_IB \
626 | RDMA_CORE_CAP_ETH_AH)
627#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
628 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
629 | RDMA_CORE_CAP_IB_MAD \
630 | RDMA_CORE_CAP_IB_CM \
631 | RDMA_CORE_CAP_AF_IB \
632 | RDMA_CORE_CAP_ETH_AH)
633#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
634 | RDMA_CORE_CAP_IW_CM)
635#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
636 | RDMA_CORE_CAP_OPA_MAD)
637
638#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
639
640#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
641
642struct ib_port_attr {
643 u64 subnet_prefix;
644 enum ib_port_state state;
645 enum ib_mtu max_mtu;
646 enum ib_mtu active_mtu;
647 int gid_tbl_len;
648 unsigned int ip_gids:1;
649
650 u32 port_cap_flags;
651 u32 max_msg_sz;
652 u32 bad_pkey_cntr;
653 u32 qkey_viol_cntr;
654 u16 pkey_tbl_len;
655 u32 sm_lid;
656 u32 lid;
657 u8 lmc;
658 u8 max_vl_num;
659 u8 sm_sl;
660 u8 subnet_timeout;
661 u8 init_type_reply;
662 u8 active_width;
663 u8 active_speed;
664 u8 phys_state;
665 u16 port_cap_flags2;
666};
667
668enum ib_device_modify_flags {
669 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
670 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
671};
672
673#define IB_DEVICE_NODE_DESC_MAX 64
674
675struct ib_device_modify {
676 u64 sys_image_guid;
677 char node_desc[IB_DEVICE_NODE_DESC_MAX];
678};
679
680enum ib_port_modify_flags {
681 IB_PORT_SHUTDOWN = 1,
682 IB_PORT_INIT_TYPE = (1<<2),
683 IB_PORT_RESET_QKEY_CNTR = (1<<3),
684 IB_PORT_OPA_MASK_CHG = (1<<4)
685};
686
687struct ib_port_modify {
688 u32 set_port_cap_mask;
689 u32 clr_port_cap_mask;
690 u8 init_type;
691};
692
693enum ib_event_type {
694 IB_EVENT_CQ_ERR,
695 IB_EVENT_QP_FATAL,
696 IB_EVENT_QP_REQ_ERR,
697 IB_EVENT_QP_ACCESS_ERR,
698 IB_EVENT_COMM_EST,
699 IB_EVENT_SQ_DRAINED,
700 IB_EVENT_PATH_MIG,
701 IB_EVENT_PATH_MIG_ERR,
702 IB_EVENT_DEVICE_FATAL,
703 IB_EVENT_PORT_ACTIVE,
704 IB_EVENT_PORT_ERR,
705 IB_EVENT_LID_CHANGE,
706 IB_EVENT_PKEY_CHANGE,
707 IB_EVENT_SM_CHANGE,
708 IB_EVENT_SRQ_ERR,
709 IB_EVENT_SRQ_LIMIT_REACHED,
710 IB_EVENT_QP_LAST_WQE_REACHED,
711 IB_EVENT_CLIENT_REREGISTER,
712 IB_EVENT_GID_CHANGE,
713 IB_EVENT_WQ_FATAL,
714};
715
716const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
717
718struct ib_event {
719 struct ib_device *device;
720 union {
721 struct ib_cq *cq;
722 struct ib_qp *qp;
723 struct ib_srq *srq;
724 struct ib_wq *wq;
725 u8 port_num;
726 } element;
727 enum ib_event_type event;
728};
729
730struct ib_event_handler {
731 struct ib_device *device;
732 void (*handler)(struct ib_event_handler *, struct ib_event *);
733 struct list_head list;
734};
735
736#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
737 do { \
738 (_ptr)->device = _device; \
739 (_ptr)->handler = _handler; \
740 INIT_LIST_HEAD(&(_ptr)->list); \
741 } while (0)
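
/*
 * Example (illustrative): registering for asynchronous events.  The handler
 * name and "ib_dev" are hypothetical; ib_register_event_handler() is
 * declared further down in this header.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			pr_warn("port %u went down\n",
 *				event->element.port_num);
 *	}
 *
 *	struct ib_event_handler eh;
 *	INIT_IB_EVENT_HANDLER(&eh, ib_dev, my_event_handler);
 *	ib_register_event_handler(&eh);
 */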
742
743struct ib_global_route {
744 const struct ib_gid_attr *sgid_attr;
745 union ib_gid dgid;
746 u32 flow_label;
747 u8 sgid_index;
748 u8 hop_limit;
749 u8 traffic_class;
750};
751
752struct ib_grh {
753 __be32 version_tclass_flow;
754 __be16 paylen;
755 u8 next_hdr;
756 u8 hop_limit;
757 union ib_gid sgid;
758 union ib_gid dgid;
759};
760
761union rdma_network_hdr {
762 struct ib_grh ibgrh;
763 struct {
  /*
   * For RoCEv2 over IPv4 the IP header occupies the last 20 bytes of the
   * 40-byte GRH space, hence the leading padding.
   */
767 u8 reserved[20];
768 struct iphdr roce4grh;
769 };
770};
771
772#define IB_QPN_MASK 0xFFFFFF
773
774enum {
775 IB_MULTICAST_QPN = 0xffffff
776};
777
778#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
779#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
780
781enum ib_ah_flags {
782 IB_AH_GRH = 1
783};
784
785enum ib_rate {
786 IB_RATE_PORT_CURRENT = 0,
787 IB_RATE_2_5_GBPS = 2,
788 IB_RATE_5_GBPS = 5,
789 IB_RATE_10_GBPS = 3,
790 IB_RATE_20_GBPS = 6,
791 IB_RATE_30_GBPS = 4,
792 IB_RATE_40_GBPS = 7,
793 IB_RATE_60_GBPS = 8,
794 IB_RATE_80_GBPS = 9,
795 IB_RATE_120_GBPS = 10,
796 IB_RATE_14_GBPS = 11,
797 IB_RATE_56_GBPS = 12,
798 IB_RATE_112_GBPS = 13,
799 IB_RATE_168_GBPS = 14,
800 IB_RATE_25_GBPS = 15,
801 IB_RATE_100_GBPS = 16,
802 IB_RATE_200_GBPS = 17,
803 IB_RATE_300_GBPS = 18,
804 IB_RATE_28_GBPS = 19,
805 IB_RATE_50_GBPS = 20,
806 IB_RATE_400_GBPS = 21,
807 IB_RATE_600_GBPS = 22,
808};
809
/*
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the base rate
 * of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS is converted to 2, since
 * 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
816__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
817
/*
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_5_GBPS is converted to 5000.
 * @rate: rate to convert.
 */
823__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
824
/*
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used with the fast
 *                        registration work request (IB_WR_REG_MR)
 * @IB_MR_TYPE_SG_GAPS:   like @IB_MR_TYPE_MEM_REG, but the scatter/gather
 *                        list may contain arbitrary gaps
 * @IB_MR_TYPE_DM:        memory region backed by device memory (struct ib_dm)
 * @IB_MR_TYPE_USER:      memory region registered on behalf of userspace
 * @IB_MR_TYPE_DMA:       memory region covering the device's whole DMA
 *                        address space
 * @IB_MR_TYPE_INTEGRITY: memory region with integrity (signature) offload
 *                        support
 */
843enum ib_mr_type {
844 IB_MR_TYPE_MEM_REG,
845 IB_MR_TYPE_SG_GAPS,
846 IB_MR_TYPE_DM,
847 IB_MR_TYPE_USER,
848 IB_MR_TYPE_DMA,
849 IB_MR_TYPE_INTEGRITY,
850};
851
852enum ib_mr_status_check {
853 IB_MR_CHECK_SIG_STATUS = 1,
854};
855
/*
 * struct ib_mr_status - memory region status, as returned by
 * ib_check_mr_status()
 * @fail_status: bitmask of failed checks (IB_MR_CHECK_*); a bit is set for
 *               each check that failed
 * @sig_err:     additional signature error information, valid when
 *               IB_MR_CHECK_SIG_STATUS is set in @fail_status
 */
864struct ib_mr_status {
865 u32 fail_status;
866 struct ib_sig_err sig_err;
867};
868
/*
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to the corresponding
 * IB rate enum.
 * @mult: multiple to convert.
 */
874__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
875
876enum rdma_ah_attr_type {
877 RDMA_AH_ATTR_TYPE_UNDEFINED,
878 RDMA_AH_ATTR_TYPE_IB,
879 RDMA_AH_ATTR_TYPE_ROCE,
880 RDMA_AH_ATTR_TYPE_OPA,
881};
882
883struct ib_ah_attr {
884 u16 dlid;
885 u8 src_path_bits;
886};
887
888struct roce_ah_attr {
889 u8 dmac[ETH_ALEN];
890};
891
892struct opa_ah_attr {
893 u32 dlid;
894 u8 src_path_bits;
895 bool make_grd;
896};
897
898struct rdma_ah_attr {
899 struct ib_global_route grh;
900 u8 sl;
901 u8 static_rate;
902 u8 port_num;
903 u8 ah_flags;
904 enum rdma_ah_attr_type type;
905 union {
906 struct ib_ah_attr ib;
907 struct roce_ah_attr roce;
908 struct opa_ah_attr opa;
909 };
910};
911
912enum ib_wc_status {
913 IB_WC_SUCCESS,
914 IB_WC_LOC_LEN_ERR,
915 IB_WC_LOC_QP_OP_ERR,
916 IB_WC_LOC_EEC_OP_ERR,
917 IB_WC_LOC_PROT_ERR,
918 IB_WC_WR_FLUSH_ERR,
919 IB_WC_MW_BIND_ERR,
920 IB_WC_BAD_RESP_ERR,
921 IB_WC_LOC_ACCESS_ERR,
922 IB_WC_REM_INV_REQ_ERR,
923 IB_WC_REM_ACCESS_ERR,
924 IB_WC_REM_OP_ERR,
925 IB_WC_RETRY_EXC_ERR,
926 IB_WC_RNR_RETRY_EXC_ERR,
927 IB_WC_LOC_RDD_VIOL_ERR,
928 IB_WC_REM_INV_RD_REQ_ERR,
929 IB_WC_REM_ABORT_ERR,
930 IB_WC_INV_EECN_ERR,
931 IB_WC_INV_EEC_STATE_ERR,
932 IB_WC_FATAL_ERR,
933 IB_WC_RESP_TIMEOUT_ERR,
934 IB_WC_GENERAL_ERR
935};
936
937const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
938
939enum ib_wc_opcode {
940 IB_WC_SEND,
941 IB_WC_RDMA_WRITE,
942 IB_WC_RDMA_READ,
943 IB_WC_COMP_SWAP,
944 IB_WC_FETCH_ADD,
945 IB_WC_LSO,
946 IB_WC_LOCAL_INV,
947 IB_WC_REG_MR,
948 IB_WC_MASKED_COMP_SWAP,
949 IB_WC_MASKED_FETCH_ADD,

 /*
  * The value of IB_WC_RECV is chosen so that consumers can test whether a
  * completion is a receive with (opcode & IB_WC_RECV).
  */
954 IB_WC_RECV = 1 << 7,
955 IB_WC_RECV_RDMA_WITH_IMM
956};
957
958enum ib_wc_flags {
959 IB_WC_GRH = 1,
960 IB_WC_WITH_IMM = (1<<1),
961 IB_WC_WITH_INVALIDATE = (1<<2),
962 IB_WC_IP_CSUM_OK = (1<<3),
963 IB_WC_WITH_SMAC = (1<<4),
964 IB_WC_WITH_VLAN = (1<<5),
965 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
966};
967
968struct ib_wc {
969 union {
970 u64 wr_id;
971 struct ib_cqe *wr_cqe;
972 };
973 enum ib_wc_status status;
974 enum ib_wc_opcode opcode;
975 u32 vendor_err;
976 u32 byte_len;
977 struct ib_qp *qp;
978 union {
979 __be32 imm_data;
980 u32 invalidate_rkey;
981 } ex;
982 u32 src_qp;
983 u32 slid;
984 int wc_flags;
985 u16 pkey_index;
986 u8 sl;
987 u8 dlid_path_bits;
988 u8 port_num;
989 u8 smac[ETH_ALEN];
990 u16 vlan_id;
991 u8 network_hdr_type;
992};
993
994enum ib_cq_notify_flags {
995 IB_CQ_SOLICITED = 1 << 0,
996 IB_CQ_NEXT_COMP = 1 << 1,
997 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
998 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
999};
1000
1001enum ib_srq_type {
1002 IB_SRQT_BASIC,
1003 IB_SRQT_XRC,
1004 IB_SRQT_TM,
1005};
1006
1007static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1008{
1009 return srq_type == IB_SRQT_XRC ||
1010 srq_type == IB_SRQT_TM;
1011}
1012
1013enum ib_srq_attr_mask {
1014 IB_SRQ_MAX_WR = 1 << 0,
1015 IB_SRQ_LIMIT = 1 << 1,
1016};
1017
1018struct ib_srq_attr {
1019 u32 max_wr;
1020 u32 max_sge;
1021 u32 srq_limit;
1022};
1023
1024struct ib_srq_init_attr {
1025 void (*event_handler)(struct ib_event *, void *);
1026 void *srq_context;
1027 struct ib_srq_attr attr;
1028 enum ib_srq_type srq_type;
1029
1030 struct {
1031 struct ib_cq *cq;
1032 union {
1033 struct {
1034 struct ib_xrcd *xrcd;
1035 } xrc;
1036
1037 struct {
1038 u32 max_num_tags;
1039 } tag_matching;
1040 };
1041 } ext;
1042};
1043
1044struct ib_qp_cap {
1045 u32 max_send_wr;
1046 u32 max_recv_wr;
1047 u32 max_send_sge;
1048 u32 max_recv_sge;
1049 u32 max_inline_data;

 /*
  * Maximum number of rdma_rw_ctx structures in flight at a time.
  * ib_create_qp() will calculate the extra WRs and MRs needed for this.
  */
1056 u32 max_rdma_ctxs;
1057};
1058
1059enum ib_sig_type {
1060 IB_SIGNAL_ALL_WR,
1061 IB_SIGNAL_REQ_WR
1062};
1063
1064enum ib_qp_type {
 /*
  * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries here (and in
  * that order) since the MAD layer uses them as indices into a 2-entry
  * table.
  */
1070 IB_QPT_SMI,
1071 IB_QPT_GSI,
1072
1073 IB_QPT_RC,
1074 IB_QPT_UC,
1075 IB_QPT_UD,
1076 IB_QPT_RAW_IPV6,
1077 IB_QPT_RAW_ETHERTYPE,
1078 IB_QPT_RAW_PACKET = 8,
1079 IB_QPT_XRC_INI = 9,
1080 IB_QPT_XRC_TGT,
1081 IB_QPT_MAX,
1082 IB_QPT_DRIVER = 0xFF,

 /*
  * Range of QP types reserved for low-level drivers' internal use; these
  * are never visible at the IB core layer.
  */
1087 IB_QPT_RESERVED1 = 0x1000,
1088 IB_QPT_RESERVED2,
1089 IB_QPT_RESERVED3,
1090 IB_QPT_RESERVED4,
1091 IB_QPT_RESERVED5,
1092 IB_QPT_RESERVED6,
1093 IB_QPT_RESERVED7,
1094 IB_QPT_RESERVED8,
1095 IB_QPT_RESERVED9,
1096 IB_QPT_RESERVED10,
1097};
1098
1099enum ib_qp_create_flags {
1100 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1101 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
1102 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1103 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1104 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1105 IB_QP_CREATE_NETIF_QP = 1 << 5,
1106 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1107
1108 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
1109 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
1110 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1111 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
1112
1113 IB_QP_CREATE_RESERVED_START = 1 << 26,
1114 IB_QP_CREATE_RESERVED_END = 1 << 31,
1115};
1116
/*
 * Note: consumers may not call ib_close_qp() or ib_destroy_qp() from the
 * event_handler callback to destroy the QP that was passed to it.
 */
1122struct ib_qp_init_attr {
1123
1124 void (*event_handler)(struct ib_event *, void *);
1125
1126 void *qp_context;
1127 struct ib_cq *send_cq;
1128 struct ib_cq *recv_cq;
1129 struct ib_srq *srq;
1130 struct ib_xrcd *xrcd;
1131 struct ib_qp_cap cap;
1132 enum ib_sig_type sq_sig_type;
1133 enum ib_qp_type qp_type;
1134 u32 create_flags;

 /*
  * Only needed for special QP types, or when using the RDMA R/W API.
  */
1139 u8 port_num;
1140 struct ib_rwq_ind_table *rwq_ind_tbl;
1141 u32 source_qpn;
1142};
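
/*
 * Example (illustrative): minimal attributes for a signaled RC QP.  The CQ
 * and PD variables are assumed to exist; ib_create_qp() is declared later
 * in this header.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap		= {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */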
1143
1144struct ib_qp_open_attr {
1145 void (*event_handler)(struct ib_event *, void *);
1146 void *qp_context;
1147 u32 qp_num;
1148 enum ib_qp_type qp_type;
1149};
1150
1151enum ib_rnr_timeout {
1152 IB_RNR_TIMER_655_36 = 0,
1153 IB_RNR_TIMER_000_01 = 1,
1154 IB_RNR_TIMER_000_02 = 2,
1155 IB_RNR_TIMER_000_03 = 3,
1156 IB_RNR_TIMER_000_04 = 4,
1157 IB_RNR_TIMER_000_06 = 5,
1158 IB_RNR_TIMER_000_08 = 6,
1159 IB_RNR_TIMER_000_12 = 7,
1160 IB_RNR_TIMER_000_16 = 8,
1161 IB_RNR_TIMER_000_24 = 9,
1162 IB_RNR_TIMER_000_32 = 10,
1163 IB_RNR_TIMER_000_48 = 11,
1164 IB_RNR_TIMER_000_64 = 12,
1165 IB_RNR_TIMER_000_96 = 13,
1166 IB_RNR_TIMER_001_28 = 14,
1167 IB_RNR_TIMER_001_92 = 15,
1168 IB_RNR_TIMER_002_56 = 16,
1169 IB_RNR_TIMER_003_84 = 17,
1170 IB_RNR_TIMER_005_12 = 18,
1171 IB_RNR_TIMER_007_68 = 19,
1172 IB_RNR_TIMER_010_24 = 20,
1173 IB_RNR_TIMER_015_36 = 21,
1174 IB_RNR_TIMER_020_48 = 22,
1175 IB_RNR_TIMER_030_72 = 23,
1176 IB_RNR_TIMER_040_96 = 24,
1177 IB_RNR_TIMER_061_44 = 25,
1178 IB_RNR_TIMER_081_92 = 26,
1179 IB_RNR_TIMER_122_88 = 27,
1180 IB_RNR_TIMER_163_84 = 28,
1181 IB_RNR_TIMER_245_76 = 29,
1182 IB_RNR_TIMER_327_68 = 30,
1183 IB_RNR_TIMER_491_52 = 31
1184};
1185
1186enum ib_qp_attr_mask {
1187 IB_QP_STATE = 1,
1188 IB_QP_CUR_STATE = (1<<1),
1189 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1190 IB_QP_ACCESS_FLAGS = (1<<3),
1191 IB_QP_PKEY_INDEX = (1<<4),
1192 IB_QP_PORT = (1<<5),
1193 IB_QP_QKEY = (1<<6),
1194 IB_QP_AV = (1<<7),
1195 IB_QP_PATH_MTU = (1<<8),
1196 IB_QP_TIMEOUT = (1<<9),
1197 IB_QP_RETRY_CNT = (1<<10),
1198 IB_QP_RNR_RETRY = (1<<11),
1199 IB_QP_RQ_PSN = (1<<12),
1200 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1201 IB_QP_ALT_PATH = (1<<14),
1202 IB_QP_MIN_RNR_TIMER = (1<<15),
1203 IB_QP_SQ_PSN = (1<<16),
1204 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1205 IB_QP_PATH_MIG_STATE = (1<<18),
1206 IB_QP_CAP = (1<<19),
1207 IB_QP_DEST_QPN = (1<<20),
1208 IB_QP_RESERVED1 = (1<<21),
1209 IB_QP_RESERVED2 = (1<<22),
1210 IB_QP_RESERVED3 = (1<<23),
1211 IB_QP_RESERVED4 = (1<<24),
1212 IB_QP_RATE_LIMIT = (1<<25),
1213};
1214
1215enum ib_qp_state {
1216 IB_QPS_RESET,
1217 IB_QPS_INIT,
1218 IB_QPS_RTR,
1219 IB_QPS_RTS,
1220 IB_QPS_SQD,
1221 IB_QPS_SQE,
1222 IB_QPS_ERR
1223};
1224
1225enum ib_mig_state {
1226 IB_MIG_MIGRATED,
1227 IB_MIG_REARM,
1228 IB_MIG_ARMED
1229};
1230
1231enum ib_mw_type {
1232 IB_MW_TYPE_1 = 1,
1233 IB_MW_TYPE_2 = 2
1234};
1235
1236struct ib_qp_attr {
1237 enum ib_qp_state qp_state;
1238 enum ib_qp_state cur_qp_state;
1239 enum ib_mtu path_mtu;
1240 enum ib_mig_state path_mig_state;
1241 u32 qkey;
1242 u32 rq_psn;
1243 u32 sq_psn;
1244 u32 dest_qp_num;
1245 int qp_access_flags;
1246 struct ib_qp_cap cap;
1247 struct rdma_ah_attr ah_attr;
1248 struct rdma_ah_attr alt_ah_attr;
1249 u16 pkey_index;
1250 u16 alt_pkey_index;
1251 u8 en_sqd_async_notify;
1252 u8 sq_draining;
1253 u8 max_rd_atomic;
1254 u8 max_dest_rd_atomic;
1255 u8 min_rnr_timer;
1256 u8 port_num;
1257 u8 timeout;
1258 u8 retry_cnt;
1259 u8 rnr_retry;
1260 u8 alt_port_num;
1261 u8 alt_timeout;
1262 u32 rate_limit;
1263};
1264
1265enum ib_wr_opcode {
 /* These are shared with userspace */
1267 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1268 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1269 IB_WR_SEND = IB_UVERBS_WR_SEND,
1270 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1271 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1272 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1273 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1274 IB_WR_LSO = IB_UVERBS_WR_TSO,
1275 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1276 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1277 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1278 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1279 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1280 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1281 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

 /* These are kernel only and can not be issued by userspace */
1284 IB_WR_REG_MR = 0x20,
1285 IB_WR_REG_MR_INTEGRITY,

 /*
  * Values reserved for low-level drivers' internal use; the core layer
  * never uses them.
  */
1290 IB_WR_RESERVED1 = 0xf0,
1291 IB_WR_RESERVED2,
1292 IB_WR_RESERVED3,
1293 IB_WR_RESERVED4,
1294 IB_WR_RESERVED5,
1295 IB_WR_RESERVED6,
1296 IB_WR_RESERVED7,
1297 IB_WR_RESERVED8,
1298 IB_WR_RESERVED9,
1299 IB_WR_RESERVED10,
1300};
1301
1302enum ib_send_flags {
1303 IB_SEND_FENCE = 1,
1304 IB_SEND_SIGNALED = (1<<1),
1305 IB_SEND_SOLICITED = (1<<2),
1306 IB_SEND_INLINE = (1<<3),
1307 IB_SEND_IP_CSUM = (1<<4),

 /* Bits 26-31 are reserved for low-level drivers' internal use */
1310 IB_SEND_RESERVED_START = (1 << 26),
1311 IB_SEND_RESERVED_END = (1 << 31),
1312};
1313
1314struct ib_sge {
1315 u64 addr;
1316 u32 length;
1317 u32 lkey;
1318};
1319
1320struct ib_cqe {
1321 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1322};
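
/*
 * Example (illustrative): the wr_cqe completion pattern.  A consumer embeds
 * struct ib_cqe in its own request structure, points wr_cqe at it when
 * posting, and recovers the request with container_of() in the done
 * callback.  "struct my_request" is hypothetical.
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *		void		*buf;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *
 *		if (wc->status != IB_WC_SUCCESS)
 *			pr_err("request failed: %s\n",
 *			       ib_wc_status_msg(wc->status));
 *		// ... complete req ...
 *	}
 *
 *	req->cqe.done = my_done;
 *	send_wr.wr_cqe = &req->cqe;
 */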
1323
1324struct ib_send_wr {
1325 struct ib_send_wr *next;
1326 union {
1327 u64 wr_id;
1328 struct ib_cqe *wr_cqe;
1329 };
1330 struct ib_sge *sg_list;
1331 int num_sge;
1332 enum ib_wr_opcode opcode;
1333 int send_flags;
1334 union {
1335 __be32 imm_data;
1336 u32 invalidate_rkey;
1337 } ex;
1338};
1339
1340struct ib_rdma_wr {
1341 struct ib_send_wr wr;
1342 u64 remote_addr;
1343 u32 rkey;
1344};
1345
1346static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1347{
1348 return container_of(wr, struct ib_rdma_wr, wr);
1349}
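
/*
 * Example (illustrative): an RDMA WRITE work request using struct
 * ib_rdma_wr.  "sge", "raddr", "rkey" and "qp" are assumed to be set up
 * already; ib_post_send() is declared later in this header.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode		= IB_WR_RDMA_WRITE,
 *			.send_flags	= IB_SEND_SIGNALED,
 *			.sg_list	= &sge,
 *			.num_sge	= 1,
 *		},
 *		.remote_addr	= raddr,
 *		.rkey		= rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */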
1350
1351struct ib_atomic_wr {
1352 struct ib_send_wr wr;
1353 u64 remote_addr;
1354 u64 compare_add;
1355 u64 swap;
1356 u64 compare_add_mask;
1357 u64 swap_mask;
1358 u32 rkey;
1359};
1360
1361static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1362{
1363 return container_of(wr, struct ib_atomic_wr, wr);
1364}
1365
1366struct ib_ud_wr {
1367 struct ib_send_wr wr;
1368 struct ib_ah *ah;
1369 void *header;
1370 int hlen;
1371 int mss;
1372 u32 remote_qpn;
1373 u32 remote_qkey;
1374 u16 pkey_index;
1375 u8 port_num;
1376};
1377
1378static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1379{
1380 return container_of(wr, struct ib_ud_wr, wr);
1381}
1382
1383struct ib_reg_wr {
1384 struct ib_send_wr wr;
1385 struct ib_mr *mr;
1386 u32 key;
1387 int access;
1388};
1389
1390static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1391{
1392 return container_of(wr, struct ib_reg_wr, wr);
1393}
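
/*
 * Example (illustrative): fast memory registration with IB_WR_REG_MR.  The
 * MR is assumed to have been obtained with ib_alloc_mr() and mapped with
 * ib_map_mr_sg() (both declared later in this header); "qp", "bad_wr" and
 * "ret" are assumed to exist.
 *
 *	struct ib_reg_wr reg = {
 *		.wr = {
 *			.opcode		= IB_WR_REG_MR,
 *			.send_flags	= IB_SEND_SIGNALED,
 *		},
 *		.mr	= mr,
 *		.key	= mr->rkey,
 *		.access	= IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_post_send(qp, &reg.wr, &bad_wr);
 */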
1394
1395struct ib_recv_wr {
1396 struct ib_recv_wr *next;
1397 union {
1398 u64 wr_id;
1399 struct ib_cqe *wr_cqe;
1400 };
1401 struct ib_sge *sg_list;
1402 int num_sge;
1403};
1404
1405enum ib_access_flags {
1406 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1407 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1408 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1409 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1410 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1411 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1412 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1413 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1414
1415 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1416};
1417
1418
1419
1420
1421
1422enum ib_mr_rereg_flags {
1423 IB_MR_REREG_TRANS = 1,
1424 IB_MR_REREG_PD = (1<<1),
1425 IB_MR_REREG_ACCESS = (1<<2),
1426 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1427};
1428
1429struct ib_fmr_attr {
1430 int max_pages;
1431 int max_maps;
1432 u8 page_shift;
1433};
1434
1435struct ib_umem;
1436
1437enum rdma_remove_reason {
1438
1439
1440
1441
1442 RDMA_REMOVE_DESTROY,
1443
1444 RDMA_REMOVE_CLOSE,
1445
1446 RDMA_REMOVE_DRIVER_REMOVE,
1447
1448 RDMA_REMOVE_ABORT,
1449};
1450
1451struct ib_rdmacg_object {
1452#ifdef CONFIG_CGROUP_RDMA
1453 struct rdma_cgroup *cg;
1454#endif
1455};
1456
1457struct ib_ucontext {
1458 struct ib_device *device;
1459 struct ib_uverbs_file *ufile;
1460
1461
1462
1463
1464
1465 bool closing;
1466
1467 bool cleanup_retryable;
1468
1469 struct ib_rdmacg_object cg_obj;
1470
1471
1472
1473 struct rdma_restrack_entry res;
1474};
1475
1476struct ib_uobject {
1477 u64 user_handle;
1478
1479 struct ib_uverbs_file *ufile;
1480
1481 struct ib_ucontext *context;
1482 void *object;
1483 struct list_head list;
1484 struct ib_rdmacg_object cg_obj;
1485 int id;
1486 struct kref ref;
1487 atomic_t usecnt;
1488 struct rcu_head rcu;
1489
1490 const struct uverbs_api_object *uapi_object;
1491};
1492
1493struct ib_udata {
1494 const void __user *inbuf;
1495 void __user *outbuf;
1496 size_t inlen;
1497 size_t outlen;
1498};
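
/*
 * Example (illustrative): how a driver verb might read its private command
 * from userspace via struct ib_udata.  "struct my_create_cq_cmd" is a
 * hypothetical driver ABI structure; ib_copy_from_udata() is defined later
 * in this header.
 *
 *	struct my_create_cq_cmd cmd;
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 */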
1499
1500struct ib_pd {
1501 u32 local_dma_lkey;
1502 u32 flags;
1503 struct ib_device *device;
1504 struct ib_uobject *uobject;
1505 atomic_t usecnt;
1506
1507 u32 unsafe_global_rkey;
1508
1509
1510
1511
1512 struct ib_mr *__internal_mr;
1513 struct rdma_restrack_entry res;
1514};
1515
1516struct ib_xrcd {
1517 struct ib_device *device;
1518 atomic_t usecnt;
1519 struct inode *inode;
1520
1521 struct mutex tgt_qp_mutex;
1522 struct list_head tgt_qp_list;
1523};
1524
1525struct ib_ah {
1526 struct ib_device *device;
1527 struct ib_pd *pd;
1528 struct ib_uobject *uobject;
1529 const struct ib_gid_attr *sgid_attr;
1530 enum rdma_ah_attr_type type;
1531};
1532
1533typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1534
1535enum ib_poll_context {
1536 IB_POLL_DIRECT,
1537 IB_POLL_SOFTIRQ,
1538 IB_POLL_WORKQUEUE,
1539 IB_POLL_UNBOUND_WORKQUEUE,
1540};
1541
1542struct ib_cq {
1543 struct ib_device *device;
1544 struct ib_uobject *uobject;
1545 ib_comp_handler comp_handler;
1546 void (*event_handler)(struct ib_event *, void *);
1547 void *cq_context;
1548 int cqe;
1549 atomic_t usecnt;
1550 enum ib_poll_context poll_ctx;
1551 struct ib_wc *wc;
1552 union {
1553 struct irq_poll iop;
1554 struct work_struct work;
1555 };
1556 struct workqueue_struct *comp_wq;
1557 struct dim *dim;
1558
1559
1560
1561 struct rdma_restrack_entry res;
1562};
1563
1564struct ib_srq {
1565 struct ib_device *device;
1566 struct ib_pd *pd;
1567 struct ib_uobject *uobject;
1568 void (*event_handler)(struct ib_event *, void *);
1569 void *srq_context;
1570 enum ib_srq_type srq_type;
1571 atomic_t usecnt;
1572
1573 struct {
1574 struct ib_cq *cq;
1575 union {
1576 struct {
1577 struct ib_xrcd *xrcd;
1578 u32 srq_num;
1579 } xrc;
1580 };
1581 } ext;
1582};
1583
1584enum ib_raw_packet_caps {
1585
1586
1587
1588 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1589
1590
1591 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1592
1593 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
1594
1595
1596
1597 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
1598};
1599
1600enum ib_wq_type {
1601 IB_WQT_RQ
1602};
1603
1604enum ib_wq_state {
1605 IB_WQS_RESET,
1606 IB_WQS_RDY,
1607 IB_WQS_ERR
1608};
1609
1610struct ib_wq {
1611 struct ib_device *device;
1612 struct ib_uobject *uobject;
1613 void *wq_context;
1614 void (*event_handler)(struct ib_event *, void *);
1615 struct ib_pd *pd;
1616 struct ib_cq *cq;
1617 u32 wq_num;
1618 enum ib_wq_state state;
1619 enum ib_wq_type wq_type;
1620 atomic_t usecnt;
1621};
1622
1623enum ib_wq_flags {
1624 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
1625 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
1626 IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
1627 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1628};
1629
1630struct ib_wq_init_attr {
1631 void *wq_context;
1632 enum ib_wq_type wq_type;
1633 u32 max_wr;
1634 u32 max_sge;
1635 struct ib_cq *cq;
1636 void (*event_handler)(struct ib_event *, void *);
1637 u32 create_flags;
1638};
1639
1640enum ib_wq_attr_mask {
1641 IB_WQ_STATE = 1 << 0,
1642 IB_WQ_CUR_STATE = 1 << 1,
1643 IB_WQ_FLAGS = 1 << 2,
1644};
1645
1646struct ib_wq_attr {
1647 enum ib_wq_state wq_state;
1648 enum ib_wq_state curr_wq_state;
1649 u32 flags;
1650 u32 flags_mask;
1651};
1652
1653struct ib_rwq_ind_table {
1654 struct ib_device *device;
1655 struct ib_uobject *uobject;
1656 atomic_t usecnt;
1657 u32 ind_tbl_num;
1658 u32 log_ind_tbl_size;
1659 struct ib_wq **ind_tbl;
1660};
1661
1662struct ib_rwq_ind_table_init_attr {
1663 u32 log_ind_tbl_size;
1664
1665 struct ib_wq **ind_tbl;
1666};
1667
1668enum port_pkey_state {
1669 IB_PORT_PKEY_NOT_VALID = 0,
1670 IB_PORT_PKEY_VALID = 1,
1671 IB_PORT_PKEY_LISTED = 2,
1672};
1673
1674struct ib_qp_security;
1675
1676struct ib_port_pkey {
1677 enum port_pkey_state state;
1678 u16 pkey_index;
1679 u8 port_num;
1680 struct list_head qp_list;
1681 struct list_head to_error_list;
1682 struct ib_qp_security *sec;
1683};
1684
1685struct ib_ports_pkeys {
1686 struct ib_port_pkey main;
1687 struct ib_port_pkey alt;
1688};
1689
1690struct ib_qp_security {
1691 struct ib_qp *qp;
1692 struct ib_device *dev;
1693
1694 struct mutex mutex;
1695 struct ib_ports_pkeys *ports_pkeys;
1696
1697
1698
1699 struct list_head shared_qp_list;
1700 void *security;
1701 bool destroying;
1702 atomic_t error_list_count;
1703 struct completion error_complete;
1704 int error_comps_pending;
1705};
1706
1707
1708
1709
1710
1711struct ib_qp {
1712 struct ib_device *device;
1713 struct ib_pd *pd;
1714 struct ib_cq *send_cq;
1715 struct ib_cq *recv_cq;
1716 spinlock_t mr_lock;
1717 int mrs_used;
1718 struct list_head rdma_mrs;
1719 struct list_head sig_mrs;
1720 struct ib_srq *srq;
1721 struct ib_xrcd *xrcd;
1722 struct list_head xrcd_list;
1723
1724
1725 atomic_t usecnt;
1726 struct list_head open_list;
1727 struct ib_qp *real_qp;
1728 struct ib_uobject *uobject;
1729 void (*event_handler)(struct ib_event *, void *);
1730 void *qp_context;
1731
1732 const struct ib_gid_attr *av_sgid_attr;
1733 const struct ib_gid_attr *alt_path_sgid_attr;
1734 u32 qp_num;
1735 u32 max_write_sge;
1736 u32 max_read_sge;
1737 enum ib_qp_type qp_type;
1738 struct ib_rwq_ind_table *rwq_ind_tbl;
1739 struct ib_qp_security *qp_sec;
1740 u8 port;
1741
1742 bool integrity_en;
1743
1744
1745
1746 struct rdma_restrack_entry res;
1747
1748
1749 struct rdma_counter *counter;
1750};
1751
1752struct ib_dm {
1753 struct ib_device *device;
1754 u32 length;
1755 u32 flags;
1756 struct ib_uobject *uobject;
1757 atomic_t usecnt;
1758};
1759
1760struct ib_mr {
1761 struct ib_device *device;
1762 struct ib_pd *pd;
1763 u32 lkey;
1764 u32 rkey;
1765 u64 iova;
1766 u64 length;
1767 unsigned int page_size;
1768 enum ib_mr_type type;
1769 bool need_inval;
1770 union {
1771 struct ib_uobject *uobject;
1772 struct list_head qp_entry;
1773 };
1774
1775 struct ib_dm *dm;
1776 struct ib_sig_attrs *sig_attrs;
1777
1778
1779
1780 struct rdma_restrack_entry res;
1781};
1782
1783struct ib_mw {
1784 struct ib_device *device;
1785 struct ib_pd *pd;
1786 struct ib_uobject *uobject;
1787 u32 rkey;
1788 enum ib_mw_type type;
1789};
1790
1791struct ib_fmr {
1792 struct ib_device *device;
1793 struct ib_pd *pd;
1794 struct list_head list;
1795 u32 lkey;
1796 u32 rkey;
1797};
1798
1799
1800enum ib_flow_attr_type {
1801
1802 IB_FLOW_ATTR_NORMAL = 0x0,
1803
1804
1805
1806 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1807
1808
1809
1810 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1811
1812 IB_FLOW_ATTR_SNIFFER = 0x3
1813};
1814
1815
1816enum ib_flow_spec_type {
1817
1818 IB_FLOW_SPEC_ETH = 0x20,
1819 IB_FLOW_SPEC_IB = 0x22,
1820
1821 IB_FLOW_SPEC_IPV4 = 0x30,
1822 IB_FLOW_SPEC_IPV6 = 0x31,
1823 IB_FLOW_SPEC_ESP = 0x34,
1824
1825 IB_FLOW_SPEC_TCP = 0x40,
1826 IB_FLOW_SPEC_UDP = 0x41,
1827 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1828 IB_FLOW_SPEC_GRE = 0x51,
1829 IB_FLOW_SPEC_MPLS = 0x60,
1830 IB_FLOW_SPEC_INNER = 0x100,
1831
1832 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1833 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1834 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1835 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1836};
1837#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1838#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1839
1840
1841
1842
1843enum ib_flow_domain {
1844 IB_FLOW_DOMAIN_USER,
1845 IB_FLOW_DOMAIN_ETHTOOL,
1846 IB_FLOW_DOMAIN_RFS,
1847 IB_FLOW_DOMAIN_NIC,
1848 IB_FLOW_DOMAIN_NUM
1849};
1850
1851enum ib_flow_flags {
1852 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
1853 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2,
1854 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3
1855};
1856
1857struct ib_flow_eth_filter {
1858 u8 dst_mac[6];
1859 u8 src_mac[6];
1860 __be16 ether_type;
1861 __be16 vlan_tag;
1862
1863 u8 real_sz[0];
1864};
1865
1866struct ib_flow_spec_eth {
1867 u32 type;
1868 u16 size;
1869 struct ib_flow_eth_filter val;
1870 struct ib_flow_eth_filter mask;
1871};
1872
1873struct ib_flow_ib_filter {
1874 __be16 dlid;
1875 __u8 sl;
1876
1877 u8 real_sz[0];
1878};
1879
1880struct ib_flow_spec_ib {
1881 u32 type;
1882 u16 size;
1883 struct ib_flow_ib_filter val;
1884 struct ib_flow_ib_filter mask;
1885};
1886
1887
1888enum ib_ipv4_flags {
1889 IB_IPV4_DONT_FRAG = 0x2,
1890 IB_IPV4_MORE_FRAG = 0X4
1891
1892};
1893
1894struct ib_flow_ipv4_filter {
1895 __be32 src_ip;
1896 __be32 dst_ip;
1897 u8 proto;
1898 u8 tos;
1899 u8 ttl;
1900 u8 flags;
1901
1902 u8 real_sz[0];
1903};
1904
1905struct ib_flow_spec_ipv4 {
1906 u32 type;
1907 u16 size;
1908 struct ib_flow_ipv4_filter val;
1909 struct ib_flow_ipv4_filter mask;
1910};
1911
1912struct ib_flow_ipv6_filter {
1913 u8 src_ip[16];
1914 u8 dst_ip[16];
1915 __be32 flow_label;
1916 u8 next_hdr;
1917 u8 traffic_class;
1918 u8 hop_limit;
1919
1920 u8 real_sz[0];
1921};
1922
1923struct ib_flow_spec_ipv6 {
1924 u32 type;
1925 u16 size;
1926 struct ib_flow_ipv6_filter val;
1927 struct ib_flow_ipv6_filter mask;
1928};
1929
1930struct ib_flow_tcp_udp_filter {
1931 __be16 dst_port;
1932 __be16 src_port;
1933
1934 u8 real_sz[0];
1935};
1936
1937struct ib_flow_spec_tcp_udp {
1938 u32 type;
1939 u16 size;
1940 struct ib_flow_tcp_udp_filter val;
1941 struct ib_flow_tcp_udp_filter mask;
1942};
1943
1944struct ib_flow_tunnel_filter {
1945 __be32 tunnel_id;
1946 u8 real_sz[0];
1947};
1948
1949
1950
1951
1952struct ib_flow_spec_tunnel {
1953 u32 type;
1954 u16 size;
1955 struct ib_flow_tunnel_filter val;
1956 struct ib_flow_tunnel_filter mask;
1957};
1958
1959struct ib_flow_esp_filter {
1960 __be32 spi;
1961 __be32 seq;
1962
1963 u8 real_sz[0];
1964};
1965
1966struct ib_flow_spec_esp {
1967 u32 type;
1968 u16 size;
1969 struct ib_flow_esp_filter val;
1970 struct ib_flow_esp_filter mask;
1971};
1972
1973struct ib_flow_gre_filter {
1974 __be16 c_ks_res0_ver;
1975 __be16 protocol;
1976 __be32 key;
1977
1978 u8 real_sz[0];
1979};
1980
1981struct ib_flow_spec_gre {
1982 u32 type;
1983 u16 size;
1984 struct ib_flow_gre_filter val;
1985 struct ib_flow_gre_filter mask;
1986};
1987
1988struct ib_flow_mpls_filter {
1989 __be32 tag;
1990
1991 u8 real_sz[0];
1992};
1993
1994struct ib_flow_spec_mpls {
1995 u32 type;
1996 u16 size;
1997 struct ib_flow_mpls_filter val;
1998 struct ib_flow_mpls_filter mask;
1999};
2000
2001struct ib_flow_spec_action_tag {
2002 enum ib_flow_spec_type type;
2003 u16 size;
2004 u32 tag_id;
2005};
2006
2007struct ib_flow_spec_action_drop {
2008 enum ib_flow_spec_type type;
2009 u16 size;
2010};
2011
2012struct ib_flow_spec_action_handle {
2013 enum ib_flow_spec_type type;
2014 u16 size;
2015 struct ib_flow_action *act;
2016};
2017
2018enum ib_counters_description {
2019 IB_COUNTER_PACKETS,
2020 IB_COUNTER_BYTES,
2021};
2022
2023struct ib_flow_spec_action_count {
2024 enum ib_flow_spec_type type;
2025 u16 size;
2026 struct ib_counters *counters;
2027};
2028
2029union ib_flow_spec {
2030 struct {
2031 u32 type;
2032 u16 size;
2033 };
2034 struct ib_flow_spec_eth eth;
2035 struct ib_flow_spec_ib ib;
2036 struct ib_flow_spec_ipv4 ipv4;
2037 struct ib_flow_spec_tcp_udp tcp_udp;
2038 struct ib_flow_spec_ipv6 ipv6;
2039 struct ib_flow_spec_tunnel tunnel;
2040 struct ib_flow_spec_esp esp;
2041 struct ib_flow_spec_gre gre;
2042 struct ib_flow_spec_mpls mpls;
2043 struct ib_flow_spec_action_tag flow_tag;
2044 struct ib_flow_spec_action_drop drop;
2045 struct ib_flow_spec_action_handle action;
2046 struct ib_flow_spec_action_count flow_count;
2047};
2048
2049struct ib_flow_attr {
2050 enum ib_flow_attr_type type;
2051 u16 size;
2052 u16 priority;
2053 u32 flags;
2054 u8 num_of_specs;
2055 u8 port;
2056 union ib_flow_spec flows[];
2057};
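
/*
 * Example (illustrative): building a single-spec flow rule that steers
 * traffic from a given source MAC ("mac", assumed) to a QP.  Error handling
 * is elided; ib_create_flow() is declared later in this header.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *	struct ib_flow *flow;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(*eth);
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *
 *	eth = (struct ib_flow_spec_eth *)attr->flows;
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	memcpy(eth->val.src_mac, mac, ETH_ALEN);
 *	memset(eth->mask.src_mac, 0xff, ETH_ALEN);
 *
 *	flow = ib_create_flow(qp, attr, IB_FLOW_DOMAIN_USER);
 *	kfree(attr);
 */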
2058
2059struct ib_flow {
2060 struct ib_qp *qp;
2061 struct ib_device *device;
2062 struct ib_uobject *uobject;
2063};
2064
2065enum ib_flow_action_type {
2066 IB_FLOW_ACTION_UNSPECIFIED,
2067 IB_FLOW_ACTION_ESP = 1,
2068};
2069
2070struct ib_flow_action_attrs_esp_keymats {
2071 enum ib_uverbs_flow_action_esp_keymat protocol;
2072 union {
2073 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2074 } keymat;
2075};
2076
2077struct ib_flow_action_attrs_esp_replays {
2078 enum ib_uverbs_flow_action_esp_replay protocol;
2079 union {
2080 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2081 } replay;
2082};
2083
2084enum ib_flow_action_attrs_esp_flags {
2085
2086
2087
2088
2089
2090
2091 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2092 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2093};
2094
2095struct ib_flow_spec_list {
2096 struct ib_flow_spec_list *next;
2097 union ib_flow_spec spec;
2098};
2099
2100struct ib_flow_action_attrs_esp {
2101 struct ib_flow_action_attrs_esp_keymats *keymat;
2102 struct ib_flow_action_attrs_esp_replays *replay;
2103 struct ib_flow_spec_list *encap;
2104
2105
2106
2107 u32 esn;
2108 u32 spi;
2109 u32 seq;
2110 u32 tfc_pad;
2111
2112 u64 flags;
2113 u64 hard_limit_pkts;
2114};
2115
2116struct ib_flow_action {
2117 struct ib_device *device;
2118 struct ib_uobject *uobject;
2119 enum ib_flow_action_type type;
2120 atomic_t usecnt;
2121};
2122
2123struct ib_mad_hdr;
2124struct ib_grh;
2125
2126enum ib_process_mad_flags {
2127 IB_MAD_IGNORE_MKEY = 1,
2128 IB_MAD_IGNORE_BKEY = 2,
2129 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2130};
2131
2132enum ib_mad_result {
2133 IB_MAD_RESULT_FAILURE = 0,
2134 IB_MAD_RESULT_SUCCESS = 1 << 0,
2135 IB_MAD_RESULT_REPLY = 1 << 1,
2136 IB_MAD_RESULT_CONSUMED = 1 << 2
2137};
2138
2139struct ib_port_cache {
2140 u64 subnet_prefix;
2141 struct ib_pkey_cache *pkey;
2142 struct ib_gid_table *gid;
2143 u8 lmc;
2144 enum ib_port_state port_state;
2145};
2146
2147struct ib_cache {
2148 rwlock_t lock;
2149 struct ib_event_handler event_handler;
2150};
2151
2152struct ib_port_immutable {
2153 int pkey_tbl_len;
2154 int gid_tbl_len;
2155 u32 core_cap_flags;
2156 u32 max_mad_size;
2157};
2158
2159struct ib_port_data {
2160 struct ib_device *ib_dev;
2161
2162 struct ib_port_immutable immutable;
2163
2164 spinlock_t pkey_list_lock;
2165 struct list_head pkey_list;
2166
2167 struct ib_port_cache cache;
2168
2169 spinlock_t netdev_lock;
2170 struct net_device __rcu *netdev;
2171 struct hlist_node ndev_hash_link;
2172 struct rdma_port_counter port_counter;
2173 struct rdma_hw_stats *hw_stats;
2174};
2175
/* rdma netdev type - specifies the protocol type */
2177enum rdma_netdev_t {
2178 RDMA_NETDEV_OPA_VNIC,
2179 RDMA_NETDEV_IPOIB,
2180};
2181
/*
 * struct rdma_netdev - state for RDMA devices that also expose a netdev
 * (e.g. IPoIB and OPA VNIC), i.e. where netstack interfacing is required.
 */
2186struct rdma_netdev {
2187 void *clnt_priv;
2188 struct ib_device *hca;
2189 u8 port_num;
2190
2191
2192
2193
2194
2195
2196 void (*free_rdma_netdev)(struct net_device *netdev);
2197
2198
2199 void (*set_id)(struct net_device *netdev, int id);
2200
2201 int (*send)(struct net_device *dev, struct sk_buff *skb,
2202 struct ib_ah *address, u32 dqpn);
2203
2204 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2205 union ib_gid *gid, u16 mlid,
2206 int set_qkey, u32 qkey);
2207 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2208 union ib_gid *gid, u16 mlid);
2209};
2210
2211struct rdma_netdev_alloc_params {
2212 size_t sizeof_priv;
2213 unsigned int txqs;
2214 unsigned int rxqs;
2215 void *param;
2216
2217 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2218 struct net_device *netdev, void *param);
2219};
2220
2221struct ib_counters {
2222 struct ib_device *device;
2223 struct ib_uobject *uobject;
2224
2225 atomic_t usecnt;
2226};
2227
2228struct ib_counters_read_attr {
2229 u64 *counters_buff;
2230 u32 ncounters;
2231 u32 flags;
2232};
2233
2234struct uverbs_attr_bundle;
2235struct iw_cm_id;
2236struct iw_cm_conn_param;
2237
2238#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2239 .size_##ib_struct = \
2240 (sizeof(struct drv_struct) + \
2241 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2242 BUILD_BUG_ON_ZERO( \
2243 !__same_type(((struct drv_struct *)NULL)->member, \
2244 struct ib_struct)))
2245
2246#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2247 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2248
2249#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2250 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2251
2252#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2253
/*
 * struct ib_device_ops - InfiniBand device operations
 *
 * This structure defines all the InfiniBand device operations; providers
 * fill in the operations they support and leave the rest NULL.
 */
2259struct ib_device_ops {
2260 struct module *owner;
2261 enum rdma_driver_id driver_id;
2262 u32 uverbs_abi_ver;
2263 unsigned int uverbs_no_driver_id_binding:1;
2264
2265 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2266 const struct ib_send_wr **bad_send_wr);
2267 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2268 const struct ib_recv_wr **bad_recv_wr);
2269 void (*drain_rq)(struct ib_qp *qp);
2270 void (*drain_sq)(struct ib_qp *qp);
2271 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2272 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2273 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2274 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2275 int (*post_srq_recv)(struct ib_srq *srq,
2276 const struct ib_recv_wr *recv_wr,
2277 const struct ib_recv_wr **bad_recv_wr);
2278 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2279 u8 port_num, const struct ib_wc *in_wc,
2280 const struct ib_grh *in_grh,
2281 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
2282 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
2283 u16 *out_mad_pkey_index);
2284 int (*query_device)(struct ib_device *device,
2285 struct ib_device_attr *device_attr,
2286 struct ib_udata *udata);
2287 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2288 struct ib_device_modify *device_modify);
2289 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2290 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2291 int comp_vector);
2292 int (*query_port)(struct ib_device *device, u8 port_num,
2293 struct ib_port_attr *port_attr);
2294 int (*modify_port)(struct ib_device *device, u8 port_num,
2295 int port_modify_mask,
2296 struct ib_port_modify *port_modify);
2297
2298
2299
2300
2301
2302
2303 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2304 struct ib_port_immutable *immutable);
2305 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2306 u8 port_num);
2307
2308
2309
2310
2311
2312
2313
2314
2315 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2316
2317
2318
2319
2320
2321
2322 struct net_device *(*alloc_rdma_netdev)(
2323 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2324 const char *name, unsigned char name_assign_type,
2325 void (*setup)(struct net_device *));
2326
2327 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2328 enum rdma_netdev_t type,
2329 struct rdma_netdev_alloc_params *params);
2330
2331
2332
2333
2334
2335 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2336 union ib_gid *gid);
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2351
2352
2353
2354
2355
2356
2357
2358
2359 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2360 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2361 u16 *pkey);
2362 int (*alloc_ucontext)(struct ib_ucontext *context,
2363 struct ib_udata *udata);
2364 void (*dealloc_ucontext)(struct ib_ucontext *context);
2365 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2366 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2367 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2368 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2369 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2370 u32 flags, struct ib_udata *udata);
2371 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2372 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2373 void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2374 int (*create_srq)(struct ib_srq *srq,
2375 struct ib_srq_init_attr *srq_init_attr,
2376 struct ib_udata *udata);
2377 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2378 enum ib_srq_attr_mask srq_attr_mask,
2379 struct ib_udata *udata);
2380 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2381 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2382 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2383 struct ib_qp_init_attr *qp_init_attr,
2384 struct ib_udata *udata);
2385 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2386 int qp_attr_mask, struct ib_udata *udata);
2387 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2388 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2389 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2390 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2391 struct ib_udata *udata);
2392 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2393 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2394 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2395 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2396 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2397 u64 virt_addr, int mr_access_flags,
2398 struct ib_udata *udata);
2399 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2400 u64 virt_addr, int mr_access_flags,
2401 struct ib_pd *pd, struct ib_udata *udata);
2402 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2403 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2404 u32 max_num_sg, struct ib_udata *udata);
2405 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2406 u32 max_num_data_sg,
2407 u32 max_num_meta_sg);
2408 int (*advise_mr)(struct ib_pd *pd,
2409 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2410 struct ib_sge *sg_list, u32 num_sge,
2411 struct uverbs_attr_bundle *attrs);
2412 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2413 unsigned int *sg_offset);
2414 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2415 struct ib_mr_status *mr_status);
2416 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2417 struct ib_udata *udata);
2418 int (*dealloc_mw)(struct ib_mw *mw);
2419 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2420 struct ib_fmr_attr *fmr_attr);
2421 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2422 u64 iova);
2423 int (*unmap_fmr)(struct list_head *fmr_list);
2424 int (*dealloc_fmr)(struct ib_fmr *fmr);
2425 void (*invalidate_range)(struct ib_umem_odp *umem_odp,
2426 unsigned long start, unsigned long end);
2427 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2428 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2429 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
2430 struct ib_udata *udata);
2431 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2432 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2433 struct ib_flow_attr *flow_attr,
2434 int domain, struct ib_udata *udata);
2435 int (*destroy_flow)(struct ib_flow *flow_id);
2436 struct ib_flow_action *(*create_flow_action_esp)(
2437 struct ib_device *device,
2438 const struct ib_flow_action_attrs_esp *attr,
2439 struct uverbs_attr_bundle *attrs);
2440 int (*destroy_flow_action)(struct ib_flow_action *action);
2441 int (*modify_flow_action_esp)(
2442 struct ib_flow_action *action,
2443 const struct ib_flow_action_attrs_esp *attr,
2444 struct uverbs_attr_bundle *attrs);
2445 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2446 int state);
2447 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2448 struct ifla_vf_info *ivf);
2449 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2450 struct ifla_vf_stats *stats);
2451 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2452 int type);
2453 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2454 struct ib_wq_init_attr *init_attr,
2455 struct ib_udata *udata);
2456 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2457 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2458 u32 wq_attr_mask, struct ib_udata *udata);
2459 struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2460 struct ib_device *device,
2461 struct ib_rwq_ind_table_init_attr *init_attr,
2462 struct ib_udata *udata);
2463 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2464 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2465 struct ib_ucontext *context,
2466 struct ib_dm_alloc_attr *attr,
2467 struct uverbs_attr_bundle *attrs);
2468 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2469 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2470 struct ib_dm_mr_attr *attr,
2471 struct uverbs_attr_bundle *attrs);
2472 struct ib_counters *(*create_counters)(
2473 struct ib_device *device, struct uverbs_attr_bundle *attrs);
2474 int (*destroy_counters)(struct ib_counters *counters);
2475 int (*read_counters)(struct ib_counters *counters,
2476 struct ib_counters_read_attr *counters_read_attr,
2477 struct uverbs_attr_bundle *attrs);
2478 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2479 int data_sg_nents, unsigned int *data_sg_offset,
2480 struct scatterlist *meta_sg, int meta_sg_nents,
2481 unsigned int *meta_sg_offset);
	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
2489 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2490 u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   counter at index if they choose to do so.
	 */
2503 int (*get_hw_stats)(struct ib_device *device,
2504 struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * init_port - Allows drivers to add driver-specific sysfs attributes
	 * for a port.  Called once for each port when the ib device is
	 * registered.
	 */
2509 int (*init_port)(struct ib_device *device, u8 port_num,
2510 struct kobject *port_sysfs);
	/*
	 * fill_res_entry - Allows drivers to emit driver-specific restrack
	 * attributes into a netlink message.
	 */
2514 int (*fill_res_entry)(struct sk_buff *msg,
2515 struct rdma_restrack_entry *entry);
	/* Device lifecycle callbacks */
	/*
	 * enable_driver - Called after the device becomes registered, before
	 * clients are attached.
	 */
2522 int (*enable_driver)(struct ib_device *dev);
	/*
	 * dealloc_driver - This is called as part of ib_dealloc_device().
	 */
2526 void (*dealloc_driver)(struct ib_device *dev);
	/* iWarp CM callbacks */
2529 void (*iw_add_ref)(struct ib_qp *qp);
2530 void (*iw_rem_ref)(struct ib_qp *qp);
2531 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2532 int (*iw_connect)(struct iw_cm_id *cm_id,
2533 struct iw_cm_conn_param *conn_param);
2534 int (*iw_accept)(struct iw_cm_id *cm_id,
2535 struct iw_cm_conn_param *conn_param);
2536 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2537 u8 pdata_len);
2538 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2539 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound.  If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id.
	 */
2545 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter.
	 */
2550 int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - Deallocate the hw counter.
	 */
2554 int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 *   the driver initialized data.
	 */
2559 struct rdma_hw_stats *(*counter_alloc_stats)(
2560 struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter.
	 */
2564 int (*counter_update_stats)(struct rdma_counter *counter);
2565
2566 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2567 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2568 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2569 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2570 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2571};
2572
2573struct ib_core_device {
	/* device must be the first element in the structure as long as the
	 * union of ib_core_device and device exists in ib_device.
	 */
2577 struct device dev;
2578 possible_net_t rdma_net;
2579 struct kobject *ports_kobj;
2580 struct list_head port_list;
2581 struct ib_device *owner;
2582};
2583
2584struct rdma_restrack_root;
2585struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2587 struct device *dma_device;
2588 struct ib_device_ops ops;
2589 char name[IB_DEVICE_NAME_MAX];
2590 struct rcu_head rcu_head;
2591
2592 struct list_head event_handler_list;
2593 spinlock_t event_handler_lock;
2594
2595 struct rw_semaphore client_data_rwsem;
2596 struct xarray client_data;
2597 struct mutex unregistration_lock;
2598
2599 struct ib_cache cache;
	/*
	 * port_data is indexed by port number
	 */
2603 struct ib_port_data *port_data;
2604
2605 int num_comp_vectors;
2606
2607 union {
2608 struct device dev;
2609 struct ib_core_device coredev;
2610 };
2611
	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is NULL terminated.
	 */
2616 const struct attribute_group *groups[3];
2617
2618 u64 uverbs_cmd_mask;
2619 u64 uverbs_ex_cmd_mask;
2620
2621 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2622 __be64 node_guid;
2623 u32 local_dma_lkey;
2624 u16 is_switch:1;
2625
2626 u16 kverbs_provider:1;
2627
2628 u16 use_cq_dim:1;
2629 u8 node_type;
2630 u8 phys_port_cnt;
2631 struct ib_device_attr attrs;
2632 struct attribute_group *hw_stats_ag;
2633 struct rdma_hw_stats *hw_stats;
2634
2635#ifdef CONFIG_CGROUP_RDMA
2636 struct rdmacg_device cg_device;
2637#endif
2638
2639 u32 index;
2640 struct rdma_restrack_root *res;
2641
2642 const struct uapi_definition *driver_def;
2643
	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
2648 refcount_t refcount;
2649 struct completion unreg_completion;
2650 struct work_struct unregistration_work;
2651
2652 const struct rdma_link_ops *link_ops;
2653
	/* Protects compat_devs xarray modifications */
2655 struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
2657 struct xarray compat_devs;
2658
	/* Used by iWarp CM */
2660 char iw_ifname[IFNAMSIZ];
2661 u32 iw_driver_flags;
2662};
2663
2664struct ib_client_nl_info;
2665struct ib_client {
2666 const char *name;
2667 void (*add) (struct ib_device *);
2668 void (*remove)(struct ib_device *, void *client_data);
2669 void (*rename)(struct ib_device *dev, void *client_data);
2670 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2671 struct ib_client_nl_info *res);
2672 int (*get_global_nl_info)(struct ib_client_nl_info *res);
	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
2689 struct net_device *(*get_net_dev_by_params)(
2690 struct ib_device *dev,
2691 u8 port,
2692 u16 pkey,
2693 const union ib_gid *gid,
2694 const struct sockaddr *addr,
2695 void *client_data);
2696
2697 refcount_t uses;
2698 struct completion uses_zero;
2699 u32 client_id;
	/* kverbs are not required by the client */
2702 u8 no_kverbs_req:1;
2703};
2704
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
2711struct ib_block_iter {
	/* internal states */
2713 struct scatterlist *__sg;
2714 dma_addr_t __dma_addr;
2715 unsigned int __sg_nents;
2716 unsigned int __sg_advance;
2717 unsigned int __pg_bit;
2718};
2719
2720struct ib_device *_ib_alloc_device(size_t size);
2721#define ib_alloc_device(drv_struct, member) \
2722 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2723 BUILD_BUG_ON_ZERO(offsetof( \
2724 struct drv_struct, member))), \
2725 struct drv_struct, member)
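
/*
 * Usage sketch (illustrative only, not part of this header): a driver embeds
 * struct ib_device as the *first* member of its own device structure (the
 * BUILD_BUG_ON_ZERO() above enforces offset zero) and allocates it with
 * ib_alloc_device().  The names "my_drv_dev" and "ibdev" are hypothetical.
 *
 *	struct my_drv_dev {
 *		struct ib_device ibdev;
 *		int private_state;
 *	};
 *
 *	struct my_drv_dev *drv = ib_alloc_device(my_drv_dev, ibdev);
 *
 *	if (!drv)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&drv->ibdev);
 */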
2726
2727void ib_dealloc_device(struct ib_device *device);
2728
2729void ib_get_device_fw_str(struct ib_device *device, char *str);
2730
2731int ib_register_device(struct ib_device *device, const char *name);
2732void ib_unregister_device(struct ib_device *device);
2733void ib_unregister_driver(enum rdma_driver_id driver_id);
2734void ib_unregister_device_and_put(struct ib_device *device);
2735void ib_unregister_device_queued(struct ib_device *ib_dev);
2736
2737int ib_register_client (struct ib_client *client);
2738void ib_unregister_client(struct ib_client *client);
2739
2740void __rdma_block_iter_start(struct ib_block_iter *biter,
2741 struct scatterlist *sglist,
2742 unsigned int nents,
2743 unsigned long pgsz);
2744bool __rdma_block_iter_next(struct ib_block_iter *biter);
2745
/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
2751static inline dma_addr_t
2752rdma_block_iter_dma_address(struct ib_block_iter *biter)
2753{
2754 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2755}
2756
/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get the aligned DMA
 * address of each block.
 */
2767#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2768 for (__rdma_block_iter_start(biter, sglist, nents, \
2769 pgsz); \
2770 __rdma_block_iter_next(biter);)
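
/*
 * Usage sketch (illustrative only): walk a DMA-mapped scatterlist in blocks
 * aligned to the chosen HW page size and record each aligned DMA address.
 * "sglist", "nents", "pg_bit" and "pas" are hypothetical local variables.
 *
 *	struct ib_block_iter biter;
 *	int i = 0;
 *
 *	rdma_for_each_block(sglist, &biter, nents, BIT_ULL(pg_bit))
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */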
2771
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data().  This can only be called while the client is
 * registered with the device; once the client's remove() callback has
 * returned it must not be called.
 */
2782static inline void *ib_get_client_data(struct ib_device *device,
2783 struct ib_client *client)
2784{
2785 return xa_load(&device->client_data, client->client_id);
2786}
2787void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2788 void *data);
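
/*
 * Usage sketch (illustrative only): an ib_client usually allocates per-device
 * state in its add() callback, publishes it with ib_set_client_data(), and
 * looks it up later with ib_get_client_data().  "my_client", "my_state",
 * "my_add_one" and "my_remove_one" are hypothetical.
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 */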
2789void ib_set_device_ops(struct ib_device *device,
2790 const struct ib_device_ops *ops);
2791
2792#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2793int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2794 unsigned long pfn, unsigned long size, pgprot_t prot);
2795#else
2796static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
2797 struct vm_area_struct *vma,
2798 unsigned long pfn, unsigned long size,
2799 pgprot_t prot)
2800{
2801 return -EINVAL;
2802}
2803#endif
2804
2805static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2806{
2807 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2808}
2809
2810static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2811{
2812 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2813}
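
/*
 * Usage sketch (illustrative only): a driver verb handler typically copies a
 * user command in and a response out through the udata buffers.  The
 * "my_create_foo_cmd" and "my_create_foo_resp" struct names are hypothetical.
 *
 *	struct my_create_foo_cmd cmd = {};
 *	struct my_create_foo_resp resp = {};
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	...
 *	if (ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)))
 *		return -EFAULT;
 */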
2814
2815static inline bool ib_is_buffer_cleared(const void __user *p,
2816 size_t len)
2817{
2818 bool ret;
2819 u8 *buf;
2820
2821 if (len > USHRT_MAX)
2822 return false;
2823
2824 buf = memdup_user(p, len);
2825 if (IS_ERR(buf))
2826 return false;
2827
2828 ret = !memchr_inv(buf, 0, len);
2829 kfree(buf);
2830 return ret;
2831}
2832
2833static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2834 size_t offset,
2835 size_t len)
2836{
2837 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2838}
2839
/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This helper lets the IB layer and low-level drivers decide whether the
 * destruction of the given uobject may be retried.  It checks the original
 * return code; if it was not success, the destruction is retryable depending
 * on the remove reason (@why) and the ucontext state (cleanup_retryable).
 *
 * Return: true if the destruction is retryable, otherwise false.
 */
2855static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2856 struct ib_uobject *uobj)
2857{
2858 return ret && (why == RDMA_REMOVE_DESTROY ||
2859 uobj->context->cleanup_retryable);
2860}
2861
/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered
 * by a ucontext cleanup.
 */
2871static inline int ib_destroy_usecnt(atomic_t *usecnt,
2872 enum rdma_remove_reason why,
2873 struct ib_uobject *uobj)
2874{
2875 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2876 return -EBUSY;
2877 return 0;
2878}
2879
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
2895bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2896 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2897
2898void ib_register_event_handler(struct ib_event_handler *event_handler);
2899void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2900void ib_dispatch_event(struct ib_event *event);
2901
2902int ib_query_port(struct ib_device *device,
2903 u8 port_num, struct ib_port_attr *port_attr);
2904
2905enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2906 u8 port_num);
2907
/**
 * rdma_cap_ib_switch - Check if the device is an IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit if the
 * device acts as an IB switch.
 *
 * Return: true if the device is an IB switch.
 */
2917static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2918{
2919 return device->is_switch;
2920}
2921
/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 * @device: Device to be checked
 *
 * Return: start port number
 */
2930static inline u8 rdma_start_port(const struct ib_device *device)
2931{
2932 return rdma_cap_ib_switch(device) ? 0 : 1;
2933}
2934
/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device
 * @iter: The unsigned int to store the port number
 */
2940#define rdma_for_each_port(device, iter) \
2941 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
2942 unsigned int, iter))); \
2943 iter <= rdma_end_port(device); (iter)++)
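
/*
 * Usage sketch (illustrative only): iterate every valid port number of a
 * device.  The iterator must be an unsigned int, which the BUILD_BUG_ON_ZERO()
 * above enforces.  "handle_roce_port()" is a hypothetical helper.
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(device, port) {
 *		if (rdma_protocol_roce(device, port))
 *			handle_roce_port(device, port);
 *	}
 */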
2944
/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 * @device: Device to be checked
 *
 * Return: last port number
 */
2953static inline u8 rdma_end_port(const struct ib_device *device)
2954{
2955 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2956}
2957
2958static inline int rdma_is_port_valid(const struct ib_device *device,
2959 unsigned int port)
2960{
2961 return (port >= rdma_start_port(device) &&
2962 port <= rdma_end_port(device));
2963}
2964
2965static inline bool rdma_is_grh_required(const struct ib_device *device,
2966 u8 port_num)
2967{
2968 return device->port_data[port_num].immutable.core_cap_flags &
2969 RDMA_CORE_PORT_IB_GRH_REQUIRED;
2970}
2971
2972static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2973{
2974 return device->port_data[port_num].immutable.core_cap_flags &
2975 RDMA_CORE_CAP_PROT_IB;
2976}
2977
2978static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2979{
2980 return device->port_data[port_num].immutable.core_cap_flags &
2981 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2982}
2983
2984static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2985{
2986 return device->port_data[port_num].immutable.core_cap_flags &
2987 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2988}
2989
2990static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2991{
2992 return device->port_data[port_num].immutable.core_cap_flags &
2993 RDMA_CORE_CAP_PROT_ROCE;
2994}
2995
2996static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2997{
2998 return device->port_data[port_num].immutable.core_cap_flags &
2999 RDMA_CORE_CAP_PROT_IWARP;
3000}
3001
3002static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3003{
3004 return rdma_protocol_ib(device, port_num) ||
3005 rdma_protocol_roce(device, port_num);
3006}
3007
3008static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3009{
3010 return device->port_data[port_num].immutable.core_cap_flags &
3011 RDMA_CORE_CAP_PROT_RAW_PACKET;
3012}
3013
3014static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3015{
3016 return device->port_data[port_num].immutable.core_cap_flags &
3017 RDMA_CORE_CAP_PROT_USNIC;
3018}
3019
/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
3032static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3033{
3034 return device->port_data[port_num].immutable.core_cap_flags &
3035 RDMA_CORE_CAP_IB_MAD;
3036}
3037
/**
 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * OPA devices extend and/or replace the InfiniBand Management Datagram (MAD)
 * universe with OPA MADs, which share the common MAD header but differ in
 * wire format.
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
3057static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3058{
3059 return device->port_data[port_num].immutable.core_cap_flags &
3060 RDMA_CORE_CAP_OPA_MAD;
3061}
3062
/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via the well known
 * Subnet Management Interface using directed route packets.
 *
 * Return: true if the port provides an SMI.
 */
3083static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3084{
3085 return device->port_data[port_num].immutable.core_cap_flags &
3086 RDMA_CORE_CAP_IB_SMI;
3087}
3088
/**
 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of the pre-defined General
 * Service Agents (GSA) accessed via the General Service Interface (GSI).
 * Its role is to facilitate establishment of connections between nodes as
 * well as other management related tasks for established connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
3104static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3105{
3106 return device->port_data[port_num].immutable.core_cap_flags &
3107 RDMA_CORE_CAP_IB_CM;
3108}
3109
/**
 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to rdma_cap_ib_cm(), but specific to iWARP connections which use a
 * different management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
3122static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3123{
3124 return device->port_data[port_num].immutable.core_cap_flags &
3125 RDMA_CORE_CAP_IW_CM;
3126}
3127
/**
 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices resolve routes to other hosts by querying the SA.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
3143static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3144{
3145 return device->port_data[port_num].immutable.core_cap_flags &
3146 RDMA_CORE_CAP_IB_SA;
3147}
3148
/**
 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group, and
 * should leave the group only after all queue pairs attached to the group
 * have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM for multicast group
 * membership.
 */
3166static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3167{
3168 return rdma_cap_ib_sa(device, port_num);
3169}
3170
/**
 * rdma_cap_af_ib - Check if the port of a device supports native InfiniBand
 * addressing.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to form a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via a
 * prescribed mechanism and port-specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
3184static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3185{
3186 return device->port_data[port_num].immutable.core_cap_flags &
3187 RDMA_CORE_CAP_AF_IB;
3188}
3189
/**
 * rdma_cap_eth_ah - Check if the port of a device needs Ethernet address
 * handles.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique to
 * fabricate GIDs from Ethernet/IP addresses native to the port.  When sending
 * connectionless datagrams, the proper headers for the fabric must be
 * injected manually.
 *
 * Return: true if the port is a RoCE port and requires a Global Route Header
 * built from the Ethernet Address Handle for connectionless packets.
 */
3206static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3207{
3208 return device->port_data[port_num].immutable.core_cap_flags &
3209 RDMA_CORE_CAP_ETH_AH;
3210}
3211
/**
 * rdma_cap_opa_ah - Check if the port of a device supports OPA address
 * handles.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports the
 * extended OPA addressing.
 */
3221static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3222{
3223 return (device->port_data[port_num].immutable.core_cap_flags &
3224 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3225}
3226
/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA port.
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return: the max MAD size required by the port, or 0 if the port does not
 * support MADs.
 */
3239static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3240{
3241 return device->port_data[port_num].immutable.max_mad_size;
3242}
3243
/**
 * rdma_cap_roce_gid_table - Check if the port of a device uses the RoCE GID
 * table mechanism to manage its GIDs.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism to manage its
 * GIDs.
 */
3257static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3258 u8 port_num)
3259{
3260 return rdma_protocol_roce(device, port_num) &&
3261 device->ops.add_gid && device->ops.del_gid;
3262}
3263
/*
 * Check if the device supports READ W/ INVALIDATE.
 */
3267static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3268{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has a safe way to invalidate.
	 */
3273 return rdma_protocol_iwarp(dev, port_num);
3274}
3275
/**
 * rdma_find_pg_bit - Find the page bit to use for a given address and a
 *   bitmap of HW supported page sizes.
 * @addr: address the chosen page size must be aligned to
 * @pgsz_bitmap: bitmap of HW supported page sizes
 */
3282static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3283 unsigned long pgsz_bitmap)
3284{
3285 unsigned long align;
3286 unsigned long pgsz;
3287
3288 align = addr & -addr;
3289
	/* Find page bit such that addr is aligned to the highest supported
	 * page size
	 */
3293 pgsz = pgsz_bitmap & ~(-align << 1);
3294 if (!pgsz)
3295 return __ffs(pgsz_bitmap);
3296
3297 return __fls(pgsz);
3298}
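
/*
 * Worked example (illustrative only): with addr = 0x203000 and a pgsz_bitmap
 * advertising 4K and 2M pages (BIT(12) | BIT(21)), addr is only 4K aligned,
 * so rdma_find_pg_bit() returns 12.  With addr = 0x400000 the 2M alignment is
 * satisfied and the function returns 21.
 */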
3299
3300int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3301 int state);
3302int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3303 struct ifla_vf_info *info);
3304int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3305 struct ifla_vf_stats *stats);
3306int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3307 int type);
3308
3309int ib_query_pkey(struct ib_device *device,
3310 u8 port_num, u16 index, u16 *pkey);
3311
3312int ib_modify_device(struct ib_device *device,
3313 int device_modify_mask,
3314 struct ib_device_modify *device_modify);
3315
3316int ib_modify_port(struct ib_device *device,
3317 u8 port_num, int port_modify_mask,
3318 struct ib_port_modify *port_modify);
3319
3320int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3321 u8 *port_num, u16 *index);
3322
3323int ib_find_pkey(struct ib_device *device,
3324 u8 port_num, u16 pkey, u16 *index);
3325
3326enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * clients to create their own keys or to roll their own memory
	 * registration schemes.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments.  Every use of it will log a warning
	 * in the kernel log.
	 */
3336 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3337};
3338
3339struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3340 const char *caller);
3341
3342#define ib_alloc_pd(device, flags) \
3343 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
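
/*
 * Usage sketch (illustrative only): kernel ULPs allocate a PD with
 * ib_alloc_pd(), which returns an ERR_PTR() on failure.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */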
3344
3345
3346
3347
3348
3349
3350void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3351
3352
3353
3354
3355
3356
3357
3358static inline void ib_dealloc_pd(struct ib_pd *pd)
3359{
3360 ib_dealloc_pd_user(pd, NULL);
3361}
3362
3363enum rdma_create_ah_flags {
	/* In a sleepable context */
3365 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3366};
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3378 u32 flags);
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3393 struct rdma_ah_attr *ah_attr,
3394 struct ib_udata *udata);
3395
3396
3397
3398
3399
3400
3401
3402
3403int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3404 enum rdma_network_type net_type,
3405 union ib_gid *sgid, union ib_gid *dgid);
3406
3407
3408
3409
3410
3411int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3432 const struct ib_wc *wc, const struct ib_grh *grh,
3433 struct rdma_ah_attr *ah_attr);
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3448 const struct ib_grh *grh, u8 port_num);
3449
3450
3451
3452
3453
3454
3455
3456
3457int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3458
3459
3460
3461
3462
3463
3464
3465
3466int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3467
3468enum rdma_destroy_ah_flags {
	/* In a sleepable context */
3470 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3471};
3472
3473
3474
3475
3476
3477
3478
3479int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3480
3481
3482
3483
3484
3485
3486
3487
3488static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3489{
3490 return rdma_destroy_ah_user(ah, flags, NULL);
3491}
3492
/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
3506struct ib_srq *ib_create_srq(struct ib_pd *pd,
3507 struct ib_srq_init_attr *srq_init_attr);
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521int ib_modify_srq(struct ib_srq *srq,
3522 struct ib_srq_attr *srq_attr,
3523 enum ib_srq_attr_mask srq_attr_mask);
3524
3525
3526
3527
3528
3529
3530
3531int ib_query_srq(struct ib_srq *srq,
3532 struct ib_srq_attr *srq_attr);
3533
3534
3535
3536
3537
3538
3539int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3540
3541
3542
3543
3544
3545
3546
3547static inline int ib_destroy_srq(struct ib_srq *srq)
3548{
3549 return ib_destroy_srq_user(srq, NULL);
3550}
3551
3552
3553
3554
3555
3556
3557
3558
3559static inline int ib_post_srq_recv(struct ib_srq *srq,
3560 const struct ib_recv_wr *recv_wr,
3561 const struct ib_recv_wr **bad_recv_wr)
3562{
3563 const struct ib_recv_wr *dummy;
3564
3565 return srq->device->ops.post_srq_recv(srq, recv_wr,
3566 bad_recv_wr ? : &dummy);
3567}
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3579 struct ib_qp_init_attr *qp_init_attr,
3580 struct ib_udata *udata);
3581
/**
 * ib_create_qp - Creates a kernel QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 *
 * NOTE: for user QPs use ib_create_qp_user() with valid udata!
 */
3593static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3594 struct ib_qp_init_attr *qp_init_attr)
3595{
3596 return ib_create_qp_user(pd, qp_init_attr, NULL);
3597}
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610int ib_modify_qp_with_udata(struct ib_qp *qp,
3611 struct ib_qp_attr *attr,
3612 int attr_mask,
3613 struct ib_udata *udata);
3614
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
3624int ib_modify_qp(struct ib_qp *qp,
3625 struct ib_qp_attr *qp_attr,
3626 int qp_attr_mask);
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639int ib_query_qp(struct ib_qp *qp,
3640 struct ib_qp_attr *qp_attr,
3641 int qp_attr_mask,
3642 struct ib_qp_init_attr *qp_init_attr);
3643
3644
3645
3646
3647
3648
3649int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3650
3651
3652
3653
3654
3655
3656
3657static inline int ib_destroy_qp(struct ib_qp *qp)
3658{
3659 return ib_destroy_qp_user(qp, NULL);
3660}
3661
3662
3663
3664
3665
3666
3667
3668
3669struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3670 struct ib_qp_open_attr *qp_open_attr);
3671
3672
3673
3674
3675
3676
3677
3678
3679int ib_close_qp(struct ib_qp *qp);
3680
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
3694static inline int ib_post_send(struct ib_qp *qp,
3695 const struct ib_send_wr *send_wr,
3696 const struct ib_send_wr **bad_send_wr)
3697{
3698 const struct ib_send_wr *dummy;
3699
3700 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3701}
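
/*
 * Usage sketch (illustrative only): post a single signaled SEND carrying one
 * SGE.  "dma_addr", "len" and the QP/PD pointers are hypothetical, and the
 * buffer is assumed to be DMA mapped already.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */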
3702
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
3711static inline int ib_post_recv(struct ib_qp *qp,
3712 const struct ib_recv_wr *recv_wr,
3713 const struct ib_recv_wr **bad_recv_wr)
3714{
3715 const struct ib_recv_wr *dummy;
3716
3717 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3718}
3719
3720struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3721 int nr_cqe, int comp_vector,
3722 enum ib_poll_context poll_ctx,
3723 const char *caller, struct ib_udata *udata);
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3735 void *private, int nr_cqe,
3736 int comp_vector,
3737 enum ib_poll_context poll_ctx,
3738 struct ib_udata *udata)
3739{
3740 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3741 KBUILD_MODNAME, udata);
3742}
3743
/**
 * ib_alloc_cq - Allocates a kernel CQ structure.
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: The number of CQEs to be allocated
 * @comp_vector: Completion vector used for the IRQs
 * @poll_ctx: Context used for polling the CQ
 */
3754static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3755 int nr_cqe, int comp_vector,
3756 enum ib_poll_context poll_ctx)
3757{
3758 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3759 NULL);
3760}
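
/*
 * Usage sketch (illustrative only): allocate a CQ polled from softirq context
 * and free it again.  The size of 128 CQEs is an arbitrary example value.
 *
 *	struct ib_cq *cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */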
3761
3762struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3763 int nr_cqe, enum ib_poll_context poll_ctx,
3764 const char *caller);
3765
3766
3767
3768
3769
3770
3771
3772
3773static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3774 void *private, int nr_cqe,
3775 enum ib_poll_context poll_ctx)
3776{
3777 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3778 KBUILD_MODNAME);
3779}
3780
3781
3782
3783
3784
3785
3786void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3787
3788
3789
3790
3791
3792
3793
3794static inline void ib_free_cq(struct ib_cq *cq)
3795{
3796 ib_free_cq_user(cq, NULL);
3797}
3798
3799int ib_process_cq_direct(struct ib_cq *cq, int budget);
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814struct ib_cq *__ib_create_cq(struct ib_device *device,
3815 ib_comp_handler comp_handler,
3816 void (*event_handler)(struct ib_event *, void *),
3817 void *cq_context,
3818 const struct ib_cq_init_attr *cq_attr,
3819 const char *caller);
3820#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3821 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3822
3823
3824
3825
3826
3827
3828
3829
3830int ib_resize_cq(struct ib_cq *cq, int cqe);
3831
3832
3833
3834
3835
3836
3837
3838
3839int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3840
3841
3842
3843
3844
3845
3846int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3847
3848
3849
3850
3851
3852
3853
3854static inline void ib_destroy_cq(struct ib_cq *cq)
3855{
3856 ib_destroy_cq_user(cq, NULL);
3857}
3858
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
3871static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3872 struct ib_wc *wc)
3873{
3874 return cq->device->ops.poll_cq(cq, num_entries, wc);
3875}
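
/*
 * Usage sketch (illustrative only): drain up to eight completions and check
 * their status.  "handle_wc()" is a hypothetical helper.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *	for (i = 0; i < n; i++) {
 *		if (wc[i].status != IB_WC_SUCCESS)
 *			pr_err("wc failed with status %d\n", wc[i].status);
 *		else
 *			handle_wc(&wc[i]);
 *	}
 */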
3876
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
3904static inline int ib_req_notify_cq(struct ib_cq *cq,
3905 enum ib_cq_notify_flags flags)
3906{
3907 return cq->device->ops.req_notify_cq(cq, flags);
3908}
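
/*
 * Usage sketch (illustrative only): the common re-arm/re-poll loop that avoids
 * missing completions which race with the notification request.
 * "handle_wc()" is a hypothetical helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */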
3909
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
3917static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3918{
3919 return cq->device->ops.req_ncomp_notif ?
3920 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3921 -ENOSYS;
3922}
3923
3924
3925
3926
3927
3928
3929static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3930{
3931 return dma_mapping_error(dev->dma_device, dma_addr);
3932}
3933
3934
3935
3936
3937
3938
3939
3940
3941static inline u64 ib_dma_map_single(struct ib_device *dev,
3942 void *cpu_addr, size_t size,
3943 enum dma_data_direction direction)
3944{
3945 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3946}
3947
3948
3949
3950
3951
3952
3953
3954
3955static inline void ib_dma_unmap_single(struct ib_device *dev,
3956 u64 addr, size_t size,
3957 enum dma_data_direction direction)
3958{
3959 dma_unmap_single(dev->dma_device, addr, size, direction);
3960}
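
/*
 * Usage sketch (illustrative only): map a kernel buffer for a send, check the
 * mapping and unmap it when the transfer has completed.  "buf" and "len" are
 * hypothetical.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */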
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970static inline u64 ib_dma_map_page(struct ib_device *dev,
3971 struct page *page,
3972 unsigned long offset,
3973 size_t size,
3974 enum dma_data_direction direction)
3975{
3976 return dma_map_page(dev->dma_device, page, offset, size, direction);
3977}
3978
3979
3980
3981
3982
3983
3984
3985
3986static inline void ib_dma_unmap_page(struct ib_device *dev,
3987 u64 addr, size_t size,
3988 enum dma_data_direction direction)
3989{
3990 dma_unmap_page(dev->dma_device, addr, size, direction);
3991}
3992
3993
3994
3995
3996
3997
3998
3999
4000static inline int ib_dma_map_sg(struct ib_device *dev,
4001 struct scatterlist *sg, int nents,
4002 enum dma_data_direction direction)
4003{
4004 return dma_map_sg(dev->dma_device, sg, nents, direction);
4005}
4006
4007
4008
4009
4010
4011
4012
4013
4014static inline void ib_dma_unmap_sg(struct ib_device *dev,
4015 struct scatterlist *sg, int nents,
4016 enum dma_data_direction direction)
4017{
4018 dma_unmap_sg(dev->dma_device, sg, nents, direction);
4019}
4020
4021static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4022 struct scatterlist *sg, int nents,
4023 enum dma_data_direction direction,
4024 unsigned long dma_attrs)
4025{
4026 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4027 dma_attrs);
4028}
4029
4030static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4031 struct scatterlist *sg, int nents,
4032 enum dma_data_direction direction,
4033 unsigned long dma_attrs)
4034{
4035 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4036}
4037
4038
4039
4040
4041
4042
4043
4044static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4045{
4046 struct device_dma_parameters *p = dev->dma_device->dma_parms;
4047
4048 return p ? p->max_segment_size : UINT_MAX;
4049}
4050
4051
4052
4053
4054
4055
4056
4057
4058static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4059 u64 addr,
4060 size_t size,
4061 enum dma_data_direction dir)
4062{
4063 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4064}
4065
4066
4067
4068
4069
4070
4071
4072
4073static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4074 u64 addr,
4075 size_t size,
4076 enum dma_data_direction dir)
4077{
4078 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4079}
4080
4081
4082
4083
4084
4085
4086
4087
4088static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4089 size_t size,
4090 dma_addr_t *dma_handle,
4091 gfp_t flag)
4092{
4093 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4094}
4095
4096
4097
4098
4099
4100
4101
4102
4103static inline void ib_dma_free_coherent(struct ib_device *dev,
4104 size_t size, void *cpu_addr,
4105 dma_addr_t dma_handle)
4106{
4107 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4108}
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129static inline int ib_dereg_mr(struct ib_mr *mr)
4130{
4131 return ib_dereg_mr_user(mr, NULL);
4132}
4133
4134struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4135 u32 max_num_sg, struct ib_udata *udata);
4136
4137static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4138 enum ib_mr_type mr_type, u32 max_num_sg)
4139{
4140 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4141}
4142
4143struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4144 u32 max_num_data_sg,
4145 u32 max_num_meta_sg);
4146
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: struct ib_mr pointer to be updated
 * @newkey: new key to be used
 */
4153static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4154{
4155 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4156 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4157}
4158
/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
4164static inline u32 ib_inc_rkey(u32 rkey)
4165{
4166 const u32 mask = 0x000000ff;
4167 return ((rkey + 1) & mask) | (rkey & ~mask);
4168}
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4180 int mr_access_flags,
4181 struct ib_fmr_attr *fmr_attr);
4182
4183
4184
4185
4186
4187
4188
4189
4190static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4191 u64 *page_list, int list_len,
4192 u64 iova)
4193{
4194 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
4195}
4196
4197
4198
4199
4200
4201int ib_unmap_fmr(struct list_head *fmr_list);
4202
4203
4204
4205
4206
4207int ib_dealloc_fmr(struct ib_fmr *fmr);
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4222
4223
4224
4225
4226
4227
4228
4229int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4230
4231
4232
4233
4234
4235
4236struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4237#define ib_alloc_xrcd(device) \
4238 __ib_alloc_xrcd((device), KBUILD_MODNAME)
4239
4240
4241
4242
4243
4244
4245int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4246
4247static inline int ib_check_mr_access(int flags)
4248{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
4253 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4254 !(flags & IB_ACCESS_LOCAL_WRITE))
4255 return -EINVAL;
4256
4257 return 0;
4258}
4259
4260static inline bool ib_access_writable(int access_flags)
4261{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set.  "Local write" and "remote write" obviously
	 * require write access.  "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
4269 return access_flags &
4270 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4271 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4272}
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4287 struct ib_mr_status *mr_status);
4288
4289
/**
 * ib_device_try_get - Hold a registration lock
 * @dev: The device to lock
 *
 * A device under an active registration lock cannot become unregistered.  It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered.  Uses that only require the device pointer
 * to be valid should use get_device(&ibdev->dev) to hold the memory.
 */
4302static inline bool ib_device_try_get(struct ib_device *dev)
4303{
4304 return refcount_inc_not_zero(&dev->refcount);
4305}
4306
4307void ib_device_put(struct ib_device *device);
4308struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4309 enum rdma_driver_id driver_id);
4310struct ib_device *ib_device_get_by_name(const char *name,
4311 enum rdma_driver_id driver_id);
4312struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4313 u16 pkey, const union ib_gid *gid,
4314 const struct sockaddr *addr);
4315int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4316 unsigned int port);
4317struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4318
4319struct ib_wq *ib_create_wq(struct ib_pd *pd,
4320 struct ib_wq_init_attr *init_attr);
4321int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4322int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4323 u32 wq_attr_mask);
4324struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4325 struct ib_rwq_ind_table_init_attr*
4326 wq_ind_table_init_attr);
4327int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4328
4329int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4330 unsigned int *sg_offset, unsigned int page_size);
4331int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4332 int data_sg_nents, unsigned int *data_sg_offset,
4333 struct scatterlist *meta_sg, int meta_sg_nents,
4334 unsigned int *meta_sg_offset, unsigned int page_size);
4335
4336static inline int
4337ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4338 unsigned int *sg_offset, unsigned int page_size)
4339{
4340 int n;
4341
4342 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4343 mr->iova = 0;
4344
4345 return n;
4346}
4347
4348int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4349 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
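
/*
 * Usage sketch (illustrative only): map a DMA-mapped SG list onto a fast
 * registration MR allocated with ib_alloc_mr().  A short map is treated as an
 * error here; real ULPs may instead retry with a smaller request.
 *
 *	count = ib_dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *
 *	n = ib_map_mr_sg(mr, sg, count, NULL, PAGE_SIZE);
 *	if (n < count) {
 *		ib_dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *		return -EINVAL;
 *	}
 */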
4350
4351void ib_drain_rq(struct ib_qp *qp);
4352void ib_drain_sq(struct ib_qp *qp);
4353void ib_drain_qp(struct ib_qp *qp);
4354
4355int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4356
4357static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4358{
4359 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4360 return attr->roce.dmac;
4361 return NULL;
4362}
4363
4364static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4365{
4366 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4367 attr->ib.dlid = (u16)dlid;
4368 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4369 attr->opa.dlid = dlid;
4370}
4371
4372static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4373{
4374 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4375 return attr->ib.dlid;
4376 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4377 return attr->opa.dlid;
4378 return 0;
4379}
4380
4381static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4382{
4383 attr->sl = sl;
4384}
4385
4386static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4387{
4388 return attr->sl;
4389}
4390
4391static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4392 u8 src_path_bits)
4393{
4394 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4395 attr->ib.src_path_bits = src_path_bits;
4396 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4397 attr->opa.src_path_bits = src_path_bits;
4398}
4399
4400static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4401{
4402 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4403 return attr->ib.src_path_bits;
4404 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4405 return attr->opa.src_path_bits;
4406 return 0;
4407}
4408
4409static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4410 bool make_grd)
4411{
4412 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4413 attr->opa.make_grd = make_grd;
4414}
4415
4416static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4417{
4418 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4419 return attr->opa.make_grd;
4420 return false;
4421}
4422
4423static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4424{
4425 attr->port_num = port_num;
4426}
4427
4428static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4429{
4430 return attr->port_num;
4431}
4432
4433static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4434 u8 static_rate)
4435{
4436 attr->static_rate = static_rate;
4437}
4438
4439static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4440{
4441 return attr->static_rate;
4442}
4443
4444static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4445 enum ib_ah_flags flag)
4446{
4447 attr->ah_flags = flag;
4448}
4449
4450static inline enum ib_ah_flags
4451 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4452{
4453 return attr->ah_flags;
4454}
4455
4456static inline const struct ib_global_route
4457 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4458{
4459 return &attr->grh;
4460}
4461
4462
4463static inline struct ib_global_route
4464 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4465{
4466 return &attr->grh;
4467}
4468
4469static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4470{
4471 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4472
4473 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4474}
4475
4476static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4477 __be64 prefix)
4478{
4479 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4480
4481 grh->dgid.global.subnet_prefix = prefix;
4482}
4483
4484static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4485 __be64 if_id)
4486{
4487 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4488
4489 grh->dgid.global.interface_id = if_id;
4490}
4491
4492static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4493 union ib_gid *dgid, u32 flow_label,
4494 u8 sgid_index, u8 hop_limit,
4495 u8 traffic_class)
4496{
4497 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4498
4499 attr->ah_flags = IB_AH_GRH;
4500 if (dgid)
4501 grh->dgid = *dgid;
4502 grh->flow_label = flow_label;
4503 grh->sgid_index = sgid_index;
4504 grh->hop_limit = hop_limit;
4505 grh->traffic_class = traffic_class;
4506 grh->sgid_attr = NULL;
4507}
4508
4509void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4510void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4511 u32 flow_label, u8 hop_limit, u8 traffic_class,
4512 const struct ib_gid_attr *sgid_attr);
4513void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4514 const struct rdma_ah_attr *src);
4515void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4516 const struct rdma_ah_attr *new);
4517void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4518
/**
 * rdma_ah_find_type - Return address handle type.
 * @dev: Device to be checked
 * @port_num: Port number
 */
4525static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4526 u8 port_num)
4527{
4528 if (rdma_protocol_roce(dev, port_num))
4529 return RDMA_AH_ATTR_TYPE_ROCE;
4530 if (rdma_protocol_ib(dev, port_num)) {
4531 if (rdma_cap_opa_ah(dev, port_num))
4532 return RDMA_AH_ATTR_TYPE_OPA;
4533 return RDMA_AH_ATTR_TYPE_IB;
4534 }
4535
4536 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4537}
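
/*
 * Usage sketch (illustrative only): fill a GRH-based address handle attribute
 * using the accessors above.  "dgid", "sgid_index" and "port_num" are
 * hypothetical inputs resolved elsewhere (e.g. from a path record or CM
 * event).
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 *
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 */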
4538
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to get
 *     the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
4548static inline u16 ib_lid_cpu16(u32 lid)
4549{
4550 WARN_ON_ONCE(lid & 0xFFFF0000);
4551 return (u16)lid;
4552}
4553
/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
4559static inline __be16 ib_lid_be16(u32 lid)
4560{
4561 WARN_ON_ONCE(lid & 0xFFFF0000);
4562 return cpu_to_be16((u16)lid);
4563}
4564
/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Return: NULL if the completion vector index is out of range or the driver
 * does not implement get_vector_affinity; otherwise the cpumask for the
 * given completion vector.
 */
4575static inline const struct cpumask *
4576ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4577{
4578 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4579 !device->ops.get_vector_affinity)
4580 return NULL;
4581
4582 return device->ops.get_vector_affinity(device, comp_vector);
4583
4584}
4585
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their GIDs, as needed, to the relevant RoCE devices.
 *
 * @device: the rdma device
 */
4592void rdma_roce_rescan_device(struct ib_device *ibdev);
4593
4594struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4595
4596int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4597
4598struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4599 enum rdma_netdev_t type, const char *name,
4600 unsigned char name_assign_type,
4601 void (*setup)(struct net_device *));
4602
4603int rdma_init_netdev(struct ib_device *device, u8 port_num,
4604 enum rdma_netdev_t type, const char *name,
4605 unsigned char name_assign_type,
4606 void (*setup)(struct net_device *),
4607 struct net_device *netdev);
4608
/**
 * rdma_set_device_sysfs_group - Set the device attribute group to have
 *				 driver specific sysfs entries in the
 *				 infiniband class.
 *
 * @dev:	device pointer
 * @group:	Pointer to the group which should be added when the device
 *		is registered with sysfs.
 *
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group per device of sysfs attributes.
 *
 * NOTE: New drivers should not make use of this API; instead new device
 * parameters should be exposed via netlink commands.  This API and mechanism
 * exist only for existing drivers.
 */
4624static inline void
4625rdma_set_device_sysfs_group(struct ib_device *dev,
4626 const struct attribute_group *group)
4627{
4628 dev->groups[1] = group;
4629}
4630
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device:	device pointer for which the ib_device pointer is retrieved
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from the device.
 */
4639static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4640{
4641 struct ib_core_device *coredev =
4642 container_of(device, struct ib_core_device, dev);
4643
4644 return coredev->owner;
4645}
4646
/**
 * rdma_device_to_drv_device - Helper macro to reach back to the driver's
 *			       ib_device holder structure from a device
 *			       pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers who have exposed sysfs entries using
 * rdma_set_device_sysfs_group().
 */
4655#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4656 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4657
4658bool rdma_dev_access_netns(const struct ib_device *device,
4659 const struct net *net);
4660#endif
4661