1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
71
72#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
73
74struct ib_umem_odp;
75
76extern struct workqueue_struct *ib_wq;
77extern struct workqueue_struct *ib_comp_wq;
78extern struct workqueue_struct *ib_comp_unbound_wq;
79
/*
 * Device-aware printk helpers: each emits a message prefixed with the
 * IB device's identity, one wrapper per kernel log level.  All are
 * marked __cold (slow/error paths) and are printf-format checked.
 */
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
 const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
97
#if defined(CONFIG_DYNAMIC_DEBUG)
/* Route ibdev_dbg() through the dynamic-debug facility when available. */
#define ibdev_dbg(__dev, format, args...) \
 dynamic_ibdev_dbg(__dev, format, ##args)
#elif defined(DEBUG)
/* DEBUG builds without dynamic debug: always print at KERN_DEBUG. */
#define ibdev_dbg(__dev, format, args...) \
 ibdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
/* Otherwise an empty inline stub: no code emitted, but the __printf()
 * attribute still gives compile-time format-string checking. */
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
109
/* 128-bit Global Identifier, viewable either as raw bytes or as the
 * InfiniBand subnet-prefix / interface-id pair (both big-endian). */
union ib_gid {
 u8 raw[16];
 struct {
  __be64 subnet_prefix;
  __be64 interface_id;
 } global;
};
117
118extern union ib_gid zgid;
119
/* GID table entry types.
 *
 * IB_GID_TYPE_IB and IB_GID_TYPE_ROCE deliberately share value 0:
 * a native IB GID and a RoCE v1 GID use the same table encoding and
 * are told apart by the port's link layer, not by this type.
 * IB_GID_TYPE_SIZE is the number of distinct encodings (array sizing
 * sentinel), not a real GID type.
 */
enum ib_gid_type {

 IB_GID_TYPE_IB = 0,
 IB_GID_TYPE_ROCE = 0,
 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
 IB_GID_TYPE_SIZE
};
127
#define ROCE_V2_UDP_DPORT 4791 /* IANA-assigned UDP dest port for RoCE v2 */

/* One GID table entry together with its resolved attributes. */
struct ib_gid_attr {
 struct net_device __rcu *ndev; /* associated netdev; RCU-protected */
 struct ib_device *device; /* device owning the GID table */
 union ib_gid gid;
 enum ib_gid_type gid_type;
 u16 index; /* index within the port's GID table */
 u8 port_num; /* port this entry belongs to */
};
137
138enum {
139
140 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
141};
142
143enum rdma_transport_type {
144 RDMA_TRANSPORT_IB,
145 RDMA_TRANSPORT_IWARP,
146 RDMA_TRANSPORT_USNIC,
147 RDMA_TRANSPORT_USNIC_UDP,
148 RDMA_TRANSPORT_UNSPECIFIED,
149};
150
151enum rdma_protocol_type {
152 RDMA_PROTOCOL_IB,
153 RDMA_PROTOCOL_IBOE,
154 RDMA_PROTOCOL_IWARP,
155 RDMA_PROTOCOL_USNIC_UDP
156};
157
158__attribute_const__ enum rdma_transport_type
159rdma_node_get_transport(unsigned int node_type);
160
161enum rdma_network_type {
162 RDMA_NETWORK_IB,
163 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
164 RDMA_NETWORK_IPV4,
165 RDMA_NETWORK_IPV6
166};
167
168static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
169{
170 if (network_type == RDMA_NETWORK_IPV4 ||
171 network_type == RDMA_NETWORK_IPV6)
172 return IB_GID_TYPE_ROCE_UDP_ENCAP;
173
174
175 return IB_GID_TYPE_IB;
176}
177
178static inline enum rdma_network_type
179rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
180{
181 if (attr->gid_type == IB_GID_TYPE_IB)
182 return RDMA_NETWORK_IB;
183
184 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
185 return RDMA_NETWORK_IPV4;
186 else
187 return RDMA_NETWORK_IPV6;
188}
189
190enum rdma_link_layer {
191 IB_LINK_LAYER_UNSPECIFIED,
192 IB_LINK_LAYER_INFINIBAND,
193 IB_LINK_LAYER_ETHERNET,
194};
195
196enum ib_device_cap_flags {
197 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
198 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
199 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
200 IB_DEVICE_RAW_MULTI = (1 << 3),
201 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
202 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
203 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
204 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
205 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
206
207 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
208 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
209 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
210 IB_DEVICE_SRQ_RESIZE = (1 << 13),
211 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
212
213
214
215
216
217
218
219
220 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
221
222 IB_DEVICE_MEM_WINDOW = (1 << 17),
223
224
225
226
227
228
229
230 IB_DEVICE_UD_IP_CSUM = (1 << 18),
231 IB_DEVICE_UD_TSO = (1 << 19),
232 IB_DEVICE_XRC = (1 << 20),
233
234
235
236
237
238
239
240
241
242
243 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
244 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
245 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
246 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
247 IB_DEVICE_RC_IP_CSUM = (1 << 25),
248
249 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
250
251
252
253
254
255
256 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
257 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
258 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
259 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
260 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
261 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
262
263 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
264 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
265
266 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
267 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
268};
269
270enum ib_atomic_cap {
271 IB_ATOMIC_NONE,
272 IB_ATOMIC_HCA,
273 IB_ATOMIC_GLOB
274};
275
276enum ib_odp_general_cap_bits {
277 IB_ODP_SUPPORT = 1 << 0,
278 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
279};
280
281enum ib_odp_transport_cap_bits {
282 IB_ODP_SUPPORT_SEND = 1 << 0,
283 IB_ODP_SUPPORT_RECV = 1 << 1,
284 IB_ODP_SUPPORT_WRITE = 1 << 2,
285 IB_ODP_SUPPORT_READ = 1 << 3,
286 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
287 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
288};
289
290struct ib_odp_caps {
291 uint64_t general_caps;
292 struct {
293 uint32_t rc_odp_caps;
294 uint32_t uc_odp_caps;
295 uint32_t ud_odp_caps;
296 uint32_t xrc_odp_caps;
297 } per_transport_caps;
298};
299
300struct ib_rss_caps {
301
302
303
304
305 u32 supported_qpts;
306 u32 max_rwq_indirection_tables;
307 u32 max_rwq_indirection_table_size;
308};
309
310enum ib_tm_cap_flags {
311
312 IB_TM_CAP_RNDV_RC = 1 << 0,
313};
314
315struct ib_tm_caps {
316
317 u32 max_rndv_hdr_size;
318
319 u32 max_num_tags;
320
321 u32 flags;
322
323 u32 max_ops;
324
325 u32 max_sge;
326};
327
328struct ib_cq_init_attr {
329 unsigned int cqe;
330 int comp_vector;
331 u32 flags;
332};
333
334enum ib_cq_attr_mask {
335 IB_CQ_MODERATE = 1 << 0,
336};
337
338struct ib_cq_caps {
339 u16 max_cq_moderation_count;
340 u16 max_cq_moderation_period;
341};
342
343struct ib_dm_mr_attr {
344 u64 length;
345 u64 offset;
346 u32 access_flags;
347};
348
349struct ib_dm_alloc_attr {
350 u64 length;
351 u32 alignment;
352 u32 flags;
353};
354
355struct ib_device_attr {
356 u64 fw_ver;
357 __be64 sys_image_guid;
358 u64 max_mr_size;
359 u64 page_size_cap;
360 u32 vendor_id;
361 u32 vendor_part_id;
362 u32 hw_ver;
363 int max_qp;
364 int max_qp_wr;
365 u64 device_cap_flags;
366 int max_send_sge;
367 int max_recv_sge;
368 int max_sge_rd;
369 int max_cq;
370 int max_cqe;
371 int max_mr;
372 int max_pd;
373 int max_qp_rd_atom;
374 int max_ee_rd_atom;
375 int max_res_rd_atom;
376 int max_qp_init_rd_atom;
377 int max_ee_init_rd_atom;
378 enum ib_atomic_cap atomic_cap;
379 enum ib_atomic_cap masked_atomic_cap;
380 int max_ee;
381 int max_rdd;
382 int max_mw;
383 int max_raw_ipv6_qp;
384 int max_raw_ethy_qp;
385 int max_mcast_grp;
386 int max_mcast_qp_attach;
387 int max_total_mcast_qp_attach;
388 int max_ah;
389 int max_fmr;
390 int max_map_per_fmr;
391 int max_srq;
392 int max_srq_wr;
393 int max_srq_sge;
394 unsigned int max_fast_reg_page_list_len;
395 unsigned int max_pi_fast_reg_page_list_len;
396 u16 max_pkeys;
397 u8 local_ca_ack_delay;
398 int sig_prot_cap;
399 int sig_guard_cap;
400 struct ib_odp_caps odp_caps;
401 uint64_t timestamp_mask;
402 uint64_t hca_core_clock;
403 struct ib_rss_caps rss_caps;
404 u32 max_wq_type_rq;
405 u32 raw_packet_caps;
406 struct ib_tm_caps tm_caps;
407 struct ib_cq_caps cq_caps;
408 u64 max_dm_size;
409};
410
411enum ib_mtu {
412 IB_MTU_256 = 1,
413 IB_MTU_512 = 2,
414 IB_MTU_1024 = 3,
415 IB_MTU_2048 = 4,
416 IB_MTU_4096 = 5
417};
418
419static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
420{
421 switch (mtu) {
422 case IB_MTU_256: return 256;
423 case IB_MTU_512: return 512;
424 case IB_MTU_1024: return 1024;
425 case IB_MTU_2048: return 2048;
426 case IB_MTU_4096: return 4096;
427 default: return -1;
428 }
429}
430
431static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
432{
433 if (mtu >= 4096)
434 return IB_MTU_4096;
435 else if (mtu >= 2048)
436 return IB_MTU_2048;
437 else if (mtu >= 1024)
438 return IB_MTU_1024;
439 else if (mtu >= 512)
440 return IB_MTU_512;
441 else
442 return IB_MTU_256;
443}
444
445enum ib_port_state {
446 IB_PORT_NOP = 0,
447 IB_PORT_DOWN = 1,
448 IB_PORT_INIT = 2,
449 IB_PORT_ARMED = 3,
450 IB_PORT_ACTIVE = 4,
451 IB_PORT_ACTIVE_DEFER = 5
452};
453
/* Physical link width encodings.  Each width has its own bit, so the
 * numeric values are not ordered by lane count (2X got the next free
 * bit, 16).  Use ib_width_enum_to_int() to recover the lane count. */
enum ib_port_width {
 IB_WIDTH_1X = 1,
 IB_WIDTH_2X = 16,
 IB_WIDTH_4X = 2,
 IB_WIDTH_8X = 4,
 IB_WIDTH_12X = 8
};
461
462static inline int ib_width_enum_to_int(enum ib_port_width width)
463{
464 switch (width) {
465 case IB_WIDTH_1X: return 1;
466 case IB_WIDTH_2X: return 2;
467 case IB_WIDTH_4X: return 4;
468 case IB_WIDTH_8X: return 8;
469 case IB_WIDTH_12X: return 12;
470 default: return -1;
471 }
472}
473
474enum ib_port_speed {
475 IB_SPEED_SDR = 1,
476 IB_SPEED_DDR = 2,
477 IB_SPEED_QDR = 4,
478 IB_SPEED_FDR10 = 8,
479 IB_SPEED_FDR = 16,
480 IB_SPEED_EDR = 32,
481 IB_SPEED_HDR = 64
482};
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
/*
 * struct rdma_hw_stats - cached hardware counters for a device or port.
 * @lock: serializes updates of the cached values (64-bit counters are
 *        not written atomically on 32-bit machines)
 * @timestamp: jiffies when @value[] was last refreshed
 * @lifespan: jiffies the cached values stay valid before a refresh;
 *            set from milliseconds in rdma_alloc_hw_stats_struct()
 * @names: one static name per counter; caller-owned, must outlive this
 * @num_counters: number of entries in @names[] and @value[]
 * @value: flexible array of cached counter values
 */
struct rdma_hw_stats {
 struct mutex lock;
 unsigned long timestamp;
 unsigned long lifespan;
 const char * const *names;
 int num_counters;
 u64 value[];
};
511
512#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
513
514
515
516
517
518
519
520static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
521 const char * const *names, int num_counters,
522 unsigned long lifespan)
523{
524 struct rdma_hw_stats *stats;
525
526 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
527 GFP_KERNEL);
528 if (!stats)
529 return NULL;
530 stats->names = names;
531 stats->num_counters = num_counters;
532 stats->lifespan = msecs_to_jiffies(lifespan);
533
534 return stats;
535}
536
537
538
539
540
541
542#define RDMA_CORE_CAP_IB_MAD 0x00000001
543#define RDMA_CORE_CAP_IB_SMI 0x00000002
544#define RDMA_CORE_CAP_IB_CM 0x00000004
545#define RDMA_CORE_CAP_IW_CM 0x00000008
546#define RDMA_CORE_CAP_IB_SA 0x00000010
547#define RDMA_CORE_CAP_OPA_MAD 0x00000020
548
549
550#define RDMA_CORE_CAP_AF_IB 0x00001000
551#define RDMA_CORE_CAP_ETH_AH 0x00002000
552#define RDMA_CORE_CAP_OPA_AH 0x00004000
553#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
554
555
556#define RDMA_CORE_CAP_PROT_IB 0x00100000
557#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
558#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
559#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
560#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
561#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
562
563#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
564 | RDMA_CORE_CAP_PROT_ROCE \
565 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
566
567#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
568 | RDMA_CORE_CAP_IB_MAD \
569 | RDMA_CORE_CAP_IB_SMI \
570 | RDMA_CORE_CAP_IB_CM \
571 | RDMA_CORE_CAP_IB_SA \
572 | RDMA_CORE_CAP_AF_IB)
573#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
574 | RDMA_CORE_CAP_IB_MAD \
575 | RDMA_CORE_CAP_IB_CM \
576 | RDMA_CORE_CAP_AF_IB \
577 | RDMA_CORE_CAP_ETH_AH)
578#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
579 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
580 | RDMA_CORE_CAP_IB_MAD \
581 | RDMA_CORE_CAP_IB_CM \
582 | RDMA_CORE_CAP_AF_IB \
583 | RDMA_CORE_CAP_ETH_AH)
584#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
585 | RDMA_CORE_CAP_IW_CM)
586#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
587 | RDMA_CORE_CAP_OPA_MAD)
588
589#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
590
591#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
592
593struct ib_port_attr {
594 u64 subnet_prefix;
595 enum ib_port_state state;
596 enum ib_mtu max_mtu;
597 enum ib_mtu active_mtu;
598 int gid_tbl_len;
599 unsigned int ip_gids:1;
600
601 u32 port_cap_flags;
602 u32 max_msg_sz;
603 u32 bad_pkey_cntr;
604 u32 qkey_viol_cntr;
605 u16 pkey_tbl_len;
606 u32 sm_lid;
607 u32 lid;
608 u8 lmc;
609 u8 max_vl_num;
610 u8 sm_sl;
611 u8 subnet_timeout;
612 u8 init_type_reply;
613 u8 active_width;
614 u8 active_speed;
615 u8 phys_state;
616 u16 port_cap_flags2;
617};
618
619enum ib_device_modify_flags {
620 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
621 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
622};
623
624#define IB_DEVICE_NODE_DESC_MAX 64
625
626struct ib_device_modify {
627 u64 sys_image_guid;
628 char node_desc[IB_DEVICE_NODE_DESC_MAX];
629};
630
631enum ib_port_modify_flags {
632 IB_PORT_SHUTDOWN = 1,
633 IB_PORT_INIT_TYPE = (1<<2),
634 IB_PORT_RESET_QKEY_CNTR = (1<<3),
635 IB_PORT_OPA_MASK_CHG = (1<<4)
636};
637
638struct ib_port_modify {
639 u32 set_port_cap_mask;
640 u32 clr_port_cap_mask;
641 u8 init_type;
642};
643
644enum ib_event_type {
645 IB_EVENT_CQ_ERR,
646 IB_EVENT_QP_FATAL,
647 IB_EVENT_QP_REQ_ERR,
648 IB_EVENT_QP_ACCESS_ERR,
649 IB_EVENT_COMM_EST,
650 IB_EVENT_SQ_DRAINED,
651 IB_EVENT_PATH_MIG,
652 IB_EVENT_PATH_MIG_ERR,
653 IB_EVENT_DEVICE_FATAL,
654 IB_EVENT_PORT_ACTIVE,
655 IB_EVENT_PORT_ERR,
656 IB_EVENT_LID_CHANGE,
657 IB_EVENT_PKEY_CHANGE,
658 IB_EVENT_SM_CHANGE,
659 IB_EVENT_SRQ_ERR,
660 IB_EVENT_SRQ_LIMIT_REACHED,
661 IB_EVENT_QP_LAST_WQE_REACHED,
662 IB_EVENT_CLIENT_REREGISTER,
663 IB_EVENT_GID_CHANGE,
664 IB_EVENT_WQ_FATAL,
665};
666
667const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
668
669struct ib_event {
670 struct ib_device *device;
671 union {
672 struct ib_cq *cq;
673 struct ib_qp *qp;
674 struct ib_srq *srq;
675 struct ib_wq *wq;
676 u8 port_num;
677 } element;
678 enum ib_event_type event;
679};
680
681struct ib_event_handler {
682 struct ib_device *device;
683 void (*handler)(struct ib_event_handler *, struct ib_event *);
684 struct list_head list;
685};
686
/* Initialize an ib_event_handler in place: bind it to @_device and
 * @_handler and reset its list linkage.  Does not register it. */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
 do { \
  (_ptr)->device = _device; \
  (_ptr)->handler = _handler; \
  INIT_LIST_HEAD(&(_ptr)->list); \
 } while (0)
693
694struct ib_global_route {
695 const struct ib_gid_attr *sgid_attr;
696 union ib_gid dgid;
697 u32 flow_label;
698 u8 sgid_index;
699 u8 hop_limit;
700 u8 traffic_class;
701};
702
703struct ib_grh {
704 __be32 version_tclass_flow;
705 __be16 paylen;
706 u8 next_hdr;
707 u8 hop_limit;
708 union ib_gid sgid;
709 union ib_gid dgid;
710};
711
712union rdma_network_hdr {
713 struct ib_grh ibgrh;
714 struct {
715
716
717
718 u8 reserved[20];
719 struct iphdr roce4grh;
720 };
721};
722
723#define IB_QPN_MASK 0xFFFFFF
724
725enum {
726 IB_MULTICAST_QPN = 0xffffff
727};
728
729#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
730#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
731
732enum ib_ah_flags {
733 IB_AH_GRH = 1
734};
735
736enum ib_rate {
737 IB_RATE_PORT_CURRENT = 0,
738 IB_RATE_2_5_GBPS = 2,
739 IB_RATE_5_GBPS = 5,
740 IB_RATE_10_GBPS = 3,
741 IB_RATE_20_GBPS = 6,
742 IB_RATE_30_GBPS = 4,
743 IB_RATE_40_GBPS = 7,
744 IB_RATE_60_GBPS = 8,
745 IB_RATE_80_GBPS = 9,
746 IB_RATE_120_GBPS = 10,
747 IB_RATE_14_GBPS = 11,
748 IB_RATE_56_GBPS = 12,
749 IB_RATE_112_GBPS = 13,
750 IB_RATE_168_GBPS = 14,
751 IB_RATE_25_GBPS = 15,
752 IB_RATE_100_GBPS = 16,
753 IB_RATE_200_GBPS = 17,
754 IB_RATE_300_GBPS = 18,
755 IB_RATE_28_GBPS = 19,
756 IB_RATE_50_GBPS = 20,
757 IB_RATE_400_GBPS = 21,
758 IB_RATE_600_GBPS = 22,
759};
760
761
762
763
764
765
766
767__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
768
769
770
771
772
773
774__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794enum ib_mr_type {
795 IB_MR_TYPE_MEM_REG,
796 IB_MR_TYPE_SG_GAPS,
797 IB_MR_TYPE_DM,
798 IB_MR_TYPE_USER,
799 IB_MR_TYPE_DMA,
800 IB_MR_TYPE_INTEGRITY,
801};
802
803enum ib_mr_status_check {
804 IB_MR_CHECK_SIG_STATUS = 1,
805};
806
807
808
809
810
811
812
813
814
815struct ib_mr_status {
816 u32 fail_status;
817 struct ib_sig_err sig_err;
818};
819
820
821
822
823
824
825__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
826
827enum rdma_ah_attr_type {
828 RDMA_AH_ATTR_TYPE_UNDEFINED,
829 RDMA_AH_ATTR_TYPE_IB,
830 RDMA_AH_ATTR_TYPE_ROCE,
831 RDMA_AH_ATTR_TYPE_OPA,
832};
833
834struct ib_ah_attr {
835 u16 dlid;
836 u8 src_path_bits;
837};
838
839struct roce_ah_attr {
840 u8 dmac[ETH_ALEN];
841};
842
843struct opa_ah_attr {
844 u32 dlid;
845 u8 src_path_bits;
846 bool make_grd;
847};
848
849struct rdma_ah_attr {
850 struct ib_global_route grh;
851 u8 sl;
852 u8 static_rate;
853 u8 port_num;
854 u8 ah_flags;
855 enum rdma_ah_attr_type type;
856 union {
857 struct ib_ah_attr ib;
858 struct roce_ah_attr roce;
859 struct opa_ah_attr opa;
860 };
861};
862
863enum ib_wc_status {
864 IB_WC_SUCCESS,
865 IB_WC_LOC_LEN_ERR,
866 IB_WC_LOC_QP_OP_ERR,
867 IB_WC_LOC_EEC_OP_ERR,
868 IB_WC_LOC_PROT_ERR,
869 IB_WC_WR_FLUSH_ERR,
870 IB_WC_MW_BIND_ERR,
871 IB_WC_BAD_RESP_ERR,
872 IB_WC_LOC_ACCESS_ERR,
873 IB_WC_REM_INV_REQ_ERR,
874 IB_WC_REM_ACCESS_ERR,
875 IB_WC_REM_OP_ERR,
876 IB_WC_RETRY_EXC_ERR,
877 IB_WC_RNR_RETRY_EXC_ERR,
878 IB_WC_LOC_RDD_VIOL_ERR,
879 IB_WC_REM_INV_RD_REQ_ERR,
880 IB_WC_REM_ABORT_ERR,
881 IB_WC_INV_EECN_ERR,
882 IB_WC_INV_EEC_STATE_ERR,
883 IB_WC_FATAL_ERR,
884 IB_WC_RESP_TIMEOUT_ERR,
885 IB_WC_GENERAL_ERR
886};
887
888const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
889
890enum ib_wc_opcode {
891 IB_WC_SEND,
892 IB_WC_RDMA_WRITE,
893 IB_WC_RDMA_READ,
894 IB_WC_COMP_SWAP,
895 IB_WC_FETCH_ADD,
896 IB_WC_LSO,
897 IB_WC_LOCAL_INV,
898 IB_WC_REG_MR,
899 IB_WC_MASKED_COMP_SWAP,
900 IB_WC_MASKED_FETCH_ADD,
901
902
903
904
905 IB_WC_RECV = 1 << 7,
906 IB_WC_RECV_RDMA_WITH_IMM
907};
908
909enum ib_wc_flags {
910 IB_WC_GRH = 1,
911 IB_WC_WITH_IMM = (1<<1),
912 IB_WC_WITH_INVALIDATE = (1<<2),
913 IB_WC_IP_CSUM_OK = (1<<3),
914 IB_WC_WITH_SMAC = (1<<4),
915 IB_WC_WITH_VLAN = (1<<5),
916 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
917};
918
919struct ib_wc {
920 union {
921 u64 wr_id;
922 struct ib_cqe *wr_cqe;
923 };
924 enum ib_wc_status status;
925 enum ib_wc_opcode opcode;
926 u32 vendor_err;
927 u32 byte_len;
928 struct ib_qp *qp;
929 union {
930 __be32 imm_data;
931 u32 invalidate_rkey;
932 } ex;
933 u32 src_qp;
934 u32 slid;
935 int wc_flags;
936 u16 pkey_index;
937 u8 sl;
938 u8 dlid_path_bits;
939 u8 port_num;
940 u8 smac[ETH_ALEN];
941 u16 vlan_id;
942 u8 network_hdr_type;
943};
944
945enum ib_cq_notify_flags {
946 IB_CQ_SOLICITED = 1 << 0,
947 IB_CQ_NEXT_COMP = 1 << 1,
948 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
949 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
950};
951
952enum ib_srq_type {
953 IB_SRQT_BASIC,
954 IB_SRQT_XRC,
955 IB_SRQT_TM,
956};
957
958static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
959{
960 return srq_type == IB_SRQT_XRC ||
961 srq_type == IB_SRQT_TM;
962}
963
964enum ib_srq_attr_mask {
965 IB_SRQ_MAX_WR = 1 << 0,
966 IB_SRQ_LIMIT = 1 << 1,
967};
968
969struct ib_srq_attr {
970 u32 max_wr;
971 u32 max_sge;
972 u32 srq_limit;
973};
974
975struct ib_srq_init_attr {
976 void (*event_handler)(struct ib_event *, void *);
977 void *srq_context;
978 struct ib_srq_attr attr;
979 enum ib_srq_type srq_type;
980
981 struct {
982 struct ib_cq *cq;
983 union {
984 struct {
985 struct ib_xrcd *xrcd;
986 } xrc;
987
988 struct {
989 u32 max_num_tags;
990 } tag_matching;
991 };
992 } ext;
993};
994
995struct ib_qp_cap {
996 u32 max_send_wr;
997 u32 max_recv_wr;
998 u32 max_send_sge;
999 u32 max_recv_sge;
1000 u32 max_inline_data;
1001
1002
1003
1004
1005
1006
1007 u32 max_rdma_ctxs;
1008};
1009
1010enum ib_sig_type {
1011 IB_SIGNAL_ALL_WR,
1012 IB_SIGNAL_REQ_WR
1013};
1014
1015enum ib_qp_type {
1016
1017
1018
1019
1020
1021 IB_QPT_SMI,
1022 IB_QPT_GSI,
1023
1024 IB_QPT_RC,
1025 IB_QPT_UC,
1026 IB_QPT_UD,
1027 IB_QPT_RAW_IPV6,
1028 IB_QPT_RAW_ETHERTYPE,
1029 IB_QPT_RAW_PACKET = 8,
1030 IB_QPT_XRC_INI = 9,
1031 IB_QPT_XRC_TGT,
1032 IB_QPT_MAX,
1033 IB_QPT_DRIVER = 0xFF,
1034
1035
1036
1037
1038 IB_QPT_RESERVED1 = 0x1000,
1039 IB_QPT_RESERVED2,
1040 IB_QPT_RESERVED3,
1041 IB_QPT_RESERVED4,
1042 IB_QPT_RESERVED5,
1043 IB_QPT_RESERVED6,
1044 IB_QPT_RESERVED7,
1045 IB_QPT_RESERVED8,
1046 IB_QPT_RESERVED9,
1047 IB_QPT_RESERVED10,
1048};
1049
1050enum ib_qp_create_flags {
1051 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1052 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
1053 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1054 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1055 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1056 IB_QP_CREATE_NETIF_QP = 1 << 5,
1057 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1058
1059 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
1060 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
1061 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1062 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
1063
1064 IB_QP_CREATE_RESERVED_START = 1 << 26,
1065 IB_QP_CREATE_RESERVED_END = 1 << 31,
1066};
1067
1068
1069
1070
1071
1072
1073struct ib_qp_init_attr {
1074
1075 void (*event_handler)(struct ib_event *, void *);
1076
1077 void *qp_context;
1078 struct ib_cq *send_cq;
1079 struct ib_cq *recv_cq;
1080 struct ib_srq *srq;
1081 struct ib_xrcd *xrcd;
1082 struct ib_qp_cap cap;
1083 enum ib_sig_type sq_sig_type;
1084 enum ib_qp_type qp_type;
1085 u32 create_flags;
1086
1087
1088
1089
1090 u8 port_num;
1091 struct ib_rwq_ind_table *rwq_ind_tbl;
1092 u32 source_qpn;
1093};
1094
1095struct ib_qp_open_attr {
1096 void (*event_handler)(struct ib_event *, void *);
1097 void *qp_context;
1098 u32 qp_num;
1099 enum ib_qp_type qp_type;
1100};
1101
1102enum ib_rnr_timeout {
1103 IB_RNR_TIMER_655_36 = 0,
1104 IB_RNR_TIMER_000_01 = 1,
1105 IB_RNR_TIMER_000_02 = 2,
1106 IB_RNR_TIMER_000_03 = 3,
1107 IB_RNR_TIMER_000_04 = 4,
1108 IB_RNR_TIMER_000_06 = 5,
1109 IB_RNR_TIMER_000_08 = 6,
1110 IB_RNR_TIMER_000_12 = 7,
1111 IB_RNR_TIMER_000_16 = 8,
1112 IB_RNR_TIMER_000_24 = 9,
1113 IB_RNR_TIMER_000_32 = 10,
1114 IB_RNR_TIMER_000_48 = 11,
1115 IB_RNR_TIMER_000_64 = 12,
1116 IB_RNR_TIMER_000_96 = 13,
1117 IB_RNR_TIMER_001_28 = 14,
1118 IB_RNR_TIMER_001_92 = 15,
1119 IB_RNR_TIMER_002_56 = 16,
1120 IB_RNR_TIMER_003_84 = 17,
1121 IB_RNR_TIMER_005_12 = 18,
1122 IB_RNR_TIMER_007_68 = 19,
1123 IB_RNR_TIMER_010_24 = 20,
1124 IB_RNR_TIMER_015_36 = 21,
1125 IB_RNR_TIMER_020_48 = 22,
1126 IB_RNR_TIMER_030_72 = 23,
1127 IB_RNR_TIMER_040_96 = 24,
1128 IB_RNR_TIMER_061_44 = 25,
1129 IB_RNR_TIMER_081_92 = 26,
1130 IB_RNR_TIMER_122_88 = 27,
1131 IB_RNR_TIMER_163_84 = 28,
1132 IB_RNR_TIMER_245_76 = 29,
1133 IB_RNR_TIMER_327_68 = 30,
1134 IB_RNR_TIMER_491_52 = 31
1135};
1136
1137enum ib_qp_attr_mask {
1138 IB_QP_STATE = 1,
1139 IB_QP_CUR_STATE = (1<<1),
1140 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1141 IB_QP_ACCESS_FLAGS = (1<<3),
1142 IB_QP_PKEY_INDEX = (1<<4),
1143 IB_QP_PORT = (1<<5),
1144 IB_QP_QKEY = (1<<6),
1145 IB_QP_AV = (1<<7),
1146 IB_QP_PATH_MTU = (1<<8),
1147 IB_QP_TIMEOUT = (1<<9),
1148 IB_QP_RETRY_CNT = (1<<10),
1149 IB_QP_RNR_RETRY = (1<<11),
1150 IB_QP_RQ_PSN = (1<<12),
1151 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1152 IB_QP_ALT_PATH = (1<<14),
1153 IB_QP_MIN_RNR_TIMER = (1<<15),
1154 IB_QP_SQ_PSN = (1<<16),
1155 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1156 IB_QP_PATH_MIG_STATE = (1<<18),
1157 IB_QP_CAP = (1<<19),
1158 IB_QP_DEST_QPN = (1<<20),
1159 IB_QP_RESERVED1 = (1<<21),
1160 IB_QP_RESERVED2 = (1<<22),
1161 IB_QP_RESERVED3 = (1<<23),
1162 IB_QP_RESERVED4 = (1<<24),
1163 IB_QP_RATE_LIMIT = (1<<25),
1164};
1165
1166enum ib_qp_state {
1167 IB_QPS_RESET,
1168 IB_QPS_INIT,
1169 IB_QPS_RTR,
1170 IB_QPS_RTS,
1171 IB_QPS_SQD,
1172 IB_QPS_SQE,
1173 IB_QPS_ERR
1174};
1175
1176enum ib_mig_state {
1177 IB_MIG_MIGRATED,
1178 IB_MIG_REARM,
1179 IB_MIG_ARMED
1180};
1181
1182enum ib_mw_type {
1183 IB_MW_TYPE_1 = 1,
1184 IB_MW_TYPE_2 = 2
1185};
1186
1187struct ib_qp_attr {
1188 enum ib_qp_state qp_state;
1189 enum ib_qp_state cur_qp_state;
1190 enum ib_mtu path_mtu;
1191 enum ib_mig_state path_mig_state;
1192 u32 qkey;
1193 u32 rq_psn;
1194 u32 sq_psn;
1195 u32 dest_qp_num;
1196 int qp_access_flags;
1197 struct ib_qp_cap cap;
1198 struct rdma_ah_attr ah_attr;
1199 struct rdma_ah_attr alt_ah_attr;
1200 u16 pkey_index;
1201 u16 alt_pkey_index;
1202 u8 en_sqd_async_notify;
1203 u8 sq_draining;
1204 u8 max_rd_atomic;
1205 u8 max_dest_rd_atomic;
1206 u8 min_rnr_timer;
1207 u8 port_num;
1208 u8 timeout;
1209 u8 retry_cnt;
1210 u8 rnr_retry;
1211 u8 alt_port_num;
1212 u8 alt_timeout;
1213 u32 rate_limit;
1214};
1215
1216enum ib_wr_opcode {
1217
1218 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1219 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1220 IB_WR_SEND = IB_UVERBS_WR_SEND,
1221 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1222 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1223 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1224 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1225 IB_WR_LSO = IB_UVERBS_WR_TSO,
1226 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1227 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1228 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1229 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1230 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1231 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1232 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1233
1234
1235 IB_WR_REG_MR = 0x20,
1236 IB_WR_REG_MR_INTEGRITY,
1237
1238
1239
1240
1241 IB_WR_RESERVED1 = 0xf0,
1242 IB_WR_RESERVED2,
1243 IB_WR_RESERVED3,
1244 IB_WR_RESERVED4,
1245 IB_WR_RESERVED5,
1246 IB_WR_RESERVED6,
1247 IB_WR_RESERVED7,
1248 IB_WR_RESERVED8,
1249 IB_WR_RESERVED9,
1250 IB_WR_RESERVED10,
1251};
1252
1253enum ib_send_flags {
1254 IB_SEND_FENCE = 1,
1255 IB_SEND_SIGNALED = (1<<1),
1256 IB_SEND_SOLICITED = (1<<2),
1257 IB_SEND_INLINE = (1<<3),
1258 IB_SEND_IP_CSUM = (1<<4),
1259
1260
1261 IB_SEND_RESERVED_START = (1 << 26),
1262 IB_SEND_RESERVED_END = (1 << 31),
1263};
1264
1265struct ib_sge {
1266 u64 addr;
1267 u32 length;
1268 u32 lkey;
1269};
1270
1271struct ib_cqe {
1272 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1273};
1274
1275struct ib_send_wr {
1276 struct ib_send_wr *next;
1277 union {
1278 u64 wr_id;
1279 struct ib_cqe *wr_cqe;
1280 };
1281 struct ib_sge *sg_list;
1282 int num_sge;
1283 enum ib_wr_opcode opcode;
1284 int send_flags;
1285 union {
1286 __be32 imm_data;
1287 u32 invalidate_rkey;
1288 } ex;
1289};
1290
1291struct ib_rdma_wr {
1292 struct ib_send_wr wr;
1293 u64 remote_addr;
1294 u32 rkey;
1295};
1296
1297static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1298{
1299 return container_of(wr, struct ib_rdma_wr, wr);
1300}
1301
1302struct ib_atomic_wr {
1303 struct ib_send_wr wr;
1304 u64 remote_addr;
1305 u64 compare_add;
1306 u64 swap;
1307 u64 compare_add_mask;
1308 u64 swap_mask;
1309 u32 rkey;
1310};
1311
1312static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1313{
1314 return container_of(wr, struct ib_atomic_wr, wr);
1315}
1316
1317struct ib_ud_wr {
1318 struct ib_send_wr wr;
1319 struct ib_ah *ah;
1320 void *header;
1321 int hlen;
1322 int mss;
1323 u32 remote_qpn;
1324 u32 remote_qkey;
1325 u16 pkey_index;
1326 u8 port_num;
1327};
1328
1329static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1330{
1331 return container_of(wr, struct ib_ud_wr, wr);
1332}
1333
1334struct ib_reg_wr {
1335 struct ib_send_wr wr;
1336 struct ib_mr *mr;
1337 u32 key;
1338 int access;
1339};
1340
1341static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1342{
1343 return container_of(wr, struct ib_reg_wr, wr);
1344}
1345
1346struct ib_recv_wr {
1347 struct ib_recv_wr *next;
1348 union {
1349 u64 wr_id;
1350 struct ib_cqe *wr_cqe;
1351 };
1352 struct ib_sge *sg_list;
1353 int num_sge;
1354};
1355
1356enum ib_access_flags {
1357 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1358 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1359 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1360 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1361 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1362 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1363 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1364 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1365
1366 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1367};
1368
1369
1370
1371
1372
1373enum ib_mr_rereg_flags {
1374 IB_MR_REREG_TRANS = 1,
1375 IB_MR_REREG_PD = (1<<1),
1376 IB_MR_REREG_ACCESS = (1<<2),
1377 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1378};
1379
1380struct ib_fmr_attr {
1381 int max_pages;
1382 int max_maps;
1383 u8 page_shift;
1384};
1385
1386struct ib_umem;
1387
1388enum rdma_remove_reason {
1389
1390
1391
1392
1393 RDMA_REMOVE_DESTROY,
1394
1395 RDMA_REMOVE_CLOSE,
1396
1397 RDMA_REMOVE_DRIVER_REMOVE,
1398
1399 RDMA_REMOVE_ABORT,
1400};
1401
1402struct ib_rdmacg_object {
1403#ifdef CONFIG_CGROUP_RDMA
1404 struct rdma_cgroup *cg;
1405#endif
1406};
1407
1408struct ib_ucontext {
1409 struct ib_device *device;
1410 struct ib_uverbs_file *ufile;
1411
1412
1413
1414
1415
1416 bool closing;
1417
1418 bool cleanup_retryable;
1419
1420 void (*invalidate_range)(struct ib_umem_odp *umem_odp,
1421 unsigned long start, unsigned long end);
1422 struct mutex per_mm_list_lock;
1423 struct list_head per_mm_list;
1424
1425 struct ib_rdmacg_object cg_obj;
1426
1427
1428
1429 struct rdma_restrack_entry res;
1430};
1431
1432struct ib_uobject {
1433 u64 user_handle;
1434
1435 struct ib_uverbs_file *ufile;
1436
1437 struct ib_ucontext *context;
1438 void *object;
1439 struct list_head list;
1440 struct ib_rdmacg_object cg_obj;
1441 int id;
1442 struct kref ref;
1443 atomic_t usecnt;
1444 struct rcu_head rcu;
1445
1446 const struct uverbs_api_object *uapi_object;
1447};
1448
1449struct ib_udata {
1450 const void __user *inbuf;
1451 void __user *outbuf;
1452 size_t inlen;
1453 size_t outlen;
1454};
1455
1456struct ib_pd {
1457 u32 local_dma_lkey;
1458 u32 flags;
1459 struct ib_device *device;
1460 struct ib_uobject *uobject;
1461 atomic_t usecnt;
1462
1463 u32 unsafe_global_rkey;
1464
1465
1466
1467
1468 struct ib_mr *__internal_mr;
1469 struct rdma_restrack_entry res;
1470};
1471
1472struct ib_xrcd {
1473 struct ib_device *device;
1474 atomic_t usecnt;
1475 struct inode *inode;
1476
1477 struct mutex tgt_qp_mutex;
1478 struct list_head tgt_qp_list;
1479};
1480
1481struct ib_ah {
1482 struct ib_device *device;
1483 struct ib_pd *pd;
1484 struct ib_uobject *uobject;
1485 const struct ib_gid_attr *sgid_attr;
1486 enum rdma_ah_attr_type type;
1487};
1488
/* Completion callback invoked when a CQ event is signalled. */
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

/* How completions of this CQ are processed by the core. */
enum ib_poll_context {
	IB_POLL_DIRECT,		   /* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	   /* poll from softirq context (irq_poll) */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};

/* struct ib_cq - completion queue */
struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	union {
		struct irq_poll		iop;	/* IB_POLL_SOFTIRQ */
		struct work_struct	work;	/* IB_POLL_*WORKQUEUE */
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;	/* adaptive interrupt moderation state */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
1519
/* struct ib_srq - shared receive queue */
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;	/* NOTE(review): appears to count QPs using this SRQ — confirm */

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scatter FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};
1555
/* Work queue types — currently only receive queues. */
enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

/* struct ib_wq - work queue (used for RSS / raw packet QPs) */
struct ib_wq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	void		    *wq_context;
	void		    (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32		wq_num;
	enum ib_wq_state       state;
	enum ib_wq_type	wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type	wq_type;
	u32		max_wr;		/* max number of outstanding WRs */
	u32		max_sge;	/* max SGEs per WR */
	struct	ib_cq	       *cq;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags;	/* use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

struct ib_wq_attr {
	enum	ib_wq_state	wq_state;
	enum	ib_wq_state	curr_wq_state;
	u32			flags;		/* use enum ib_wq_flags */
	u32			flags_mask;	/* use enum ib_wq_flags */
};

/* struct ib_rwq_ind_table - receive WQ indirection table for RSS */
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32		ind_tbl_num;
	u32		log_ind_tbl_size;
	struct ib_wq	**ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq	**ind_tbl;
};
1623
/* State of a (port, pkey) pair tracked for IB security enforcement. */
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

/* Primary and alternate path (port, pkey) settings of a QP. */
struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

/* Per-QP security state used by the IB port/pkey LSM enforcement. */
struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
1662
1663
1664
1665
1666
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;	/* protects rdma_mrs/sig_mrs */
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u8			port;

	bool			integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bind to */
	struct rdma_counter    *counter;
};
1707
/* struct ib_dm - device memory region */
struct ib_dm {
	struct ib_device  *device;
	u32		   length;
	u32		   flags;
	struct ib_uobject *uobject;
	atomic_t	   usecnt;
};
1715
/* struct ib_mr - memory region */
struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	bool		   need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm      *dm;
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

/* struct ib_mw - memory window */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

/* struct ib_fmr - fast memory region (legacy FMR interface) */
struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};
1754
1755
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP                = 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

/* Flow steering rule priority is set according to it's domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};
1812
struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;	/* values to match */
	struct ib_flow_eth_filter mask;	/* which bits of val to compare */
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};
1842
1843
/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* For All fragmented packets except the
				    last one */
};
1849
struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;	/* use enum ib_ipv4_flags */
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32			      type;
	u16			      size;
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};

struct ib_flow_esp_filter {
	__be32	spi;
	__be32  seq;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_esp {
	u32                           type;
	u16			      size;
	struct ib_flow_esp_filter     val;
	struct ib_flow_esp_filter     mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_gre {
	u32                           type;
	u16			      size;
	struct ib_flow_gre_filter     val;
	struct ib_flow_gre_filter     mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_mpls {
	u32                           type;
	u16			      size;
	struct ib_flow_mpls_filter     val;
	struct ib_flow_mpls_filter     mask;
};
1956
/* Flow-tag action: mark matching packets with @tag_id in the CQE. */
struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;
	u16			      size;
	u32                           tag_id;
};

/* Drop action: discard matching packets. */
struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;
	u16			      size;
};

/* Apply a pre-created flow action (e.g. ESP) to matching packets. */
struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_action	     *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

/* Count action: account matching packets into @counters. */
struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};

union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6        ipv6;
	struct ib_flow_spec_tunnel      tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_gre		gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag  flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

/* Flow attribute header followed by @num_of_specs variable-size specs. */
struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};
2020
enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags should be declared in
	 * enum ib_uverbs_flow_action_esp_flags and occupy the low 32 bits,
	 * so that the uapi and this in-kernel enum stay in sync.
	 */

	/* Kernel-only flags start at bit 32. */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};
2050
/* Singly-linked list of flow specs (used for ESP encapsulation). */
struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats		*keymat;
	struct ib_flow_action_attrs_esp_replays		*replay;
	struct ib_flow_spec_list			*encap;
	/* NOTE(review): esn/spi/seq/tfc_pad mirror the uverbs ESP attrs;
	 * see uapi ib_uverbs_flow_action_esp for the authoritative meaning.
	 */
	u32						esn;
	u32						spi;
	u32						seq;
	u32						tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64						flags;
	u64						hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};
2078
struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

/* Cached per-port attributes, refreshed on port events. */
struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	u8                     lmc;
	enum ib_port_state     port_state;
};

struct ib_cache {
	rwlock_t                lock;	/* protects the per-port caches */
	struct ib_event_handler event_handler;
};

/* Per-port attributes that never change after device registration. */
struct ib_port_immutable {
	int                           pkey_tbl_len;
	int                           gid_tbl_len;
	u32                           core_cap_flags;
	u32                           max_mad_size;
};
2114
/* Per-port state of an ib_device; the device's port_data array is
 * indexed by port number.
 */
struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;	/* protects pkey_list */
	struct list_head pkey_list;

	struct ib_port_cache cache;

	spinlock_t netdev_lock;		/* protects netdev pointer updates */
	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct rdma_hw_stats *hw_stats;
};
2131
2132
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void              *clnt_priv;
	struct ib_device  *hca;
	u8                 port_num;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

/* Parameters a driver hands back so the core can allocate its netdev. */
struct rdma_netdev_alloc_params {
	size_t sizeof_priv;
	unsigned int txqs;
	unsigned int rxqs;
	void *param;

	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
				      struct net_device *netdev, void *param);
};
2176
/* struct ib_counters - a set of HW flow counters */
struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t	usecnt;
};

struct ib_counters_read_attr {
	u64	*counters_buff;	/* output array, ncounters entries */
	u32	ncounters;
	u32	flags; /* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;
2193
/* Records the driver struct size for a core-allocated object type and
 * statically checks that the embedded ib_* member is first in the driver
 * struct and has the expected type.
 */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

/* Allocate a zeroed driver object of the size registered via
 * INIT_RDMA_OBJ_SIZE.
 */
#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2209
2210
2211
2212
2213
2214
/**
 * struct ib_device_ops - InfiniBand device operations
 * This structure defines all the InfiniBand device operations, providers will
 * need to define the supported operations, otherwise they will be set to null.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	u32 uverbs_abi_ver;
	unsigned int uverbs_no_driver_id_binding:1;

	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u8 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			   struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			   u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u8 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u8 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/*
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/*
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
	/*
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/*
	 * query_gid should be return GID value for @device, when @port_num
	 * link layer is either IB or iWarp. It is no-op if @port_num port
	 * is RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
			 union ib_gid *gid);
	/*
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written to. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/*
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
			  u16 *pkey);
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
			 u32 flags, struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags,
			     struct ib_pd *pd, struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       int domain, struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
		struct ib_device *device,
		struct ib_rwq_ind_table_init_attr *init_attr,
		struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	struct ib_counters *(*create_counters)(
		struct ib_device *device, struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);

	/*
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/*
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls to do_get_stats)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   counters specified by the index mask
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when a ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_entry)(struct sk_buff *msg,
			      struct rdma_restrack_entry *entry);

	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);

	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter.
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 * the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);

	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};
2526
struct ib_core_device {
	/* device must be the first element in structure until,
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;	/* net namespace this view belongs to */
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};
2537
struct rdma_restrack_root;
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HCA drivers. */
	struct device                *dma_device;
	struct ib_device_ops	     ops;
	char                          name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	struct rw_semaphore	      client_data_rwsem;
	struct xarray                 client_data;
	struct mutex                  unregistration_lock;

	struct ib_cache               cache;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int			      num_comp_vectors;

	union {
		struct device		dev;
		struct ib_core_device	coredev;
	};

	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is NULL terminated array.
	 */
	const struct attribute_group	*groups[3];

	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
	u16                          kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16                          use_cq_dim:1;
	u8                           node_type;
	u8                           phys_port_cnt;
	struct ib_device_attr        attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats         *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;
	struct rdma_restrack_root *res;

	const struct uapi_definition   *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWarp CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
};
2617
struct ib_client_nl_info;
struct ib_client {
	const char *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);
	void (*rename)(struct ib_device *dev, void *client_data);
	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
			   struct ib_client_nl_info *res);
	int (*get_global_nl_info)(struct ib_client_nl_info *res);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);

	refcount_t uses;
	struct completion uses_zero;
	u32 client_id;

	/* kverbs are not required by the client */
	u8 no_kverbs_req:1;
};
2658
2659
2660
2661
2662
2663
2664
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};
2673
struct ib_device *_ib_alloc_device(size_t size);
/* Allocate a driver structure embedding a struct ib_device as @member;
 * the build bug enforces that the ib_device is the first member.
 */
#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)
2680
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

/* Device registration / unregistration entry points. */
int ib_register_device(struct ib_device *device, const char *name);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
void ib_unregister_device_queued(struct ib_device *ib_dev);

/* Client (ULP) registration. */
int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

/* Block iterator primitives; use rdma_for_each_block() instead of these. */
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);
2699
2700
2701
2702
2703
2704
2705static inline dma_addr_t
2706rdma_block_iter_dma_address(struct ib_block_iter *biter)
2707{
2708 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2709}
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * blocks aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736static inline void *ib_get_client_data(struct ib_device *device,
2737 struct ib_client *client)
2738{
2739 return xa_load(&device->client_data, client->client_id);
2740}
/* Store per-client context data on @device; see ib_get_client_data(). */
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);
/* Copy the provided ops into @device->ops, skipping NULL entries. */
void ib_set_device_ops(struct ib_device *device,
		       const struct ib_device_ops *ops);
2745
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
/* Map driver io memory into a user vma; only available with uverbs. */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot);
#else
/* Without user access support there is nothing to mmap. */
static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
				    struct vm_area_struct *vma,
				    unsigned long pfn, unsigned long size,
				    pgprot_t prot)
{
	return -EINVAL;
}
#endif
2758
2759static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2760{
2761 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2762}
2763
2764static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2765{
2766 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2767}
2768
2769static inline bool ib_is_buffer_cleared(const void __user *p,
2770 size_t len)
2771{
2772 bool ret;
2773 u8 *buf;
2774
2775 if (len > USHRT_MAX)
2776 return false;
2777
2778 buf = memdup_user(p, len);
2779 if (IS_ERR(buf))
2780 return false;
2781
2782 ret = !memchr_inv(buf, 0, len);
2783 kfree(buf);
2784 return ret;
2785}
2786
2787static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2788 size_t offset,
2789 size_t len)
2790{
2791 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2792}
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
/*
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Returns true when the destroy failed (@ret non-zero) AND either the
 * removal was an explicit destroy request or the context allows
 * retryable cleanup.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/*
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Returns -EBUSY when the object is still in use and that failure is
 * retryable (see ib_is_destroy_retryable()), 0 otherwise.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2850 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2851
2852void ib_register_event_handler(struct ib_event_handler *event_handler);
2853void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2854void ib_dispatch_event(struct ib_event *event);
2855
2856int ib_query_port(struct ib_device *device,
2857 u8 port_num, struct ib_port_attr *port_attr);
2858
2859enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2860 u8 port_num);
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * switch ports.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 * @device: Device to be checked
 *
 * Return start port number: 0 for a switch (its single management
 * port), 1 otherwise.
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device
 * @iter: The unsigned int to store the port number
 *
 * The BUILD_BUG_ON_ZERO() term enforces at compile time that @iter is
 * an unsigned int (it adds 0 to @device, or fails to build).
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
						     unsigned int, iter)));    \
	     iter <= rdma_end_port(device); (iter)++)
2898
2899
2900
2901
2902
2903
2904
2905
2906
/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 * @device: Device to be checked
 *
 * Return last port number: 0 for a switch, phys_port_cnt otherwise.
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

/* Check that @port falls in [rdma_start_port(), rdma_end_port()]. */
static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}
2918
/*
 * Per-port protocol/capability predicates. Each tests the immutable
 * core_cap_flags cached in device->port_data[port_num].
 */

/* True when the port requires a GRH on all traffic. */
static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

/* True when the port speaks native InfiniBand. */
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

/* True for either RoCE flavor (v1 or v2/UDP-encapsulated). */
static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

/* True only for RoCE v2 (UDP encapsulated). */
static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

/* True only for RoCE v1 (Ethernet encapsulated). */
static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

/* True when the port speaks iWARP. */
static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

/* True for IB or either RoCE flavor. */
static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

/* True when the port supports raw packet QPs. */
static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

/* True when the port speaks the usNIC protocol. */
static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_MAD;
}
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee
 * that a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port should use the SA to query path records or
 * register for multicast group membership.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Multicast registration follows SA support, so this simply forwards to
 * rdma_cap_ib_sa().
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port uses a GID address to identify endpoints
 * (AF_IB addressing).
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if address handles on this port encode Ethernet
 * addressing information (RoCE-style AHs).
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if using OPA Address handles. Note the exact-match
 * comparison: every bit of RDMA_CORE_CAP_OPA_AH must be set.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 * @device: Device
 * @port_num: Port number
 *
 * Return: the max MAD size required by the Port, cached in the port's
 * immutable data. A return of 0 indicates the port does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.max_mad_size;
}
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
/**
 * rdma_cap_roce_gid_table - Check if the port of device uses
 * roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the RoCE port's GID table is managed by the core
 * (driver supplies both add_gid and del_gid ops).
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->ops.add_gid && device->ops.del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
	 * has a similar requirement, so this is the only check performed.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
3229
3230
3231
3232
3233
3234
3235
/**
 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
 *
 * @addr: address
 * @pgsz_bitmap: bitmap of HW supported page sizes
 *
 * Returns the log2 of the largest supported page size that @addr is
 * aligned to; falls back to the smallest supported page size when no
 * supported size fits the alignment.
 */
static inline unsigned int rdma_find_pg_bit(unsigned long addr,
					    unsigned long pgsz_bitmap)
{
	unsigned long align;
	unsigned long pgsz;

	/* addr & -addr isolates the lowest set bit, i.e. the alignment. */
	align = addr & -addr;

	/* Find page bit such that addr is aligned to the highest supported
	 * HW page size
	 */
	pgsz = pgsz_bitmap & ~(-align << 1);
	if (!pgsz)
		return __ffs(pgsz_bitmap);

	return __fls(pgsz);
}
3253
3254int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3255 int state);
3256int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3257 struct ifla_vf_info *info);
3258int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3259 struct ifla_vf_stats *stats);
3260int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3261 int type);
3262
3263int ib_query_pkey(struct ib_device *device,
3264 u8 port_num, u16 index, u16 *pkey);
3265
3266int ib_modify_device(struct ib_device *device,
3267 int device_modify_mask,
3268 struct ib_device_modify *device_modify);
3269
3270int ib_modify_port(struct ib_device *device,
3271 u8 port_num, int port_modify_mask,
3272 struct ib_port_modify *port_modify);
3273
3274int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3275 u8 *port_num, u16 *index);
3276
3277int ib_find_pkey(struct ib_device *device,
3278 u8 port_num, u16 pkey, u16 *index);
3279
enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments.  Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};
3292
3293struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3294 const char *caller);
3295
3296#define ib_alloc_pd(device, flags) \
3297 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3298
3299
3300
3301
3302
3303
3304void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3305
3306
3307
3308
3309
3310
3311
/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PDs use ib_dealloc_pd_user with a valid udata.
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	ib_dealloc_pd_user(pd, NULL);
}
3316
3317enum rdma_create_ah_flags {
3318
3319 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3320};
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3332 u32 flags);
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3347 struct rdma_ah_attr *ah_attr,
3348 struct ib_udata *udata);
3349
3350
3351
3352
3353
3354
3355
3356
3357int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3358 enum rdma_network_type net_type,
3359 union ib_gid *sgid, union ib_gid *dgid);
3360
3361
3362
3363
3364
3365int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3386 const struct ib_wc *wc, const struct ib_grh *grh,
3387 struct rdma_ah_attr *ah_attr);
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3402 const struct ib_grh *grh, u8 port_num);
3403
3404
3405
3406
3407
3408
3409
3410
3411int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3412
3413
3414
3415
3416
3417
3418
3419
3420int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3421
3422enum rdma_destroy_ah_flags {
3423
3424 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3425};
3426
3427
3428
3429
3430
3431
3432
3433int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3434
3435
3436
3437
3438
3439
3440
3441
/**
 * rdma_destroy_ah - Destroy an kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return rdma_destroy_ah_user(ah, flags, NULL);
}
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460struct ib_srq *ib_create_srq(struct ib_pd *pd,
3461 struct ib_srq_init_attr *srq_init_attr);
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475int ib_modify_srq(struct ib_srq *srq,
3476 struct ib_srq_attr *srq_attr,
3477 enum ib_srq_attr_mask srq_attr_mask);
3478
3479
3480
3481
3482
3483
3484
3485int ib_query_srq(struct ib_srq *srq,
3486 struct ib_srq_attr *srq_attr);
3487
3488
3489
3490
3491
3492
3493int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3494
3495
3496
3497
3498
3499
3500
/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 */
static inline int ib_destroy_srq(struct ib_srq *srq)
{
	return ib_destroy_srq_user(srq, NULL);
}

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.  May be NULL;
 *   a local dummy pointer is substituted so the driver op always has a
 *   valid output slot.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3533 struct ib_qp_init_attr *qp_init_attr,
3534 struct ib_udata *udata);
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
/**
 * ib_create_qp - Creates a kernel QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 *
 * NOTE: for user qp use ib_create_qp_user with valid udata!
 */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *qp_init_attr)
{
	return ib_create_qp_user(pd, qp_init_attr, NULL);
}
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564int ib_modify_qp_with_udata(struct ib_qp *qp,
3565 struct ib_qp_attr *attr,
3566 int attr_mask,
3567 struct ib_udata *udata);
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578int ib_modify_qp(struct ib_qp *qp,
3579 struct ib_qp_attr *qp_attr,
3580 int qp_attr_mask);
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593int ib_query_qp(struct ib_qp *qp,
3594 struct ib_qp_attr *qp_attr,
3595 int qp_attr_mask,
3596 struct ib_qp_init_attr *qp_init_attr);
3597
3598
3599
3600
3601
3602
3603int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3604
3605
3606
3607
3608
3609
3610
/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}
3615
3616
3617
3618
3619
3620
3621
3622
3623struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3624 struct ib_qp_open_attr *qp_open_attr);
3625
3626
3627
3628
3629
3630
3631
3632
3633int ib_close_qp(struct ib_qp *qp);
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.  May be NULL;
 *   a local dummy is substituted so the driver op always gets a valid slot.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.  May be NULL.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
3673
3674struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3675 int nr_cqe, int comp_vector,
3676 enum ib_poll_context poll_ctx,
3677 const char *caller, struct ib_udata *udata);
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
/**
 * ib_alloc_cq_user: Allocate kernel/user CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @comp_vector: Completion vector used for the IRQs
 * @poll_ctx: Context used for polling the CQ
 * @udata: Valid user data or NULL for kernel objects
 *
 * Thin wrapper passing KBUILD_MODNAME as the caller identity.
 */
static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
					     void *private, int nr_cqe,
					     int comp_vector,
					     enum ib_poll_context poll_ctx,
					     struct ib_udata *udata)
{
	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				  KBUILD_MODNAME, udata);
}

/**
 * ib_alloc_cq: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @comp_vector: Completion vector used for the IRQs
 * @poll_ctx: Context used for polling the CQ
 *
 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
 */
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
					int nr_cqe, int comp_vector,
					enum ib_poll_context poll_ctx)
{
	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				NULL);
}
3715
3716
3717
3718
3719
3720
3721void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3722
3723
3724
3725
3726
3727
3728
/**
 * ib_free_cq - Free kernel CQ
 * @cq: The CQ to free
 *
 * NOTE: for user cq use ib_free_cq_user with valid udata!
 */
static inline void ib_free_cq(struct ib_cq *cq)
{
	ib_free_cq_user(cq, NULL);
}
3733
3734int ib_process_cq_direct(struct ib_cq *cq, int budget);
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749struct ib_cq *__ib_create_cq(struct ib_device *device,
3750 ib_comp_handler comp_handler,
3751 void (*event_handler)(struct ib_event *, void *),
3752 void *cq_context,
3753 const struct ib_cq_init_attr *cq_attr,
3754 const char *caller);
3755#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3756 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3757
3758
3759
3760
3761
3762
3763
3764
3765int ib_resize_cq(struct ib_cq *cq, int cqe);
3766
3767
3768
3769
3770
3771
3772
3773
3774int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3775
3776
3777
3778
3779
3780
3781int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3782
3783
3784
3785
3786
3787
3788
/**
 * ib_destroy_cq - Destroys the specified kernel CQ.
 * @cq: The CQ to destroy.
 *
 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 */
static inline void ib_destroy_cq(struct ib_cq *cq)
{
	ib_destroy_cq_user(cq, NULL);
}
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags: requested notification type (solicited/next completion), plus
 *   optionally IB_CQ_REPORT_MISSED_EVENTS.
 *
 * Return: 0 on success, or a negative errno; with
 * IB_CQ_REPORT_MISSED_EVENTS a positive return means completions may
 * have been missed and the CQ should be re-polled.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 *
 * Optional device op; returns -ENOSYS when the driver does not
 * implement it.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->ops.req_ncomp_notif ?
		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}
3858
3859
3860
3861
3862
3863
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
3896
3897
3898
3899
3900
3901
3902
3903
3904
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}
3927
3928
3929
3930
3931
3932
3933
3934
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/* As ib_dma_map_sg, with additional DMA attributes. */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

/* As ib_dma_unmap_sg, with additional DMA attributes. */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
3972
3973
3974
3975
3976
3977
3978
/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.  UINT_MAX (no limit)
 * is reported when the device has no dma_parms.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	struct device_dma_parameters *p = dev->dma_device->dma_parms;

	return p ? p->max_segment_size : UINT_MAX;
}
3985
3986
3987
3988
3989
3990
3991
3992
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
4015
4016
4017
4018
4019
4020
4021
4022
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}

/* Allocate a kernel MR (udata == NULL); see ib_alloc_mr_user(). */
static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg)
{
	return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
}
4077
4078struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4079 u32 max_num_data_sg,
4080 u32 max_num_meta_sg);
4081
4082
4083
4084
4085
4086
4087
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: the memory region to update
 * @newkey: new key value (only the low 8 bits are used)
 *
 * Replaces the low 8 "key variant" bits of both the local and remote
 * keys; the upper 24 index bits are preserved.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
4093
4094
4095
4096
4097
4098
4099static inline u32 ib_inc_rkey(u32 rkey)
4100{
4101 const u32 mask = 0x000000ff;
4102 return ((rkey + 1) & mask) | (rkey & ~mask);
4103}
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4115 int mr_access_flags,
4116 struct ib_fmr_attr *fmr_attr);
4117
4118
4119
4120
4121
4122
4123
4124
/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
}
4131
4132
4133
4134
4135
4136int ib_unmap_fmr(struct list_head *fmr_list);
4137
4138
4139
4140
4141
4142int ib_dealloc_fmr(struct ib_fmr *fmr);
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4157
4158
4159
4160
4161
4162
4163
4164int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4165
4166
4167
4168
4169
4170
4171struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4172#define ib_alloc_xrcd(device) \
4173 __ib_alloc_xrcd((device), KBUILD_MODNAME)
4174
4175
4176
4177
4178
4179
4180int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4181
4182static inline int ib_check_mr_access(int flags)
4183{
4184
4185
4186
4187
4188 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4189 !(flags & IB_ACCESS_LOCAL_WRITE))
4190 return -EINVAL;
4191
4192 return 0;
4193}
4194
/*
 * ib_access_writable - check whether an MR access mask implies writable
 * memory.
 *
 * True if any flag that permits writing through the registration
 * (local write, remote write, remote atomic, or MW bind) is set.
 */
static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backed by a writable user-MR if any of
	 * the write-permitting access flags below is requested.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4222 struct ib_mr_status *mr_status);
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
/**
 * ib_device_try_get: Hold a registration lock on the device
 * @dev: The device to lock
 *
 * Returns false if the registration refcount has already dropped to
 * zero (device being unregistered); on true the caller must release
 * the hold with ib_device_put().
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}
4241
4242void ib_device_put(struct ib_device *device);
4243struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4244 enum rdma_driver_id driver_id);
4245struct ib_device *ib_device_get_by_name(const char *name,
4246 enum rdma_driver_id driver_id);
4247struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4248 u16 pkey, const union ib_gid *gid,
4249 const struct sockaddr *addr);
4250int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4251 unsigned int port);
4252struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4253
4254struct ib_wq *ib_create_wq(struct ib_pd *pd,
4255 struct ib_wq_init_attr *init_attr);
4256int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4257int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4258 u32 wq_attr_mask);
4259struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4260 struct ib_rwq_ind_table_init_attr*
4261 wq_ind_table_init_attr);
4262int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4263
4264int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4265 unsigned int *sg_offset, unsigned int page_size);
4266int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4267 int data_sg_nents, unsigned int *data_sg_offset,
4268 struct scatterlist *meta_sg, int meta_sg_nents,
4269 unsigned int *meta_sg_offset, unsigned int page_size);
4270
/*
 * ib_map_mr_sg_zbva - map an sg list to an MR with a zero-based
 * virtual address.
 *
 * Same as ib_map_mr_sg() but forces mr->iova to 0 after the mapping,
 * so offsets into the MR start at zero.
 */
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}
4282
4283int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4284 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4285
4286void ib_drain_rq(struct ib_qp *qp);
4287void ib_drain_sq(struct ib_qp *qp);
4288void ib_drain_qp(struct ib_qp *qp);
4289
4290int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4291
/* Return the RoCE destination MAC buffer, or NULL for non-RoCE AHs. */
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

/*
 * Set the destination LID. Stored as 16 bits for IB, 32 bits for OPA;
 * ignored for other AH types.
 */
static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

/* Get the destination LID; 0 for AH types that carry no LID. */
static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}
4315
/* Set the service level. */
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

/* Get the service level. */
static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

/* Set the source path bits (IB and OPA AHs only). */
static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

/* Get the source path bits; 0 for AH types without them. */
static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

/* Set the make_grd flag (OPA AHs only; ignored otherwise). */
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

/* Get the make_grd flag; false for non-OPA AHs. */
static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}
4357
/* Set the port number the AH is associated with. */
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

/* Get the port number the AH is associated with. */
static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

/* Set the static rate. */
static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

/* Get the static rate. */
static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

/* Replace the AH flags (e.g. IB_AH_GRH). */
static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

/* Get the AH flags. */
static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}
4390
/* Read-only access to the AH's global route header fields. */
static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/*To retrieve and modify the grh */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* Copy a raw 16-byte GID into the GRH destination GID. */
static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

/* Set only the subnet-prefix half of the destination GID. */
static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

/* Set only the interface-id half of the destination GID. */
static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

/*
 * Fill in all GRH fields at once and mark the AH as having a GRH.
 * @dgid may be NULL to leave the current destination GID untouched.
 * Note sgid_attr is reset to NULL; use rdma_move_grh_sgid_attr() to
 * attach a GID attribute.
 */
static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
4443
4444void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4445void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4446 u32 flow_label, u8 hop_limit, u8 traffic_class,
4447 const struct ib_gid_attr *sgid_attr);
4448void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4449 const struct rdma_ah_attr *src);
4450void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4451 const struct rdma_ah_attr *new);
4452void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4453
4454
4455
4456
4457
4458
4459
/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 *
 * RoCE ports yield ROCE; IB ports yield OPA when the OPA AH capability
 * is set, IB otherwise; anything else is UNDEFINED.
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to get
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 *
 * WARNs once if any of the upper 16 bits are set.
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 *
 * WARNs once if any of the upper 16 bits are set.
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device:         the rdma device
 * @comp_vector:    index of completion vector
 *
 * Returns NULL on failure (vector out of range or driver does not
 * implement get_vector_affinity), otherwise a pointer to the affinity
 * mask and attempts to update irq queue counts.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);

}
4520
4521
4522
4523
4524
4525
4526
4527void rdma_roce_rescan_device(struct ib_device *ibdev);
4528
4529struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4530
4531int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4532
4533struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4534 enum rdma_netdev_t type, const char *name,
4535 unsigned char name_assign_type,
4536 void (*setup)(struct net_device *));
4537
4538int rdma_init_netdev(struct ib_device *device, u8 port_num,
4539 enum rdma_netdev_t type, const char *name,
4540 unsigned char name_assign_type,
4541 void (*setup)(struct net_device *),
4542 struct net_device *netdev);
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
/**
 * rdma_set_device_sysfs_group - Set device attributes group to have
 *				 driver specific sysfs entries at
 *				 for infiniband class.
 *
 * @dev:	device pointer
 * @group:	Pointer to group which should be added when device
 *		is registered with sysfs.
 *
 * Stores @group in dev->groups[1] (slot 0 is reserved by the core).
 * NOTE(review): upstream requires this be called before device
 * registration — confirm against callers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
			    const struct attribute_group *group)
{
	dev->groups[1] = group;
}
4565
4566
4567
4568
4569
4570
4571
4572
4573
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device:	device pointer for which ib_device pointer to retrieve
 *
 * rdma_device_to_ibdev() retrieves ib_device pointer from device, by
 * resolving the enclosing ib_core_device and returning its owner.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4591 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4592
4593bool rdma_dev_access_netns(const struct ib_device *device,
4594 const struct net *net);
4595#endif
4596