1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51#include <linux/socket.h>
52#include <linux/irq_poll.h>
53#include <uapi/linux/if_ether.h>
54#include <net/ipv6.h>
55#include <net/ip.h>
56#include <linux/string.h>
57#include <linux/slab.h>
58#include <linux/netdevice.h>
59
60#include <linux/if_link.h>
61#include <linux/atomic.h>
62#include <linux/mmu_notifier.h>
63#include <linux/uaccess.h>
64#include <linux/cgroup_rdma.h>
65#include <uapi/rdma/ib_user_verbs.h>
66
67#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
68
/* Workqueues provided by the IB core for deferred work and CQ completions. */
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

/* 128-bit Global Identifier, viewable raw or as subnet prefix + interface ID. */
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

/* The all-zeroes GID, used as the "empty entry" sentinel in GID tables. */
extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, 0 means RoCE V1. */
	IB_GID_TYPE_IB        = 0,
	IB_GID_TYPE_ROCE      = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,	/* RoCE V2 (routable, UDP encapsulated) */
	IB_GID_TYPE_SIZE
};

/* IANA-assigned destination UDP port for RoCE V2. */
#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	enum ib_gid_type	gid_type;
	struct net_device	*ndev;	/* netdev this GID is associated with */
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

/* Map a node type to the transport it runs over. */
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};
134
135static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
136{
137 if (network_type == RDMA_NETWORK_IPV4 ||
138 network_type == RDMA_NETWORK_IPV6)
139 return IB_GID_TYPE_ROCE_UDP_ENCAP;
140
141
142 return IB_GID_TYPE_IB;
143}
144
145static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
146 union ib_gid *gid)
147{
148 if (gid_type == IB_GID_TYPE_IB)
149 return RDMA_NETWORK_IB;
150
151 if (ipv6_addr_v4mapped((struct in6_addr *)gid))
152 return RDMA_NETWORK_IPV4;
153 else
154 return RDMA_NETWORK_IPV6;
155}
156
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

/* Device capability bits reported in ib_device_attr.device_cap_flags. */
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	/* bit 9 unused (was IB_DEVICE_INIT_TYPE) */
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * The device supports a per-device lkey (ib_device local_dma_lkey)
	 * usable in place of an MR covering all of local memory.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* bit 16 unused (was IB_DEVICE_RESERVED / old SEND_W_INV) */
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * The device supports memory registration work requests
	 * (IB_WR_REG_MR) and the local invalidate fencing needed for
	 * iWarp-style fast registration.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
};
235
/* T10-DIF protection types supported for signature handover. */
enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

/* T10-DIF block-guard methods supported for signature handover. */
enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

/* Scope of atomic-operation support. */
enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

/* On-demand paging (ODP) general capability bits. */
enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

/* ODP support bits, reported per transport. */
enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;		/* ib_odp_general_cap_bits */
	struct {
		uint32_t  rc_odp_caps;	/* ib_odp_transport_cap_bits per QP type */
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/*
	 * Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/*  Support tag matching on RC transport */
	IB_TM_CAP_RC		    = 1 << 0,
};

/* Hardware tag-matching limits. */
struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
	IB_CQ_FLAGS_IGNORE_OVERRUN	   = 1 << 1,
};

struct ib_cq_init_attr {
	unsigned int	cqe;		/* minimum number of entries */
	int		comp_vector;	/* completion interrupt vector */
	u32		flags;		/* enum ib_cq_creation_flags */
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

/* Maximum supported CQ interrupt-moderation settings. */
struct ib_cq_caps {
	u16     max_cq_moderation_count;
	u16     max_cq_moderation_period;
};
322
/* Device attributes and limits, filled by the driver at registration. */
struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;	/* largest contiguous MR, bytes */
	u64			page_size_cap;	/* supported MR page sizes, bitmask */
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;	/* enum ib_device_cap_flags */
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;		/* RDMA read/atomic depth limits */
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;	/* enum ib_signature_prot_cap */
	int			sig_guard_cap;	/* enum ib_signature_guard_cap */
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
};
375
/* IB path MTU encodings (values match the IBTA PortInfo NeighborMTU field). */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/* Translate an MTU encoding to its size in bytes, or -1 if invalid. */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	if (mtu < IB_MTU_256 || mtu > IB_MTU_4096)
		return -1;
	/* Encodings are consecutive powers of two starting at 256. */
	return 256 << (mtu - IB_MTU_256);
}

/* Largest MTU encoding not exceeding @mtu; clamps below 512 to IB_MTU_256. */
static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	enum ib_mtu best = IB_MTU_4096;

	while (best > IB_MTU_256 && mtu < ib_mtu_enum_to_int(best))
		best = (enum ib_mtu)(best - 1);
	return best;
}
409
/* Logical port states (IBTA PortInfo:PortState). */
enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

/* Port capability bits (IBTA PortInfo:CapabilityMask). */
enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};
445
/* Link width encodings (IBTA LinkWidthActive bit values). */
enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

/* Number of physical lanes for a width encoding, or -1 if unknown. */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	if (width == IB_WIDTH_1X)
		return 1;
	if (width == IB_WIDTH_4X)
		return 4;
	if (width == IB_WIDTH_8X)
		return 8;
	if (width == IB_WIDTH_12X)
		return 12;
	return -1;
}
463
/* Per-lane signalling rate encodings (SDR = 2.5 Gb/s base). */
enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
/**
 * struct rdma_hw_stats - per-device or per-port hardware counter set
 * @timestamp: jiffies of the last counter refresh
 * @lifespan: minimum jiffies between refreshes (0 = always refresh)
 * @names: array of counter names; must remain valid for the struct's lifetime
 * @num_counters: number of entries in @names and @value
 * @value: counter values, flexible array sized by @num_counters
 */
struct rdma_hw_stats {
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

/* Default counter refresh interval, in msecs. */
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
499
500
501
502
503
504
505
506static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
507 const char * const *names, int num_counters,
508 unsigned long lifespan)
509{
510 struct rdma_hw_stats *stats;
511
512 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
513 GFP_KERNEL);
514 if (!stats)
515 return NULL;
516 stats->names = names;
517 stats->num_counters = num_counters;
518 stats->lifespan = msecs_to_jiffies(lifespan);
519
520 return stats;
521}
522
523
524
525
526
527
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

/* Per-technology combinations of the above, one per port type. */
#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
573
/* Per-port attributes returned by the driver's query_port method. */
struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;	/* enum ib_port_cap_flags */
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;	/* enum ib_port_width */
	u8			active_speed;	/* enum ib_port_speed */
	u8                      phys_state;
	bool			grh_required;	/* GRH mandatory on this port */
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};
622
/* Asynchronous event types delivered to registered event handlers. */
enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

/* Human-readable name for an event type. */
const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;	/* which member is valid depends on */
		struct ib_qp	*qp;	/* the event type */
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

/* Initialize an ib_event_handler before ib_register_event_handler(). */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
672
/* Global routing information carried in an address handle. */
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;	/* index into the port's GID table */
	u8		hop_limit;
	u8		traffic_class;
};

/* Global Route Header, as it appears on the wire. */
struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1	/* address handle carries a GRH */
};
713
/* Static rate encodings (IBTA IPD/static-rate values). */
enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
749
750
751
752
753
754
755
756
757
758
759
760
761
762
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 *                            register any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * enum ib_signature_type - signature offload types
 * @IB_SIG_TYPE_NONE: Unprotected.
 * @IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * enum ib_t10_dif_bg_type - T10-DIF block-guard types
 * @IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * @IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate wethear the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};
813
814
815
816
817
818
819
/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};
872
873
874
875
876
877
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

/* IB-specific part of an address handle. */
struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

/* RoCE-specific part of an address handle. */
struct roce_ah_attr {
	u8			dmac[ETH_ALEN];	/* destination MAC */
};

/* OPA-specific part of an address handle (32-bit LIDs). */
struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

/* Transport-independent address handle attributes. */
struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u8			port_num;
	u8			ah_flags;	/* enum ib_ah_flags */
	enum rdma_ah_attr_type type;	/* selects which union member is valid */
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};
914
/* Completion status codes reported in struct ib_wc. */
enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

/* Human-readable name for a completion status. */
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

/* Work completion, filled by the driver when polling a CQ. */
struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;	/* valid when the WR used a cqe */
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;	/* enum ib_wc_flags */
	u16			pkey_index;
	u32			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;	/* enum rdma_network_type */
};
996
/* Flags for ib_req_notify_cq(): when to raise the next completion event. */
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};
1003
enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

/* True for SRQ types that carry their own completion queue. */
static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	switch (srq_type) {
	case IB_SRQT_XRC:
	case IB_SRQT_TM:
		return true;
	default:
		return false;
	}
}
1015
enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;	/* watermark for IB_EVENT_SRQ_LIMIT_REACHED */
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	/* Valid only for SRQ types where ib_srq_has_cq() is true. */
	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32		max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};
1066
enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* bit 7 unused (was IB_QP_CREATE_USE_GFP_NOIO) */
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};
1118
1119
1120
1121
1122
1123
/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in qp.
 */
struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

/* RNR NAK timeout encodings; names give the delay in msec (e.g. 000_01 = 0.01). */
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
1185
/* Bitmask selecting which ib_qp_attr fields a modify_qp call carries. */
enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

/* QP attributes; the mask passed to modify_qp says which fields are valid. */
struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
};
1264
/* Work-request opcodes for the send queue. */
enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

/* Scatter/gather element: a registered local buffer fragment. */
struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

/* Completion callback attached to a WR instead of a wr_id cookie. */
struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
1317
/* Base send work request; opcode-specific WRs embed this as their first member. */
struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;	/* enum ib_send_flags */
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

/* Downcast a base WR known to be an RDMA read/write WR. */
static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
1344
struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;	/* masked atomics only */
	u64			swap_mask;		/* masked atomics only */
	u32			rkey;
};

/* Downcast a base WR known to be an atomic WR. */
static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;	/* LSO header (IB_WR_LSO) */
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index;	/* valid for GSI only */
	u8			port_num;	/* valid for DR SMPs on switch only */
};

/* Downcast a base WR known to be a UD send WR. */
static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;		/* enum ib_access_flags */
};

/* Downcast a base WR known to be a memory-registration WR. */
static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr	wr;
	struct ib_sig_attrs    *sig_attrs;
	struct ib_mr	       *sig_mr;
	int			access_flags;
	struct ib_sge	       *prot;
};

/* Downcast a base WR known to be a signature-handover WR. */
static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}
1401
/* Receive work request posted to a QP receive queue or an SRQ. */
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

/* Access permissions for memory registrations and memory windows. */
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND     = (1<<6),
	IB_ACCESS_HUGETLB	= (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};
1439
struct ib_umem;

/* Why a uobject is being removed (drives cleanup behavior). */
enum rdma_remove_reason {
	/* Userspace requested uobject deletion. Call could fail */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* Context is being cleaned-up, but commit was just completed */
	RDMA_REMOVE_DURING_CLEANUP,
};

/* Per-object RDMA cgroup accounting handle (empty when cgroups disabled). */
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};
1458
/* Per-open-file userspace verbs context. */
struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;
	int			closing;	/* set while the context tears down */

	/* locking the uobjects_list */
	struct mutex		uobjects_lock;
	struct list_head	uobjects;
	/* protects cleanup process from other actions */
	struct rw_semaphore	cleanup_rwsem;
	enum rdma_remove_reason cleanup_reason;

	struct pid             *tgid;	/* owning task group, for ODP mm lookup */
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root_cached   umem_tree;
	/*
	 * Protects .umem_rbtree and ib_umem_odp.notifiers_seq/count, and
	 * mmu notifiers registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private counters yet. */
	struct list_head	no_private_counters;
	int                     odp_mrs_count;
#endif

	struct ib_rdmacg_object	cg_obj;
};

/* Kernel-side representation of a userspace verbs object handle. */
struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
	struct ib_uobject	uobj;
	/* ufile contains the pointer to the uverbs file structure */
	struct ib_uverbs_file	*ufile;
};

/* Copy-through buffers for a userspace verbs command. */
struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};
1518
/* Protection domain: scopes MRs, QPs, AHs and SRQs together. */
struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
};

/* XRC domain: shares XRC TGT QPs, optionally bound to an inode. */
struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;	/* protects tgt_qp_list */
	struct list_head	tgt_qp_list;
};

/* Address handle: describes the path to a remote destination. */
struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	enum rdma_ah_attr_type	type;
};
1549
/* Callback invoked when a completion event is raised on @cq. */
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

/* How completions on the CQ are reaped when using the ib_cq API. */
enum ib_poll_context {
	IB_POLL_DIRECT,		/* caller polls the CQ directly */
	IB_POLL_SOFTIRQ,	/* poll from softirq (irq_poll) context */
	IB_POLL_WORKQUEUE,	/* poll from workqueue context */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;	/* batch buffer for the poll API */
	union {
		struct irq_poll		iop;	/* IB_POLL_SOFTIRQ */
		struct work_struct	work;	/* IB_POLL_WORKQUEUE */
	};
};
1573
1574struct ib_srq {
1575 struct ib_device *device;
1576 struct ib_pd *pd;
1577 struct ib_uobject *uobject;
1578 void (*event_handler)(struct ib_event *, void *);
1579 void *srq_context;
1580 enum ib_srq_type srq_type;
1581 atomic_t usecnt;
1582
1583 struct {
1584 struct ib_cq *cq;
1585 union {
1586 struct {
1587 struct ib_xrcd *xrcd;
1588 u32 srq_num;
1589 } xrc;
1590 };
1591 } ext;
1592};
1593
/* Capabilities of raw-packet (Ethernet) QPs, reported by the device. */
enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packets and report it in the matching
	 * work completion.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scatter the packet's FCS (frame checksum) into the receive buffer. */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed instead of dropping the packet.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

/* Work queue type; only receive queues are currently defined. */
enum ib_wq_type {
	IB_WQT_RQ
};

/* Work queue state machine. */
enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};
1619
/* Work queue: a standalone RQ usable through an indirection table (RSS). */
struct ib_wq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;	/* non-NULL for user WQs */
	void		    *wq_context;
	void		    (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32		wq_num;
	enum ib_wq_state       state;
	enum ib_wq_type	wq_type;
	atomic_t		usecnt;		/* count of RWQ tables using this WQ */
};

/* WQ creation/modification flags (mirror the raw-packet caps above). */
enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

/* Attributes for create_wq(). */
struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type	wq_type;
	u32		max_wr;			/* max outstanding receive WRs */
	u32		max_sge;		/* max scatter/gather entries per WR */
	struct	ib_cq	       *cq;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags;		/* bitmask of enum ib_wq_flags */
};

/* Which fields of struct ib_wq_attr a modify_wq() call carries. */
enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

/* Attributes for modify_wq(). */
struct ib_wq_attr {
	enum	ib_wq_state	wq_state;	/* desired state */
	enum	ib_wq_state	curr_wq_state;	/* assumed current state */
	u32			flags;		/* flag values to change */
	u32			flags_mask;	/* which bits of @flags are valid */
};
1662
/* Receive work queue indirection table (RSS spreading table). */
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;	/* non-NULL for user tables */
	atomic_t		usecnt;		/* count of QPs using this table */
	u32		ind_tbl_num;
	u32		log_ind_tbl_size;	/* table holds 1 << log_ind_tbl_size WQs */
	struct ib_wq	**ind_tbl;
};

/* Attributes for create_rwq_ind_table(). */
struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq	**ind_tbl;
};

/* P_Key security-tracking state for one (port, pkey index) pair. */
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

/* Tracks a QP's membership on a port/pkey for security enforcement. */
struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;	/* entry in the port/pkey QP list */
	struct list_head	to_error_list;	/* entry while transitioning to error */
	struct ib_qp_security  *sec;		/* owning QP security structure */
};

/* Primary and alternate port/pkey settings of a QP. */
struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};
1699
/* Security (LSM / pkey enforcement) state attached to a QP. */
struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* mutex protects the fields below */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	void                   *security;	/* LSM blob */
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

/*
 * struct ib_qp - queue pair.
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;	/* protects mrs_used and the MR lists */
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;		/* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;	/* shared-QP open handles */
	struct ib_qp           *real_qp;	/* underlying QP for shared QPs */
	struct ib_uobject      *uobject;	/* non-NULL for user QPs */
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;	/* RSS QPs only */
	struct ib_qp_security  *qp_sec;
	u8			port;
};
1749
/* Memory region. */
struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;	/* local access key */
	u32		   rkey;	/* remote access key */
	u64		   iova;	/* registered virtual address */
	u64		   length;	/* registered length in bytes */
	unsigned int	   page_size;
	bool		   need_inval;	/* requires invalidation before reuse */
	union {
		struct ib_uobject	*uobject;	/* user MRs */
		struct list_head	qp_entry;	/* FR kernel MRs: entry on qp->rdma_mrs/sig_mrs */
	};
};

/* Memory window. */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;	/* type 1 or type 2 window */
};

/* Fast memory region (legacy FMR interface). */
struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;	/* entry on an FMR pool list */
	u32			lkey;
	u32			rkey;
};
1780
1781
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to it's domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
};
1832
/*
 * Flow steering filters.  Each filter struct ends with a zero-length
 * real_sz[] marker: offsetof(filter, real_sz) is the size of the fields
 * userspace actually provided, allowing forward/backward compatibility.
 */
struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_eth {
	u32			  type;		/* IB_FLOW_SPEC_ETH */
	u16			  size;
	struct ib_flow_eth_filter val;		/* values to match */
	struct ib_flow_eth_filter mask;		/* which bits of val are compared */
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ib {
	u32			 type;		/* IB_FLOW_SPEC_IB */
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32			   type;	/* IB_FLOW_SPEC_IPV4 */
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32			   type;	/* IB_FLOW_SPEC_IPV6 */
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;	/* IB_FLOW_SPEC_TCP or _UDP */
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32			      type;	/* IB_FLOW_SPEC_VXLAN_TUNNEL */
	u16			      size;
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};

/* Tag the matching packet's work completion with tag_id. */
struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;
	u16			      size;
	u32                           tag_id;
};

/* Drop packets that match the preceding specs. */
struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;
	u16			      size;
};

union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6        ipv6;
	struct ib_flow_spec_tunnel      tunnel;
	struct ib_flow_spec_action_tag  flow_tag;
	struct ib_flow_spec_action_drop drop;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;		/* total size including the specs */
	u16	     priority;
	u32	     flags;		/* bitmask of enum ib_flow_flags */
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};

/* Installed steering rule handle. */
struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};
1978
struct ib_mad_hdr;
struct ib_grh;

/* Flags for process_mad(): which key validations may be skipped. */
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

/* Result bitmask returned by a driver's process_mad() method. */
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

/* Cached per-port data, maintained by the RDMA core cache module. */
struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	u8                     lmc;
	enum ib_port_state     port_state;
};

/* Device-wide cache of per-port data; @lock protects the ports array. */
struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache   *ports;
};
2008
2009struct iw_cm_verbs;
2010
/* Per-port attributes that never change after device registration. */
struct ib_port_immutable {
	int                           pkey_tbl_len;
	int                           gid_tbl_len;
	u32                           core_cap_flags;	/* RDMA_CORE_CAP_* bits */
	u32                           max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void              *clnt_priv;
	struct ib_device  *hca;
	u8                 port_num;

	/* cleanup function must be specified */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

/* List of the pkey settings in use on a port, for security tracking. */
struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t                    list_lock;
	struct list_head              pkey_list;
};
2054
/*
 * struct ib_device - an RDMA device (HCA, RNIC, usNIC, ...).
 *
 * Holds the device identity, per-port immutable data and caches, and the
 * table of driver-provided verbs methods the RDMA core dispatches to.
 * Optional methods may be NULL; which methods are mandatory is enforced
 * at registration time by the core.
 */
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock.
	 */
	struct list_head              client_data_list;

	struct ib_cache               cache;
	/*
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct ib_port_pkey_list     *port_pkey_list;

	struct iw_cm_verbs	     *iwcm;	/* iWARP CM ops, if applicable */

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'d by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct means the stats will never get updated except by explicit
	 *   userspace request.
	 */
	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
						     u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int		           (*get_hw_stats)(struct ib_device *device,
						   struct rdma_hw_stats *stats,
						   u8 port, int index);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr,
						   struct ib_udata *udata);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist.  The vendor driver should call dev_hold
	 * on this net device.  The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device	  *(*get_netdev)(struct ib_device *device,
						 u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should
	 * add the gid of device @device at gid index @index of
	 * port @port_num to be @gid.  Meta-info of that gid (for example,
	 * the network device related to this gid) is available at @attr.
	 * @context allows the HW vendor driver to store extra information
	 * together with a GID entry.  The HW vendor may allocate memory to
	 * back this @context and it has to be freed in del_gid.
	 * This function is only called on RoCE ports.
	 */
	int		           (*add_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      const union ib_gid *gid,
					      const struct ib_gid_attr *attr,
					      void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called on RoCE ports.
	 */
	int		           (*del_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      void **context);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct rdma_ah_attr *ah_attr,
						struct ib_udata *udata);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct rdma_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct rdma_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						const struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
					       enum ib_mr_type mr_type,
					       u32 max_num_sg);
	int                        (*map_mr_sg)(struct ib_mr *mr,
						struct scatterlist *sg,
						int sg_nents,
						unsigned int *sg_offset);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type,
					       struct ib_udata *udata);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad_hdr *in_mad,
						  size_t in_mad_size,
						  struct ib_mad_hdr *out_mad,
						  size_t *out_mad_size,
						  u16 *out_mad_pkey_index);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr
						  *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);
	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void			   (*drain_rq)(struct ib_qp *qp);
	void			   (*drain_sq)(struct ib_qp *qp);
	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
							int state);
	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_info *ivf);
	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_stats *stats);
	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
						  int type);
	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
						struct ib_wq_init_attr *init_attr,
						struct ib_udata *udata);
	int			   (*destroy_wq)(struct ib_wq *wq);
	int			   (*modify_wq)(struct ib_wq *wq,
						struct ib_wq_attr *attr,
						u32 wq_attr_mask,
						struct ib_udata *udata);
	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
							   struct ib_rwq_ind_table_init_attr *init_attr,
							   struct ib_udata *udata);
	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
	 * doesn't support the specified rdma netdev type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	u8                           node_type;	/* RDMA_NODE_* */
	u8                           phys_port_cnt;
	struct ib_device_attr        attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats         *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;	/* unique core device index */

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	struct uverbs_root_spec		*specs_root;
};
2368
/* A consumer of RDMA devices (e.g. IPoIB, SRP); notified on add/remove. */
struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};
2398
2399struct ib_device *ib_alloc_device(size_t size);
2400void ib_dealloc_device(struct ib_device *device);
2401
2402void ib_get_device_fw_str(struct ib_device *device, char *str);
2403
2404int ib_register_device(struct ib_device *device,
2405 int (*port_callback)(struct ib_device *,
2406 u8, struct kobject *));
2407void ib_unregister_device(struct ib_device *device);
2408
2409int ib_register_client (struct ib_client *client);
2410void ib_unregister_client(struct ib_client *client);
2411
2412void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2413void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2414 void *data);
2415
2416static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2417{
2418 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2419}
2420
2421static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2422{
2423 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2424}
2425
2426static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2427 size_t offset,
2428 size_t len)
2429{
2430 const void __user *p = udata->inbuf + offset;
2431 bool ret;
2432 u8 *buf;
2433
2434 if (len > USHRT_MAX)
2435 return false;
2436
2437 buf = memdup_user(p, len);
2438 if (IS_ERR(buf))
2439 return false;
2440
2441 ret = !memchr_inv(buf, 0, len);
2442 kfree(buf);
2443 return ret;
2444}
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2463 enum ib_qp_type type, enum ib_qp_attr_mask mask,
2464 enum rdma_link_layer ll);
2465
2466void ib_register_event_handler(struct ib_event_handler *event_handler);
2467void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2468void ib_dispatch_event(struct ib_event *event);
2469
2470int ib_query_port(struct ib_device *device,
2471 u8 port_num, struct ib_port_attr *port_attr);
2472
2473enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2474 u8 port_num);
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498static inline u8 rdma_start_port(const struct ib_device *device)
2499{
2500 return rdma_cap_ib_switch(device) ? 0 : 1;
2501}
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511static inline u8 rdma_end_port(const struct ib_device *device)
2512{
2513 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2514}
2515
/*
 * rdma_is_port_valid - check that @port lies in the device's valid
 * [rdma_start_port(), rdma_end_port()] range.
 */
static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	if (port < rdma_start_port(device))
		return 0;

	return port <= rdma_end_port(device);
}
2522
/* True if @port_num of @device speaks native InfiniBand. */
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

/* True for RoCE ports of either flavor (v1/Ethernet or v2/UDP encap). */
static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

/* True for RoCE v2 (UDP-encapsulated) ports only. */
static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

/* True for RoCE v1 (raw Ethernet encapsulation) ports only. */
static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

/* True if the port uses the iWARP protocol. */
static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

/* True if the port is either native IB or RoCE. */
static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

/* True if the port supports raw packet (Ethernet) QPs. */
static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

/* True if the port uses the usNIC protocol. */
static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}
2564
/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version are also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all
 * of the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}
2631
/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  It's role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * managment protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional adminstrative
 * overhead of registering/unregistering with the SM for multicast group
 * membership.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}
2710
/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}
2781
/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
2813
2814int ib_query_gid(struct ib_device *device,
2815 u8 port_num, int index, union ib_gid *gid,
2816 struct ib_gid_attr *attr);
2817
2818int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2819 int state);
2820int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2821 struct ifla_vf_info *info);
2822int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2823 struct ifla_vf_stats *stats);
2824int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2825 int type);
2826
2827int ib_query_pkey(struct ib_device *device,
2828 u8 port_num, u16 index, u16 *pkey);
2829
2830int ib_modify_device(struct ib_device *device,
2831 int device_modify_mask,
2832 struct ib_device_modify *device_modify);
2833
2834int ib_modify_port(struct ib_device *device,
2835 u8 port_num, int port_modify_mask,
2836 struct ib_port_modify *port_modify);
2837
2838int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2839 enum ib_gid_type gid_type, struct net_device *ndev,
2840 u8 *port_num, u16 *index);
2841
2842int ib_find_pkey(struct ib_device *device,
2843 u8 port_num, u16 pkey, u16 *index);
2844
2845enum ib_pd_flags {
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
2856};
2857
2858struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2859 const char *caller);
2860#define ib_alloc_pd(device, flags) \
2861 __ib_alloc_pd((device), (flags), __func__)
2862void ib_dealloc_pd(struct ib_pd *pd);
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
2887 struct rdma_ah_attr *ah_attr,
2888 struct ib_udata *udata);
2889
2890
2891
2892
2893
2894
2895
2896
2897int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2898 enum rdma_network_type net_type,
2899 union ib_gid *sgid, union ib_gid *dgid);
2900
2901
2902
2903
2904
2905int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2919 const struct ib_wc *wc, const struct ib_grh *grh,
2920 struct rdma_ah_attr *ah_attr);
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2935 const struct ib_grh *grh, u8 port_num);
2936
2937
2938
2939
2940
2941
2942
2943
2944int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2945
2946
2947
2948
2949
2950
2951
2952
2953int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2954
2955
2956
2957
2958
2959int rdma_destroy_ah(struct ib_ah *ah);
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974struct ib_srq *ib_create_srq(struct ib_pd *pd,
2975 struct ib_srq_init_attr *srq_init_attr);
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989int ib_modify_srq(struct ib_srq *srq,
2990 struct ib_srq_attr *srq_attr,
2991 enum ib_srq_attr_mask srq_attr_mask);
2992
2993
2994
2995
2996
2997
2998
2999int ib_query_srq(struct ib_srq *srq,
3000 struct ib_srq_attr *srq_attr);
3001
3002
3003
3004
3005
3006int ib_destroy_srq(struct ib_srq *srq);
3007
3008
3009
3010
3011
3012
3013
3014
/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030struct ib_qp *ib_create_qp(struct ib_pd *pd,
3031 struct ib_qp_init_attr *qp_init_attr);
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044int ib_modify_qp_with_udata(struct ib_qp *qp,
3045 struct ib_qp_attr *attr,
3046 int attr_mask,
3047 struct ib_udata *udata);
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058int ib_modify_qp(struct ib_qp *qp,
3059 struct ib_qp_attr *qp_attr,
3060 int qp_attr_mask);
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073int ib_query_qp(struct ib_qp *qp,
3074 struct ib_qp_attr *qp_attr,
3075 int qp_attr_mask,
3076 struct ib_qp_init_attr *qp_init_attr);
3077
3078
3079
3080
3081
3082int ib_destroy_qp(struct ib_qp *qp);
3083
3084
3085
3086
3087
3088
3089
3090
3091struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3092 struct ib_qp_open_attr *qp_open_attr);
3093
3094
3095
3096
3097
3098
3099
3100
3101int ib_close_qp(struct ib_qp *qp);
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
3122
3123
3124
3125
3126
3127
3128
3129
3130
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
3137
/* Allocate a CQ with a completion-handling context (poll_ctx selects
 * direct/softirq/workqueue completion processing — see ib_poll_context). */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
		int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
/* Free a CQ allocated with ib_alloc_cq(). */
void ib_free_cq(struct ib_cq *cq);
/* Process completions directly on the caller's context, up to @budget. */
int ib_process_cq_direct(struct ib_cq *cq, int budget);
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);
3161
3162
3163
3164
3165
3166
3167
3168
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  Returns the number of
 * completions found (non-negative) or a negative error on failure, as
 * reported by the driver's poll_cq method.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
/* Return the number of available completions on the CQ once at least
 * @wc_cnt are queued — NOTE(review): no driver dispatch is visible here;
 * see the out-of-line definition for the exact semantics. */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively; may be OR'd with
 *   %IB_CQ_REPORT_MISSED_EVENTS.
 *
 * Dispatches directly to the device driver's req_notify_cq method.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
3248
3249
3250
3251
3252
3253
3254
3255
3256static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3257{
3258 return cq->device->req_ncomp_notif ?
3259 cq->device->req_ncomp_notif(cq, wc_cnt) :
3260 -ENOSYS;
3261}
3262
3263
3264
3265
3266
3267
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}
3272
3273
3274
3275
3276
3277
3278
3279
/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
3300
3301
3302
3303
3304
3305
3306
3307
3308
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}
3331
3332
3333
3334
3335
3336
3337
3338
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}
3359
/* Like ib_dma_map_sg(), but passes additional DMA attributes through to
 * the core DMA API. */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

/* Counterpart of ib_dma_map_sg_attrs(). */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
3376
3377
3378
3379
3380
3381
3382
3383
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
3403
3404
3405
3406
3407
3408
3409
3410
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
3433
3434
3435
3436
3437
3438
3439
3440
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A DMA address which should be used as the base address for
 *   DMA by the device
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
3462
3463
3464
3465
3466
3467
3468
3469
/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/* Allocate a memory region of the given type able to map up to
 * @max_num_sg scatter/gather entries. */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);
3475
3476
3477
3478
3479
3480
3481
3482static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3483{
3484 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3485 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3486}
3487
3488
3489
3490
3491
3492
3493static inline u32 ib_inc_rkey(u32 rkey)
3494{
3495 const u32 mask = 0x000000ff;
3496 return ((rkey + 1) & mask) | (rkey & ~mask);
3497}
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
/**
 * ib_alloc_fmr - Allocates a unmapped fast memory region.
 * @pd: The protection domain associated with the region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
3525
3526
3527
3528
3529
/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3559
3560
3561
3562
3563
/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

/* Create / destroy a steering flow rule attached to the given QP. */
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);
3575
3576static inline int ib_check_mr_access(int flags)
3577{
3578
3579
3580
3581
3582 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3583 !(flags & IB_ACCESS_LOCAL_WRITE))
3584 return -EINVAL;
3585
3586 return 0;
3587}
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
/**
 * ib_check_mr_status: lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr. first use is for signature status check.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

/* Look up the net_device matching the given port/pkey/gid/address. */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
/* Work queue (WQ) and receive-WQ indirection table management. */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr*
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

/* Map a scatterlist into the MR's page table; returns the number of
 * entries mapped or a negative error. */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
3619
3620static inline int
3621ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3622 unsigned int *sg_offset, unsigned int page_size)
3623{
3624 int n;
3625
3626 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3627 mr->iova = 0;
3628
3629 return n;
3630}
3631
/* Walk a scatterlist and feed each page-aligned address to @set_page;
 * helper used by drivers implementing map_mr_sg. */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

/* Drain all outstanding work requests on the RQ, SQ, or both. */
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

/* Query Ethernet link speed/width for the given port. */
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3640
3641static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3642{
3643 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3644 return attr->roce.dmac;
3645 return NULL;
3646}
3647
3648static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3649{
3650 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3651 attr->ib.dlid = (u16)dlid;
3652 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3653 attr->opa.dlid = dlid;
3654}
3655
3656static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3657{
3658 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3659 return attr->ib.dlid;
3660 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3661 return attr->opa.dlid;
3662 return 0;
3663}
3664
/* Set the service level (SL) of the AH attribute. */
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

/* Get the service level (SL) of the AH attribute. */
static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}
3674
3675static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3676 u8 src_path_bits)
3677{
3678 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3679 attr->ib.src_path_bits = src_path_bits;
3680 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3681 attr->opa.src_path_bits = src_path_bits;
3682}
3683
3684static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3685{
3686 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3687 return attr->ib.src_path_bits;
3688 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3689 return attr->opa.src_path_bits;
3690 return 0;
3691}
3692
/* Set the OPA "make GRD" flag; no-op for non-OPA AH attribute types. */
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}
3699
3700static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3701{
3702 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3703 return attr->opa.make_grd;
3704 return false;
3705}
3706
/* Set the port number the AH attribute refers to. */
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

/* Get the port number the AH attribute refers to. */
static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

/* Set the static rate encoding of the AH attribute. */
static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

/* Get the static rate encoding of the AH attribute. */
static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

/* Overwrite the AH flags (e.g. IB_AH_GRH) of the attribute. */
static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

/* Get the AH flags of the attribute. */
static inline enum ib_ah_flags
	rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}
3739
/* Read-only accessor for the GRH embedded in the AH attribute. */
static inline const struct ib_global_route
	*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* Mutable accessor for the GRH embedded in the AH attribute. */
static inline struct ib_global_route
	*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}
3752
/* Copy a raw 16-byte GID into the GRH destination GID.
 * @dgid must point at sizeof(union ib_gid) readable bytes. */
static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

/* Set only the subnet-prefix half of the GRH destination GID. */
static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

/* Set only the interface-id half of the GRH destination GID. */
static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}
3775
3776static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3777 union ib_gid *dgid, u32 flow_label,
3778 u8 sgid_index, u8 hop_limit,
3779 u8 traffic_class)
3780{
3781 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3782
3783 attr->ah_flags = IB_AH_GRH;
3784 if (dgid)
3785 grh->dgid = *dgid;
3786 grh->flow_label = flow_label;
3787 grh->sgid_index = sgid_index;
3788 grh->hop_limit = hop_limit;
3789 grh->traffic_class = traffic_class;
3790}
3791
3792
3793static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3794 u32 port_num)
3795{
3796 if ((rdma_protocol_roce(dev, port_num)) ||
3797 (rdma_protocol_iwarp(dev, port_num)))
3798 return RDMA_AH_ATTR_TYPE_ROCE;
3799 else if ((rdma_protocol_ib(dev, port_num)) &&
3800 (rdma_cap_opa_ah(dev, port_num)))
3801 return RDMA_AH_ATTR_TYPE_OPA;
3802 else
3803 return RDMA_AH_ATTR_TYPE_IB;
3804}
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to get
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	/* warn (once) if the value does not actually fit in 16 bits */
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	/* warn (once) if the value does not actually fit in 16 bits */
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842static inline const struct cpumask *
3843ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3844{
3845 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
3846 !device->get_vector_affinity)
3847 return NULL;
3848
3849 return device->get_vector_affinity(device, comp_vector);
3850
3851}
3852
3853#endif
3854