#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
	enum ib_gid_type gid_type;
	struct net_device *ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};
enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	IB_DEVICE_INIT_TYPE = (1 << 9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	IB_DEVICE_RESERVED = (1 << 16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),
	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag
	 * should also be set by any iWarp device which must support FRs
	 * to comply to the iWarp verbs spec.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC = 1,
	IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
	IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};

struct ib_cq_init_attr {
	unsigned int cqe;
	int comp_vector;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in KHZ */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
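
/*
 * A minimal usage sketch (not part of the original header): clamping a
 * byte count to the nearest IB MTU enum and converting back.  The
 * variable names are hypothetical.
 *
 *	int path_bytes = 3000;
 *	enum ib_mtu mtu = ib_mtu_int_to_enum(path_bytes);	// IB_MTU_2048
 *	int usable = ib_mtu_enum_to_int(mtu);			// 2048
 */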

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.
 * @value - Array of u64 counters that are accessed by the sysfs code
 *   and filled in by the driver's get_hw_stats routine.
 */
struct rdma_hw_stats {
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
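
/*
 * A minimal sketch (not from this header) of a driver's alloc_hw_stats
 * callback built on the helper above; the counter names and the
 * foo_alloc_hw_stats identifier are hypothetical.
 *
 *	static const char * const foo_counter_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *dev,
 *							u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *				ARRAY_SIZE(foo_counter_names),
 *				RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */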

/* Define bits for the various functionality this port needs to be
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000

#define RDMA_CORE_PORT_IBA_IB	       (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	       (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	       (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)
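
/*
 * A minimal sketch (not from this header) of how a RoCE driver's
 * get_port_immutable callback might use these bit groups.  The
 * foo_port_immutable identifier and the table sizes are hypothetical,
 * and IB_MGMT_MAD_SIZE is assumed to come from <rdma/ib_mad.h>.
 *
 *	static int foo_port_immutable(struct ib_device *dev, u8 port_num,
 *				      struct ib_port_immutable *immutable)
 *	{
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
 *		immutable->gid_tbl_len = 16;
 *		immutable->pkey_tbl_len = 1;
 *		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 *		return 0;
 *	}
 */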

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u16 lid;
	u16 sm_lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	bool grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for normal
 *   registration
 * @IB_MR_TYPE_SIGNATURE: memory region that is used for signature
 *   operations (data-integrity capable regions)
 * @IB_MR_TYPE_SG_GAPS:   memory region that is capable of registering
 *   arbitrary sg lists (without the normal mr constraints - see
 *   ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * enum ib_signature_type - Signature types
 * @IB_SIG_TYPE_NONE: Unprotected.
 * @IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * enum ib_t10_dif_bg_type - Signature T10-DIF block-guard types
 * @IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * @IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16 pi_interval;
	u16 bg;
	u16 app_tag;
	u32 ref_tag;
	bool ref_remap;
	bool app_escape;
	bool ref_escape;
	u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *   be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8 check_mask;
	struct ib_sig_domain mem;
	struct ib_sig_domain wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type err_type;
	u32 expected;
	u32 actual;
	u64 sig_err_offset;
	u32 key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route grh;
	u16 dlid;
	u8 sl;
	u8 src_path_bits;
	u8 static_rate;
	u8 ah_flags;
	u8 port_num;
	u8 dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	int wc_flags;
	u16 pkey_index;
	u16 slid;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer.
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the
 * event_handler callback to destroy the passed in qp.
 */
struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	enum ib_qp_create_flags create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct ib_ah_attr ah_attr;
	struct ib_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
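
/*
 * A minimal sketch (not from this header) of building an RDMA WRITE
 * with the embedded-ib_send_wr pattern that rdma_wr() and friends
 * unwrap; sge, raddr, rkey and qp are assumed set up elsewhere.
 *
 *	struct ib_rdma_wr wr = {};
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list = &sge;
 *	wr.wr.num_sge = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr = raddr;
 *	wr.rkey = rkey;
 *	if (ib_post_send(qp, &wr.wr, &bad_wr))
 *		pr_err("post_send failed\n");
 */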

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index;
	u8 port_num;
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr wr;
	struct ib_sig_attrs *sig_attrs;
	struct ib_mr *sig_mr;
	int access_flags;
	struct ib_sge *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = 1,
	IB_ACCESS_REMOTE_WRITE = (1<<1),
	IB_ACCESS_REMOTE_READ = (1<<2),
	IB_ACCESS_REMOTE_ATOMIC = (1<<3),
	IB_ACCESS_MW_BIND = (1<<4),
	IB_ZERO_BASED = (1<<5),
	IB_ACCESS_ON_DEMAND = (1<<6),
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

struct ib_ucontext {
	struct ib_device *device;
	struct list_head pd_list;
	struct list_head mr_list;
	struct list_head mw_list;
	struct list_head cq_list;
	struct list_head qp_list;
	struct list_head srq_list;
	struct list_head ah_list;
	struct list_head xrcd_list;
	struct list_head rule_list;
	struct list_head wq_list;
	struct list_head rwq_ind_tbl_list;
	int closing;

	struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root umem_tree;
	/*
	 * Protects the umem tree, as well as odp_mrs_count and mmu
	 * notifier registration.
	 */
	struct rw_semaphore umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier mn;
	atomic_t notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head no_private_counters;
	int odp_mrs_count;
#endif
};

struct ib_uobject {
	u64 user_handle;
	struct ib_ucontext *context;
	void *object;
	struct list_head list;
	int id;
	struct kref ref;
	struct rw_semaphore mutex;
	struct rcu_head rcu;
	int live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt; /* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,		/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	/* poll from softirq context */
	IB_POLL_WORKQUEUE,	/* poll from workqueue */
};

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
			u32 srq_num;
		} xrc;
	} ext;
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq **ind_tbl;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u32 length;
	unsigned int page_size;
	bool need_inval;
	union {
		struct ib_uobject *uobject;	/* user */
		struct list_head qp_entry;	/* FR */
	};
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_INNER = 0x100,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2   /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* For all fragmented packets except the
				    last fragment */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_uobject *uobject;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,
	IB_MAD_RESULT_SUCCESS = 1 << 0,
	IB_MAD_RESULT_REPLY = 1 << 1,
	IB_MAD_RESULT_CONSUMED = 1 << 2
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache **pkey_cache;
	struct ib_gid_table **gid_cache;
	u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
	int (*mapping_error)(struct ib_device *dev,
			     u64 dma_addr);
	u64 (*map_single)(struct ib_device *dev,
			  void *ptr, size_t size,
			  enum dma_data_direction direction);
	void (*unmap_single)(struct ib_device *dev,
			     u64 addr, size_t size,
			     enum dma_data_direction direction);
	u64 (*map_page)(struct ib_device *dev,
			struct page *page, unsigned long offset,
			size_t size,
			enum dma_data_direction direction);
	void (*unmap_page)(struct ib_device *dev,
			   u64 addr, size_t size,
			   enum dma_data_direction direction);
	int (*map_sg)(struct ib_device *dev,
		      struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction);
	int (*map_sg_attrs)(struct ib_device *dev,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction direction,
			    unsigned long attrs);
	void (*unmap_sg_attrs)(struct ib_device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction direction,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct ib_device *dev,
				    u64 dma_handle,
				    size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct ib_device *dev,
				       u64 dma_handle,
				       size_t size,
				       enum dma_data_direction dir);
	void *(*alloc_coherent)(struct ib_device *dev,
				size_t size,
				u64 *dma_handle,
				gfp_t flag);
	void (*free_coherent)(struct ib_device *dev,
			      size_t size, void *cpu_addr,
			      u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int pkey_tbl_len;
	int gid_tbl_len;
	u32 core_cap_flags;
	u32 max_mad_size;
};

struct ib_device {
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	spinlock_t client_data_lock;
	struct list_head core_list;
	/* Access to the client_data_list is protected by the
	 * client_data_lock spinlock and the lists_rwsem read-write
	 * semaphore.
	 */
	struct list_head client_data_list;

	struct ib_cache cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable *port_immutable;

	int num_comp_vectors;

	struct iw_cm_verbs *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls to get_hw_stats until the stored
	 *     lifespan time has passed)
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats,
			    u8 port, int index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return
	 * the net device of device @device at port @port_num or NULL if
	 * such a net device doesn't exist.  The vendor driver should call
	 * dev_hold on this net device.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u8 port_num);
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of device @device at gid index @index of port @port_num to be
	 * @gid.  @context allows the HW vendor driver to store extra
	 * information together with a GID entry; parameters are consistent
	 * until the next call of add_gid or del_gid.  The function should
	 * return 0 on success or an error otherwise, and is only called
	 * when the RoCE GID table is used.
	 */
	int (*add_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       const union ib_gid *gid,
		       const struct ib_gid_attr *attr,
		       void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num and
	 * free any memory it allocated in @context.  Only called when the
	 * RoCE GID table is used.
	 */
	int (*del_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       void **context);
	int (*query_pkey)(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey);
	int (*modify_device)(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify);
	int (*modify_port)(struct ib_device *device,
			   u8 port_num, int port_modify_mask,
			   struct ib_port_modify *port_modify);
	struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
					       struct ib_udata *udata);
	int (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context,
		    struct vm_area_struct *vma);
	struct ib_pd * (*alloc_pd)(struct ib_device *device,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah * (*create_ah)(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr,
				    struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah,
			 struct ib_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah,
			struct ib_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq * (*create_srq)(struct ib_pd *pd,
				      struct ib_srq_init_attr *srq_init_attr,
				      struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq,
			  struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq,
			 struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq);
	int (*post_srq_recv)(struct ib_srq *srq,
			     struct ib_recv_wr *recv_wr,
			     struct ib_recv_wr **bad_recv_wr);
	struct ib_qp * (*create_qp)(struct ib_pd *pd,
				    struct ib_qp_init_attr *qp_init_attr,
				    struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp,
			 struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp);
	int (*post_send)(struct ib_qp *qp,
			 struct ib_send_wr *send_wr,
			 struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp,
			 struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	struct ib_cq * (*create_cq)(struct ib_device *device,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
			 u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq);
	int (*resize_cq)(struct ib_cq *cq, int cqe,
			 struct ib_udata *udata);
	int (*poll_cq)(struct ib_cq *cq, int num_entries,
		       struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq,
			     enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq,
			       int wc_cnt);
	struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
				     int mr_access_flags);
	struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
				      u64 start, u64 length,
				      u64 virt_addr,
				      int mr_access_flags,
				      struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr,
			     int flags,
			     u64 start, u64 length,
			     u64 virt_addr,
			     int mr_access_flags,
			     struct ib_pd *pd,
			     struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg);
	int (*map_mr_sg)(struct ib_mr *mr,
			 struct scatterlist *sg,
			 int sg_nents,
			 unsigned int *sg_offset);
	struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
				   enum ib_mw_type type,
				   struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
				     int mr_access_flags,
				     struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr,
			    u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*detach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*process_mad)(struct ib_device *device,
			   int process_mad_flags,
			   u8 port_num,
			   const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in_mad,
			   size_t in_mad_size,
			   struct ib_mad_hdr *out_mad,
			   size_t *out_mad_size,
			   u16 *out_mad_pkey_index);
	struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
				       struct ib_ucontext *ucontext,
				       struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow * (*create_flow)(struct ib_qp *qp,
					struct ib_flow_attr *flow_attr,
					int domain);
	int (*destroy_flow)(struct ib_flow *flow_id);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq * (*create_wq)(struct ib_pd *pd,
				    struct ib_wq_init_attr *init_attr,
				    struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq);
	int (*modify_wq)(struct ib_wq *wq,
			 struct ib_wq_attr *attr,
			 u32 wq_attr_mask,
			 struct ib_udata *udata);
	struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
							  struct ib_rwq_ind_table_init_attr *init_attr,
							  struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dma_mapping_ops *dma_ops;

	struct module *owner;
	struct device dev;
	struct kobject *ports_parent;
	struct list_head port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int uverbs_abi_ver;
	u64 uverbs_cmd_mask;
	u64 uverbs_ex_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64 node_guid;
	u32 local_dma_lkey;
	u16 is_switch:1;
	u8 node_type;
	u8 phys_port_cnt;
	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct
	 * ib_device in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
};

struct ib_client {
	char *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is assigned if applicable.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * net_dev lookup functions to find their own net_dev.  The caller
	 * is responsible for calling dev_put on the returned netdev.
	 */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	const void __user *p = udata->inbuf + offset;
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}
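
/*
 * A minimal sketch (not from this header) of a verb handler copying a
 * command structure in from user space and rejecting unknown trailing
 * input.  struct foo_create_cq_cmd is a hypothetical driver command.
 *
 *	struct foo_create_cq_cmd cmd;
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	if (!ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */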

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This helper lets a low-level driver's modify_qp method validate the
 * consumer's input: it checks that cur_state and next_state are valid
 * QP states, that the transition is allowed by the IB spec, and that
 * the attribute mask is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
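
/*
 * A minimal sketch (not from this header) of iterating every valid
 * port on a device.  rdma_start_port()/rdma_end_port() hide the fact
 * that switches expose port 0 while HCAs number ports from 1; the
 * setup_mad_agent() callee is hypothetical.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (rdma_cap_ib_mad(device, port))
 *			setup_mad_agent(device, port);
 *	}
 */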

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee
 * that a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port must undertake the additional adminstrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if port uses Ethernet address handles (RoCE).
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 * @device: Device
 * @port_num: Port number
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism in order to
 * manage its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

2558int ib_query_gid(struct ib_device *device,
2559 u8 port_num, int index, union ib_gid *gid,
2560 struct ib_gid_attr *attr);
2561
2562int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2563 int state);
2564int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2565 struct ifla_vf_info *info);
2566int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2567 struct ifla_vf_stats *stats);
2568int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2569 int type);
2570
2571int ib_query_pkey(struct ib_device *device,
2572 u8 port_num, u16 index, u16 *pkey);
2573
2574int ib_modify_device(struct ib_device *device,
2575 int device_modify_mask,
2576 struct ib_device_modify *device_modify);
2577
2578int ib_modify_port(struct ib_device *device,
2579 u8 port_num, int port_modify_mask,
2580 struct ib_port_modify *port_modify);
2581
2582int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2583 enum ib_gid_type gid_type, struct net_device *ndev,
2584 u8 *port_num, u16 *index);
2585
2586int ib_find_pkey(struct ib_device *device,
2587 u8 port_num, u16 pkey, u16 *index);
2588
2589enum ib_pd_flags {
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
2600};
2601
2602struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2603 const char *caller);
2604#define ib_alloc_pd(device, flags) \
2605 __ib_alloc_pd((device), (flags), __func__)
2606void ib_dealloc_pd(struct ib_pd *pd);
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2627 enum rdma_network_type net_type,
2628 union ib_gid *sgid, union ib_gid *dgid);
2629
2630
2631
2632
2633
2634int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2648 const struct ib_wc *wc, const struct ib_grh *grh,
2649 struct ib_ah_attr *ah_attr);
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2664 const struct ib_grh *grh, u8 port_num);
2665
2666
2667
2668
2669
2670
2671
2672
2673int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2674
2675
2676
2677
2678
2679
2680
2681
2682int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2683
2684
2685
2686
2687
2688int ib_destroy_ah(struct ib_ah *ah);
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703struct ib_srq *ib_create_srq(struct ib_pd *pd,
2704 struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receive queue entries drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
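
/*
 * Illustrative sketch (not part of this header): creating an RC QP.
 * "pd", "send_cq" and "recv_cq" are assumed to exist; the capability
 * values are arbitrary.
 *
 *	struct ib_qp_init_attr init_attr = {};
 *	struct ib_qp *qp;
 *
 *	init_attr.qp_type	   = IB_QPT_RC;
 *	init_attr.send_cq	   = send_cq;
 *	init_attr.recv_cq	   = recv_cq;
 *	init_attr.sq_sig_type	   = IB_SIGNAL_REQ_WR;
 *	init_attr.cap.max_send_wr  = 16;
 *	init_attr.cap.max_recv_wr  = 16;
 *	init_attr.cap.max_send_sge = 1;
 *	init_attr.cap.max_recv_sge = 1;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */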

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
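
/*
 * Illustrative sketch (not part of this header): the RESET -> INIT
 * transition for an RC QP.  "pkey_index" and "port_num" are assumed to
 * have been chosen by the caller.
 *
 *	struct ib_qp_attr attr = {};
 *	int ret;
 *
 *	attr.qp_state	     = IB_QPS_INIT;
 *	attr.pkey_index	     = pkey_index;
 *	attr.port_num	     = port_num;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *			       IB_ACCESS_REMOTE_WRITE;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */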

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
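
/*
 * Illustrative sketch (not part of this header): posting a single
 * signaled SEND.  "qp" is assumed to be in RTS, "sge" to describe a
 * DMA-mapped buffer with a valid lkey, and MY_SEND_COOKIE is a
 * hypothetical completion cookie.
 *
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id      = MY_SEND_COOKIE;
 *	wr.opcode     = IB_WR_SEND;
 *	wr.sg_list    = &sge;
 *	wr.num_sge    = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("failed to post send WR\n");
 */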

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
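
/*
 * Illustrative sketch (not part of this header): keeping the receive
 * queue replenished.  "sge" is assumed to describe a DMA-mapped receive
 * buffer, and MY_RECV_COOKIE is a hypothetical completion cookie.
 *
 *	struct ib_recv_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id   = MY_RECV_COOKIE;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *
 *	if (ib_post_recv(qp, &wr, &bad_wr))
 *		pr_err("failed to post recv WR\n");
 */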

struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
			  int nr_cqe, int comp_vector,
			  enum ib_poll_context poll_ctx);
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
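
/*
 * Illustrative sketch (not part of this header): with the ib_alloc_cq()
 * API, completions are dispatched to a per-WR "done" callback through
 * the wr_cqe member instead of being demultiplexed on wr_id.  The
 * callback name is hypothetical.
 *
 *	static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		if (wc->status != IB_WC_SUCCESS)
 *			pr_err("send failed: %d\n", wc->status);
 *	}
 *
 *	struct ib_cqe cqe = { .done = my_send_done };
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	// ... set wr.wr_cqe = &cqe before posting ...
 */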

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
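
/*
 * Illustrative sketch (not part of this header): draining a CQ with
 * ib_poll_cq().  A return of 0 means the CQ was empty; < 0 an error.
 *
 *	struct ib_wc wc;
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 1, &wc)) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("completion error %d\n", wc.status);
 *		// ... demultiplex on wc.wr_id ...
 *	}
 */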

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
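
/*
 * Illustrative sketch (not part of this header): the standard
 * poll/rearm/repoll pattern that avoids losing completions to the race
 * described above.  "handle" is a hypothetical helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */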

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
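
/*
 * Illustrative sketch (not part of this header): mapping a buffer for
 * the device and checking the result.  "buf" is assumed to be a
 * kmalloc'ed buffer of "len" bytes.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	// ... use dma_addr in an ib_sge ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */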

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  unsigned long dma_attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, dma_attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     unsigned long dma_attrs)
{
	dma_unmap_single_attrs(dev->dma_device, addr, size,
			       direction, dma_attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
						  dma_attrs);
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
					     dma_attrs);
	else
		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
				   dma_attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
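
/*
 * Illustrative sketch (not part of this header): walking a mapped
 * scatterlist with the accessors above.  "n" is assumed to be the
 * value returned by ib_dma_map_sg().
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, n, i)
 *		pr_debug("seg %d: addr 0x%llx len %u\n", i,
 *			 (unsigned long long)ib_sg_dma_address(dev, sg),
 *			 ib_sg_dma_len(dev, sg));
 */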

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr: The memory region to update.
 * @newkey: The new key to be used in both the lkey and rkey.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
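
/*
 * Illustrative sketch (not part of this header): cycling the key
 * portion of an MR before re-registering it, as fast-registration
 * users commonly do to guard against stale remote accesses.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */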

/**
 * ib_alloc_fmr - Allocates a unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  First use is for signature status check.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
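
/*
 * Illustrative sketch (not part of this header): mapping a scatterlist
 * onto an MR allocated with ib_alloc_mr().  "sgl" is assumed to be
 * already DMA-mapped with "nents" entries.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 *	// ... post an IB_WR_REG_MR work request to activate the MR ...
 */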

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset,
		   int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
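
/*
 * Illustrative sketch (not part of this header): quiescing a QP before
 * destroying it.  The QP's CQs must not use IB_POLL_DIRECT for the
 * drain helpers to work.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */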

int ib_resolve_eth_dmac(struct ib_device *device,
			struct ib_ah_attr *ah_attr);

#endif /* IB_VERBS_H */