#ifndef _QED_RDMA_H
#define _QED_RDMA_H
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_iwarp.h"
#include "qed_roce.h"

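/* RDMA capability limits and sizing constants. */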
#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
#define QED_RDMA_MAX_P_KEY (1)
#define QED_RDMA_MAX_WQE (0x7FFF)
#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
#define QED_RDMA_ACK_DELAY (15)
#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)

#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define QED_RDMA_MAX_SRQS (32 * 1024)

#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)

enum qed_rdma_toggle_bit {
	QED_RDMA_TOGGLE_BIT_CLEAR = 0,
	QED_RDMA_TOGGLE_BIT_SET = 1
};

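/*
 * Simple ID allocator: a fixed-size bitmap of up to @max_count IDs,
 * tagged with a short @name used in debug messages.
 */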
#define QED_RDMA_MAX_BMAP_NAME (10)
struct qed_bmap {
	unsigned long *bitmap;
	u32 max_count;
	char name[QED_RDMA_MAX_BMAP_NAME];
};

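/*
 * Per-hwfn RDMA state: ID bitmaps for the various resource types (CQs,
 * PDs, TIDs, QPs, SRQs, CIDs, DPIs), device/port attributes and the
 * iWARP-specific context.
 */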
struct qed_rdma_info {
	spinlock_t lock;

	struct qed_bmap cq_map;
	struct qed_bmap pd_map;
	struct qed_bmap tid_map;
	struct qed_bmap qp_map;
	struct qed_bmap srq_map;
	struct qed_bmap cid_map;
	struct qed_bmap tcp_cid_map;
	struct qed_bmap real_cid_map;
	struct qed_bmap dpi_map;
	struct qed_bmap toggle_bits;
	struct qed_rdma_events events;
	struct qed_rdma_device *dev;
	struct qed_rdma_port *port;
	u32 last_tid;
	u8 num_cnqs;
	u32 num_qps;
	u32 num_mrs;
	u32 num_srqs;
	u16 srq_id_offset;
	u16 queue_zone_base;
	u16 max_queue_zones;
	enum protocol_type proto;
	struct qed_iwarp_info iwarp;
	u8 active:1;
};

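/*
 * Driver-side representation of a queue pair, covering both RoCE and
 * iWARP state (cur_state vs. iwarp_state) plus the requester (SQ/ORQ)
 * and responder (RQ/IRQ) resources.
 */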
struct qed_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32 qpid;
	u16 icid;
	enum qed_roce_qp_state cur_state;
	enum qed_iwarp_qp_state iwarp_state;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;

	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;

	u16 pd;
	u16 pkey;
	u32 dest_qp;
	u16 mtu;
	u16 srq_id;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u16 dpi;
	u32 flow_label;
	bool lb_indication;
	u16 vlan_id;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	union qed_gid sgid;
	union qed_gid dgid;
	enum roce_mode roce_mode;
	u16 udp_src_port;
	u8 stats_queue;

	/* Requester (SQ/ORQ) resources */
	u8 max_rd_atomic_req;
	u32 sq_psn;
	u16 sq_cq_id;
	u16 sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void *orq;
	dma_addr_t orq_phys_addr;
	u8 orq_num_pages;
	bool req_offloaded;

	/* Responder (RQ/IRQ) resources */
	u8 max_rd_atomic_resp;
	u32 rq_psn;
	u16 rq_cq_id;
	u16 rq_num_pages;
	dma_addr_t rq_pbl_ptr;
	void *irq;
	dma_addr_t irq_phys_addr;
	u8 irq_num_pages;
	bool resp_offloaded;
	u32 cq_prod;

	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];

	void *shared_queue;
	dma_addr_t shared_queue_phys_addr;
	struct qed_iwarp_ep *ep;
};

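/*
 * When RDMA support is compiled out (CONFIG_QED_RDMA=n), provide empty or
 * failing stubs so that callers do not need their own #ifdefs.
 */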
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt) {}
static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif

int
qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
		    struct qed_bmap *bmap, u32 max_count, char *name);

void
qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);

int
qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
		       struct qed_bmap *bmap, u32 *id_num);

void
qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);

void
qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);

int
qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
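
/*
 * Illustrative sketch only (not copied from qed_rdma.c): a caller would
 * typically reserve an ID under the qed_rdma_info lock and release it the
 * same way. The p_rdma_info pointer and cq_map member assumed here are the
 * ones declared in this file and in qed.h; real call sites may differ.
 *
 *	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
 *	u32 cq_id;
 *	int rc;
 *
 *	spin_lock_bh(&p_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &cq_id);
 *	spin_unlock_bh(&p_info->lock);
 *	if (rc)
 *		return rc;
 *
 *	... use cq_id, then later ...
 *
 *	spin_lock_bh(&p_info->lock);
 *	qed_bmap_release_id(p_hwfn, &p_info->cq_map, cq_id);
 *	spin_unlock_bh(&p_info->lock);
 */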

void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac);

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
#endif