1
2
3
4
5
6#ifndef HFI1_TID_RDMA_H
7#define HFI1_TID_RDMA_H
8
9#include <linux/circ_buf.h>
10#include "common.h"
11
12
/*
 * Circular-index arithmetic for power-of-two ring sizes; masking with
 * (size - 1) only works when size is a power of two.
 */
#define CIRC_ADD(val, add, size)  (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size)      (((val) + 1) & ((size) - 1))
#define CIRC_PREV(val, size)      (((val) - 1) & ((size) - 1))
16
/*
 * TID RDMA segment sizing.  MIN == MAX, so a single fixed segment size
 * (256 KiB) is currently supported.
 */
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18)
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18)
/* Pages needed to cover one maximum-size segment */
#define TID_RDMA_MAX_PAGES (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT)
20
21
22
23
24
25
26
27
28
29
/*
 * QP s_flags/r_flags bit assignments used by the TID RDMA code.
 * NOTE(review): the gaps in the numbering (BIT(1), BIT(3)-BIT(4),
 * BIT(7)-BIT(16), BIT(18)) are presumably reserved for flags defined
 * elsewhere (e.g. the rdmavt RVT_S_* bits) -- confirm against the
 * rdmavt headers before reusing any of them.
 */
/* Send side: TID send engine has marked itself busy -- TODO confirm */
#define HFI1_S_TID_BUSY_SET BIT(0)

/* Responder side: resource (reap) timer armed; see hfi1_del_tid_reap_timer() */
#define HFI1_R_TID_RSC_TIMER BIT(2)

/* WQE/ack interlock between the TID and regular send paths; see
 * hfi1_tid_rdma_wqe_interlock() / hfi1_tid_rdma_ack_interlock() */
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_R_TID_WAIT_INTERLCK BIT(6)

/* Requester side: retry timer armed; see hfi1_add_tid_retry_timer() */
#define HFI1_S_TID_RETRY_TIMER BIT(17)

/* Responder side: track r_next_psn in software -- TODO confirm */
#define HFI1_R_TID_SW_PSN BIT(19)
42
43
44
45
46
47
48
49
50
51
52
53
54
/*
 * Number of TID RDMA WRITE resources per QP -- NOTE(review): inferred
 * from the name (likely sizes a per-QP write/ack ring); confirm against
 * the users of this constant in the .c files.
 */
#define HFI1_TID_RDMA_WRITE_CNT 8
56
/*
 * TID RDMA capability parameters for one endpoint.  A QP keeps a local
 * copy plus an RCU-managed remote copy (struct tid_rdma_qp_params);
 * presumably exchanged via the tid_rdma_conn_req()/reply()/resp()
 * negotiation hooks declared below.
 * NOTE(review): per-field notes are inferred from the names -- verify
 * against the negotiation code before relying on them.
 */
struct tid_rdma_params {
	struct rcu_head rcu_head;	/* for RCU-deferred free of the remote copy */
	u32 qp;			/* peer's TID/KDETH QP number -- TODO confirm */
	u32 max_len;		/* maximum request/segment length supported */
	u16 jkey;		/* job key for TID payload packets */
	u8 max_read;		/* max outstanding TID RDMA READ requests */
	u8 max_write;		/* max outstanding TID RDMA WRITE requests */
	u8 timeout;		/* IB-style timeout exponent */
	u8 urg;			/* urgent packet setting */
	u8 version;		/* TID RDMA protocol version */
};
68
/*
 * Per-QP TID RDMA parameter state: the local capability set and the
 * remote peer's set (RCU-protected; NULL until negotiation completes).
 * trigger_work is a work item -- presumably used to kick the
 * negotiation/send machinery from process context (TODO confirm).
 */
struct tid_rdma_qp_params {
	struct work_struct trigger_work;
	struct tid_rdma_params local;
	struct tid_rdma_params __rcu *remote;	/* dereference under RCU */
};
74
75
/*
 * Software shadow of a hardware TID flow.
 * NOTE(review): field meanings inferred from names/types; confirm
 * against hfi1_kern_setup_hw_flow()/hfi1_kern_clear_hw_flow().
 */
struct tid_flow_state {
	u32 generation;		/* current generation of the HW flow */
	u32 psn;		/* current/next PSN in flow space */
	u32 r_next_psn;		/* next PSN expected on receive */
	u8 index;		/* HW flow index in use */
	u8 last_index;		/* previously used HW flow index -- TODO confirm */
	u8 flags;
};
84
/*
 * Lifecycle states of a struct tid_rdma_request.  The comments below
 * follow the obvious reading of the names (init -> active -> complete,
 * with resend/sync/RNR-NAK detours) -- confirm against the state
 * machine in the .c file.
 */
enum tid_rdma_req_state {
	TID_REQUEST_INACTIVE = 0,	/* slot not in use */
	TID_REQUEST_INIT,		/* initialized, not yet active */
	TID_REQUEST_INIT_RESEND,	/* re-initialized for a resend */
	TID_REQUEST_ACTIVE,		/* transfer in progress */
	TID_REQUEST_RESEND,		/* scheduled for resend */
	TID_REQUEST_RESEND_ACTIVE,	/* resend in progress */
	TID_REQUEST_QUEUED,		/* queued, waiting for resources */
	TID_REQUEST_SYNC,		/* held at a sync/resync point */
	TID_REQUEST_RNR_NAK,		/* responder RNR NAK in effect */
	TID_REQUEST_COMPLETE,		/* all segments finished */
};
97
/*
 * One TID RDMA request (READ or WRITE).  The flows[] array is managed
 * as a power-of-two ring through the CIRC_* macros above, with the u16
 * head/tail indices below; the u32 counters track per-segment progress.
 * NOTE(review): per-field notes are inferred from names -- verify
 * against the ring handling in the .c file.
 */
struct tid_rdma_request {
	struct rvt_qp *qp;		/* owning QP */
	struct hfi1_ctxtdata *rcd;	/* receive context backing this request */
	union {
		struct rvt_swqe *swqe;		/* requester side */
		struct rvt_ack_entry *ack;	/* responder side */
	} e;

	struct tid_rdma_flow *flows;	/* flow ring */
	struct rvt_sge_state ss;	/* SGE state for the request data */
	u16 n_flows;		/* ring size (power of two for CIRC_*) */
	u16 setup_head;		/* flow being set up */
	u16 clear_tail;		/* flow being cleared/retired */
	u16 flow_idx;		/* flow currently sending/receiving */
	u16 acked_tail;		/* oldest not-fully-acked flow */

	u32 seg_len;		/* length of one segment */
	u32 total_len;		/* total request length */
	u32 r_ack_psn;		/* next PSN to be acked */
	u32 r_flow_psn;		/* next PSN in flow space */
	u32 r_last_acked;	/* last PSN acked */
	u32 s_next_psn;		/* next PSN to send */

	u32 total_segs;		/* number of segments in the request */
	u32 cur_seg;		/* segment currently being processed */
	u32 comp_seg;		/* segments completed */
	u32 ack_seg;		/* segments acked */
	u32 alloc_seg;		/* segments with TID resources allocated */
	u32 isge;		/* SGE index within the request */
	u32 ack_pending;	/* acks still outstanding */

	enum tid_rdma_req_state state;	/* see enum above */
};
131
132
133
134
135
136
137
/*
 * Per-flow PSN bookkeeping.  Two PSN spaces appear to be tracked: the
 * TID/KDETH space (spsn/lpsn/r_next_psn) and -- judging by the ib_
 * prefix -- the Verbs/IB space (ib_spsn/ib_lpsn), the latter
 * presumably only for TID RDMA READ (TODO confirm).
 */
struct flow_state {
	u32 flags;
	u32 resp_ib_psn;	/* IB PSN of the response for this flow */
	u32 generation;		/* generation number of the flow */
	u32 spsn;		/* starting PSN (TID space) */
	u32 lpsn;		/* last PSN (TID space) */
	u32 r_next_psn;		/* next PSN expected (TID space) */

	/* IB/Verbs-space PSNs -- TODO confirm READ-only usage */
	u32 ib_spsn;		/* starting PSN (IB space) */
	u32 ib_lpsn;		/* last PSN (IB space) */
};
150
/*
 * A run of pages to be expressed as one TID entry, bit-packed into
 * 8 bytes.  Storing the DMA address in 48 bits assumes bus addresses
 * fit in 48 bits -- NOTE(review): confirm that platform assumption.
 * Field notes below are inferred from the names.
 */
struct tid_rdma_pageset {
	dma_addr_t addr : 48;	/* DMA address of the first page */
	u8 idx: 8;		/* starting page index within the request */
	u8 count : 7;		/* number of pages (<= 127) */
	u8 mapped: 1;		/* set once the set is DMA-mapped */
};
157
158
159
160
161
162
163
164
/*
 * Kernel-side record of TID entries taken from one TID group.
 * NOTE(review): 'map' looks like a bitmask of in-use entries within
 * the group and 'cnt' the number used -- confirm against the TID
 * allocator.
 */
struct kern_tid_node {
	struct tid_group *grp;	/* group the entries come from */
	u8 map;			/* bitmask of group entries in use */
	u8 cnt;			/* number of entries used */
};
170
171
/*
 * One flow of a TID RDMA request: a segment's worth of TID resources,
 * page sets and PSN state.  The trailing arrays are sized for a full
 * maximum-size segment (TID_RDMA_MAX_PAGES).
 * NOTE(review): per-field notes are inferred from names -- verify in
 * the .c file.
 */
struct tid_rdma_flow {
	struct flow_state flow_state;	/* PSN bookkeeping for this flow */
	struct tid_rdma_request *req;	/* owning request */
	u32 tid_qpn;		/* TID/KDETH QP number used on the wire */
	u32 tid_offset;		/* offset within the current TID entry */
	u32 length;		/* data length covered by this flow */
	u32 sent;		/* bytes sent so far */
	u8 tnode_cnt;		/* valid entries in tnode[] */
	u8 tidcnt;		/* valid entries in tid_entry[] */
	u8 tid_idx;		/* tid_entry[] index currently in use */
	u8 idx;			/* this flow's index in the request's ring */
	u8 npagesets;		/* valid entries in pagesets[] */
	u8 npkts;		/* packets needed for this flow */
	u8 pkt;			/* packets processed so far */
	u8 resync_npkts;	/* packets accounted for after a resync */
	struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];	    /* TID group usage */
	struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES]; /* page runs */
	u32 tid_entry[TID_RDMA_MAX_PAGES];	/* encoded TID entries */
};
197
/*
 * Responder-side RNR NAK progress for TID RDMA:
 * INIT (none pending) -> SEND (queued to go out) -> SENT (on the wire).
 * NOTE(review): inferred from the names -- confirm in the .c file.
 */
enum tid_rnr_nak_state {
	TID_RNR_NAK_INIT = 0,
	TID_RNR_NAK_SEND,
	TID_RNR_NAK_SENT,
};
203
/* Connection hooks for exchanging tid_rdma_params with the peer QP
 * (64-bit payloads suggest an OPFN-style exchange -- TODO confirm). */
bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);

/* Expected-receive (TID) resource setup/teardown for a request */
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
/* Out-of-line worker for trdma_clean_swqe() below */
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
216
217
218
219
220
221
222static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
223{
224 if (!wqe->priv)
225 return;
226 __trdma_clean_swqe(qp, wqe);
227}
228
void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);

/* QP private TID state lifecycle (hooked into rdmavt QP init/free) */
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);

/* Hardware TID flow management for a receive context */
int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);

/* Counter accessor for the software TID-wait statistic */
struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data);

/* TID RDMA READ: header/packet builders (return length built into
 * bth*/len out-params -- TODO confirm units) and receive handlers */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr,
				    u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u32 *bth0,
				  u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
/* KDETH error-flag recovery path for TID packets */
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			      struct hfi1_pportdata *ppd,
			      struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
/* WQE interlock check; pairs with HFI1_S_TID_WAIT_INTERLCK */
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);

/* Out-of-line worker for hfi1_setup_tid_rdma_wqe() below */
void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
265static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
266 struct rvt_swqe *wqe)
267{
268 if (wqe->priv &&
269 (wqe->wr.opcode == IB_WR_RDMA_READ ||
270 wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
271 wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
272 setup_tid_rdma_wqe(qp, wqe);
273}
274
/* TID RDMA WRITE: request/response/data/ack builders and receive handlers */
u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct ib_other_headers *ohdr,
				  u32 *bth1, u32 *bth2, u32 *len);

void hfi1_compute_tid_rdma_flow_wt(void);

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				   struct ib_other_headers *ohdr, u32 *bth1,
				   u32 bth2, u32 *len,
				   struct rvt_sge_state **ss);

/* Responder resource (reap) timer; pairs with HFI1_R_TID_RSC_TIMER */
void hfi1_del_tid_reap_timer(struct rvt_qp *qp);

void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len);

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2);

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);

/* Requester retry timer; pairs with HFI1_S_TID_RETRY_TIMER */
void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);

/* RESYNC build/receive (generation resynchronization -- TODO confirm) */
u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx);

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);

/* TID send engine, separate from the regular verbs send engine */
struct hfi1_pkt_state;
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void _hfi1_do_tid_send(struct work_struct *work);

bool hfi1_schedule_tid_send(struct rvt_qp *qp);

/* Ack interlock check; pairs with HFI1_R_TID_WAIT_INTERLCK */
bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);
321
322#endif
323