1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/acpi.h>
34#include <linux/etherdevice.h>
35#include <linux/interrupt.h>
36#include <linux/kernel.h>
37#include <linux/types.h>
38#include <net/addrconf.h>
39#include <rdma/ib_addr.h>
40#include <rdma/ib_cache.h>
41#include <rdma/ib_umem.h>
42#include <rdma/uverbs_ioctl.h>
43
44#include "hnae3.h"
45#include "hns_roce_common.h"
46#include "hns_roce_device.h"
47#include "hns_roce_cmd.h"
48#include "hns_roce_hem.h"
49#include "hns_roce_hw_v2.h"
50
51static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 struct ib_sge *sg)
53{
54 dseg->lkey = cpu_to_le32(sg->lkey);
55 dseg->addr = cpu_to_le64(sg->addr);
56 dseg->len = cpu_to_le32(sg->length);
57}
58
/*
 * Build the fast-register (FRMR) portion of an RC send WQE from an
 * IB_WR_REG_MR work request.
 *
 * The FRMR WQE reuses the RC send WQE layout: msg_len/inv_key carry the
 * low/high 32 bits of the PBL base address, and byte_16/byte_20 carry the
 * low/high 32 bits of the MR length.  fseg holds the PBL size and page
 * size configuration.
 */
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 struct hns_roce_wqe_frmr_seg *fseg,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);

	/* Translate the requested access flags into byte_4 permission bits */
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_RR_S,
		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_RW_S,
		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_LW_S,
		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

	/* msg_len/inv_key are repurposed to hold the 64-bit PBL base addr */
	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

	/* byte_16/byte_20 are repurposed to hold the 64-bit MR length */
	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
	/* PG_SHIFT_OFFSET adjusts the stored page shift to the HW encoding */
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
	/* Block mode is always disabled here */
	roce_set_bit(fseg->mode_buf_pg_sz,
		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
99
100static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
101 const struct ib_atomic_wr *wr)
102{
103 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
104 aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
105 aseg->cmp_data = cpu_to_le64(wr->compare_add);
106 } else {
107 aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
108 aseg->cmp_data = 0;
109 }
110}
111
/*
 * Write the SGEs that do not fit in the WQE itself into the extended SGE
 * area, advancing *sge_ind for each one written.
 *
 * The extended SGE area is organised in pages; a run of SGEs must not be
 * written blindly across a page boundary, so the copy is split into the
 * part that fits before the boundary (fi_sge_num) and the remainder
 * (se_sge_num), re-resolving the destination pointer in between.
 */
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	/* RC/UC WQEs carry the first SGEs inline; UD carries none */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = wr->num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/* How many SGE slots remain before the current page boundary */
	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		/* Fill up to the boundary, then continue on the next page */
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
		dseg = get_send_extend_sge(qp,
					   (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		/* Everything fits before the boundary */
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}
158
159static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
160 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
161 void *wqe, unsigned int *sge_ind,
162 const struct ib_send_wr **bad_wr)
163{
164 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
165 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
166 struct hns_roce_qp *qp = to_hr_qp(ibqp);
167 int i;
168
169 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
170 if (le32_to_cpu(rc_sq_wqe->msg_len) >
171 hr_dev->caps.max_sq_inline) {
172 *bad_wr = wr;
173 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
174 rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
175 return -EINVAL;
176 }
177
178 if (wr->opcode == IB_WR_RDMA_READ) {
179 *bad_wr = wr;
180 dev_err(hr_dev->dev, "Not support inline data!\n");
181 return -EINVAL;
182 }
183
184 for (i = 0; i < wr->num_sge; i++) {
185 memcpy(wqe, ((void *)wr->sg_list[i].addr),
186 wr->sg_list[i].length);
187 wqe += wr->sg_list[i].length;
188 }
189
190 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
191 1);
192 } else {
193 if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
194 for (i = 0; i < wr->num_sge; i++) {
195 if (likely(wr->sg_list[i].length)) {
196 set_data_seg_v2(dseg, wr->sg_list + i);
197 dseg++;
198 }
199 }
200 } else {
201 roce_set_field(rc_sq_wqe->byte_20,
202 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
203 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
204 (*sge_ind) & (qp->sge.sge_cnt - 1));
205
206 for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
207 if (likely(wr->sg_list[i].length)) {
208 set_data_seg_v2(dseg, wr->sg_list + i);
209 dseg++;
210 }
211 }
212
213 set_extend_sge(qp, wr, sge_ind);
214 }
215
216 roce_set_field(rc_sq_wqe->byte_16,
217 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
218 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
219 }
220
221 return 0;
222}
223
224static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
225 const struct ib_qp_attr *attr,
226 int attr_mask, enum ib_qp_state cur_state,
227 enum ib_qp_state new_state);
228
/*
 * Post a chain of send work requests to the SQ of an RC, GSI or UD QP and
 * ring the SQ doorbell.  On failure *bad_wr points at the first WR that
 * could not be posted.
 */
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	/* NOTE(review): wr is reinterpreted as a UD WR before the QP type is
	 * checked; ah is only dereferenced on the GSI path below — confirm
	 * this is safe for RC WRs (the pointer is computed but unused).
	 */
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct hns_roce_wqe_frmr_seg *fseg;
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db;
	struct ib_qp_attr attr;
	unsigned int sge_ind = 0;
	unsigned int owner_bit;
	unsigned long flags;
	unsigned int ind;
	void *wqe = NULL;
	bool loopback;
	int attr_mask;
	u32 tmp_len;
	int ret = 0;
	u32 hr_op;
	u8 *smac;
	int nreq;
	int i;

	/* Only RC, GSI and UD QPs are handled by this function */
	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	/* Sends are only legal once the QP has reached RTS or later */
	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
		     qp->state == IB_QPS_RTR)) {
		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	sge_ind = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
								      wr->wr_id;

		/* Owner bit flips each time the producer index wraps, so the
		 * HW can tell fresh WQEs from stale ones.
		 */
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
		tmp_len = 0;

		if (ibqp->qp_type == IB_QPT_GSI) {
			/* Build a UD send WQE (GSI always uses SEND) */
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

			/* Destination MAC: bytes 0-3 in dmac, 4-5 in byte_48 */
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			/* Loopback when the destination MAC is our own */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			/* Total message length = sum of all SGE lengths */
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			/* Request a CQE for this WQE if signaled */
			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_CQE_S,
				     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			/* Solicited event */
			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_SE_S,
				     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       wr->num_sge);

			/* UD SGEs all live in the extended SGE area; record
			 * where they start.
			 */
			roce_set_field(ud_sq_wqe->byte_20,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     sge_ind & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			/* A remote Q_Key with the MSB set means "use the
			 * QP's own Q_Key" (standard IB Q_Key semantics).
			 */
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       le16_to_cpu(ah->av.vlan));
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			/* sl_tclass_flowlabel packs SL, traffic class and
			 * flow label into one word.
			 * NOTE(review): the tclass/flowlabel reads below use
			 * the raw value while the SL read uses le32_to_cpu —
			 * confirm which is intended on big-endian.
			 */
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_TCLASS_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
				       ah->av.sl_tclass_flowlabel &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
				     ah->av.vlan_en ? 1 : 0);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			/* All UD SGEs go to the extended SGE area */
			set_extend_sge(qp, wr, &sge_ind);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			/* Build an RC send WQE */
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				  V2_RC_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			/* wqe now points past the base WQE, at the segment
			 * area (FRMR seg, atomic seg or data segs).
			 */
			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
				break;
			case IB_WR_SEND_WITH_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
				break;
			case IB_WR_SEND_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
				break;
			case IB_WR_LOCAL_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
				/* Strong-order bit for local invalidate */
				roce_set_bit(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
				rc_sq_wqe->inv_key =
					    cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			case IB_WR_REG_MR:
				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
				fseg = wqe;
				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				hr_op =
				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				hr_op =
				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
				break;
			default:
				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
				break;
			}

			roce_set_field(rc_sq_wqe->byte_4,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
				struct hns_roce_v2_wqe_data_seg *dseg;

				/* Atomics carry one data seg + atomic seg */
				dseg = wqe;
				set_data_seg_v2(dseg, wr->sg_list);
				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
				set_atomic_seg(wqe, atomic_wr(wr));
				roce_set_field(rc_sq_wqe->byte_16,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
					       wr->num_sge);
			} else if (wr->opcode != IB_WR_REG_MR) {
				/* REG_MR already filled its segment above */
				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
							wqe, &sge_ind, bad_wr);
				if (ret)
					goto out;
			}

			ind++;
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Ensure all WQE stores are visible before ringing the DB */
		wmb();

		sq_db.byte_4 = 0;
		sq_db.parameter = 0;

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		/* Producer index is reported modulo 2 * wqe_cnt */
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);

		qp->sq_next_wqe = ind;
		qp->next_sge = sge_ind;

		/* If the QP is already in error, push it through a modify
		 * to ERR so pending work gets flushed.
		 */
		if (qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
						    qp->state, IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&qp->sq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
615
/*
 * Post a chain of receive work requests to a QP's RQ and update the RQ
 * doorbell record.  On failure *bad_wr points at the first WR that could
 * not be posted.
 */
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	struct ib_qp_attr attr;
	unsigned long flags;
	void *wqe = NULL;
	int attr_mask;
	int ret = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	/* Receives are illegal while the QP is in RESET */
	if (hr_qp->state == IB_QPS_RESET) {
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
			hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* Copy the SGEs into the RQ WQE, skipping zero-length ones */
		wqe = get_recv_wqe(hr_qp, ind);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		/* Terminate a short SGE list with an invalid-lkey sentinel
		 * so the HW knows where the list ends.  (i == wr->num_sge
		 * here regardless of the continue above.)
		 */
		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* Record buffer addresses/lengths for RQ inline receive,
		 * where payload arrives in the CQE and is copied back.
		 */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
							       (u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					       (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		/* Ensure WQE stores are visible before the doorbell record */
		wmb();

		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

		/* If the QP is already in error, push it through a modify
		 * to ERR so pending work gets flushed.
		 */
		if (hr_qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
						    attr_mask, hr_qp->state,
						    IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
716
717static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
718 unsigned long instance_stage,
719 unsigned long reset_stage)
720{
721
722
723
724
725
726
727
728
729
730 hr_dev->is_reset = true;
731 hr_dev->dis_db = true;
732
733 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
734 instance_stage == HNS_ROCE_STATE_INIT)
735 return CMD_RST_PRC_EBUSY;
736
737 return CMD_RST_PRC_SUCCESS;
738}
739
740static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
741 unsigned long instance_stage,
742 unsigned long reset_stage)
743{
744 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
745 struct hnae3_handle *handle = priv->handle;
746 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
747
748
749
750
751
752
753
754
755
756
757 hr_dev->dis_db = true;
758 if (!ops->get_hw_reset_stat(handle))
759 hr_dev->is_reset = true;
760
761 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
762 instance_stage == HNS_ROCE_STATE_INIT)
763 return CMD_RST_PRC_EBUSY;
764
765 return CMD_RST_PRC_SUCCESS;
766}
767
768static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
769{
770 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
771 struct hnae3_handle *handle = priv->handle;
772 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
773
774
775
776
777
778 hr_dev->dis_db = true;
779 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
780 hr_dev->is_reset = true;
781
782 return CMD_RST_PRC_EBUSY;
783}
784
/*
 * Decide how a command should be processed with respect to device reset:
 * returns CMD_RST_PRC_SUCCESS (skip the command, pretend success),
 * CMD_RST_PRC_EBUSY (caller should back off) or 0 (proceed normally).
 */
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;	/* driver instance init state */
	unsigned long reset_stage;	/* reset handling state */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Sample all reset-related state up front; the checks below are
	 * ordered completed-reset, HW-resetting, SW-resetting.
	 * NOTE(review): the samples are not taken atomically — presumably
	 * a concurrent state change is caught on the next call; confirm.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}
823
824static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
825{
826 int ntu = ring->next_to_use;
827 int ntc = ring->next_to_clean;
828 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
829
830 return ring->desc_num - used - 1;
831}
832
833static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
834 struct hns_roce_v2_cmq_ring *ring)
835{
836 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
837
838 ring->desc = kzalloc(size, GFP_KERNEL);
839 if (!ring->desc)
840 return -ENOMEM;
841
842 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
843 DMA_BIDIRECTIONAL);
844 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
845 ring->desc_dma_addr = 0;
846 kfree(ring->desc);
847 ring->desc = NULL;
848 return -ENOMEM;
849 }
850
851 return 0;
852}
853
854static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
855 struct hns_roce_v2_cmq_ring *ring)
856{
857 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
858 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
859 DMA_BIDIRECTIONAL);
860
861 ring->desc_dma_addr = 0;
862 kfree(ring->desc);
863}
864
865static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
866{
867 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
868 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
869 &priv->cmq.csq : &priv->cmq.crq;
870
871 ring->flag = ring_type;
872 ring->next_to_clean = 0;
873 ring->next_to_use = 0;
874
875 return hns_roce_alloc_cmq_desc(hr_dev, ring);
876}
877
/*
 * Program the hardware registers for one CMQ ring: descriptor base
 * address (split low/high), depth with the enable bit, and reset the
 * head/tail pointers to zero.  TX registers serve the CSQ, RX the CRQ.
 */
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		/* Depth is encoded shifted by HNS_ROCE_CMQ_DESC_NUM_S and
		 * OR'd with the enable flag.
		 */
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}
905
/*
 * Bring up the command queues: set ring sizes, locks and timeout, allocate
 * the CSQ and CRQ rings, then program their hardware registers.  On CRQ
 * allocation failure the already-allocated CSQ is torn down.
 */
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Ring depths */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Per-ring locks */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Completion-poll timeout used by __hns_roce_cmq_send() */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Allocate the send (CSQ) ring */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Allocate the receive (CRQ) ring */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Program the hardware registers for both rings */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}
949
950static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
951{
952 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
953
954 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
955 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
956}
957
958static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
959 enum hns_roce_opcode_type opcode,
960 bool is_read)
961{
962 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
963 desc->opcode = cpu_to_le16(opcode);
964 desc->flag =
965 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
966 if (is_read)
967 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
968 else
969 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
970}
971
972static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
973{
974 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
975 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
976
977 return head == priv->cmq.csq.next_to_use;
978}
979
980static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
981{
982 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
983 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
984 struct hns_roce_cmq_desc *desc;
985 u16 ntc = csq->next_to_clean;
986 u32 head;
987 int clean = 0;
988
989 desc = &csq->desc[ntc];
990 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
991 while (head != ntc) {
992 memset(desc, 0, sizeof(*desc));
993 ntc++;
994 if (ntc == csq->desc_num)
995 ntc = 0;
996 desc = &csq->desc[ntc];
997 clean++;
998 }
999 csq->next_to_clean = ntc;
1000
1001 return clean;
1002}
1003
/*
 * Submit 'num' descriptors to the CSQ, ring the tail register, poll for
 * HW completion, and copy the completed descriptors (with their retval)
 * back into 'desc'.
 *
 * Returns 0 on success, -EBUSY when the ring lacks space, -EIO when a
 * descriptor reports a non-success retval, or -EAGAIN on timeout.
 */
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/* Remember where this batch starts so the results can be read back
	 * from the same slots after completion.
	 */
	ntc = csq->next_to_use;

	/* Copy the batch into the ring, wrapping as needed */
	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Writing the new tail kicks the HW into processing the batch */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/* All descriptors here are submitted with NO_INTR set, so completion
	 * is detected by polling the HW head register up to tx_timeout us.
	 */
	if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* Copy each completed descriptor back to the caller
			 * and record its retval; 'ret' reflects the LAST
			 * descriptor's status.
			 * NOTE(review): retval is read without le16_to_cpu —
			 * harmless if CMD_EXEC_SUCCESS is 0; confirm.
			 */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = desc[handle].retval;
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* Reclaim whatever the HW consumed, even on timeout */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}
1090
1091static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1092 struct hns_roce_cmq_desc *desc, int num)
1093{
1094 int retval;
1095 int ret;
1096
1097 ret = hns_roce_v2_rst_process_cmd(hr_dev);
1098 if (ret == CMD_RST_PRC_SUCCESS)
1099 return 0;
1100 if (ret == CMD_RST_PRC_EBUSY)
1101 return ret;
1102
1103 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1104 if (ret) {
1105 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1106 if (retval == CMD_RST_PRC_SUCCESS)
1107 return 0;
1108 else if (retval == CMD_RST_PRC_EBUSY)
1109 return retval;
1110 }
1111
1112 return ret;
1113}
1114
1115static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1116{
1117 struct hns_roce_query_version *resp;
1118 struct hns_roce_cmq_desc desc;
1119 int ret;
1120
1121 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1122 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1123 if (ret)
1124 return ret;
1125
1126 resp = (struct hns_roce_query_version *)desc.data;
1127 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
1128 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1129
1130 return 0;
1131}
1132
1133static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1134{
1135 struct hns_roce_query_fw_info *resp;
1136 struct hns_roce_cmq_desc desc;
1137 int ret;
1138
1139 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1140 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1141 if (ret)
1142 return ret;
1143
1144 resp = (struct hns_roce_query_fw_info *)desc.data;
1145 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1146
1147 return 0;
1148}
1149
/*
 * Configure device-global parameters: the 1us time reference and the
 * RoCE UDP destination port.
 */
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	/* 0x3e8 = 1000: presumably clock cycles per 1us — confirm */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	/* 0x12b7 = 4791, the IANA-assigned RoCEv2 UDP port */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1169
/*
 * Query the PF's hardware resource allocation (QPC/SRQC/CQC/MPT BT counts,
 * SL count and SCCC BT count) with a two-descriptor CMQ command and store
 * the results in hr_dev->caps.
 */
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	/* NEXT flag chains the first descriptor to the second */
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}
1217
/*
 * Query the PF's QPC/CQC timer BT entry counts (two-descriptor command)
 * and store them in hr_dev->caps. Only the first descriptor's payload is
 * consumed; the second is part of the wire format.
 */
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc[2];
	int ret, i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
					      true);

		/* Chain the two descriptors: NEXT set on all but the last */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;

	hr_dev->caps.qpc_timer_bt_num =
				roce_get_field(req_a->qpc_timer_bt_idx_num,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
				roce_get_field(req_a->cqc_timer_bt_idx_num,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}
1252
/*
 * Enable loopback and destination-override switch features for @vf_id.
 *
 * Read-modify-write sequence: the first send (setup with rd=true) reads the
 * current switch parameters into the descriptor; the same descriptor is then
 * converted to a write (flags rebuilt without HNS_ROCE_CMD_FLAG_WR), the
 * config bits are set, and it is sent back.
 */
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id,
			VF_SWITCH_DATA_FUN_ID_VF_ID_M,
			VF_SWITCH_DATA_FUN_ID_VF_ID_S,
			vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;
	/* Reuse the descriptor as a write-back: rebuild flags, clear WR */
	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1279
/*
 * Allocate hardware resources for the VF via a chained two-descriptor
 * command. desc[0] (res_a) carries the QPC/SRQC/CQC/MPT/EQC BT index and
 * count fields; desc[1] (res_b) carries SMAC/SGID/QID-SL/SCCC fields.
 * All index fields start at 0; the counts come from the driver constants.
 */
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		/* Chain the descriptors: NEXT set on all but the last */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			/* res_a: context base-address-table allocations */
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			/* res_b: SMAC/SGID tables, QID/SL, SCCC BT */
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);

			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
				       HNS_ROCE_VF_SCCC_BT_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
1377
/*
 * Program the base-address-table (BT) attributes for each context type
 * (QPC/SRQC/CQC/MPT/SCCC): BA page size, buffer page size, and hop number.
 *
 * Page-size fields are written as shifts relative to 4K, hence the
 * PG_SHIFT_OFFSET addend. HNS_ROCE_HOP_NUM_0 is a driver sentinel meaning
 * "no hop", which the hardware encodes as 0.
 */
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1448
/*
 * Initialize the device profile: query hardware/firmware info, configure
 * global parameters, query and allocate PF/VF resources, then fill in the
 * driver capability table and program the BT attributes.
 *
 * Revision 0x21 (hip08 "B" silicon) gates the optional features: timer
 * resources, the VF switch parameters, MW/FRMR, and atomic/SRQ/flow-ctrl.
 */
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}


	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_query_pf_timer_resource(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev,
				"Query pf timer resource fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
		if (ret) {
			dev_err(hr_dev->dev,
				"Set function switch param fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	/* Static capability limits for this hardware generation */
	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_srqwqes	= HNS_ROCE_V2_MAX_SRQWQE_NUM;
	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->max_srq_sg	= HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors	= HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz	= HNS_ROCE_V2_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz	= 4;
	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey	= 0;
	caps->reserved_pds	= 0;
	caps->reserved_mrws	= 1;
	caps->reserved_uars	= 0;
	caps->reserved_cqs	= 0;
	caps->reserved_srqs	= 0;
	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;

	/* Page-size shifts (relative to 4K) and hop numbers per table type */
	caps->qpc_ba_pg_sz	= 0;
	caps->qpc_buf_pg_sz	= 0;
	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz	= 0;
	caps->srqc_buf_pg_sz	= 0;
	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz	= 0;
	caps->cqc_buf_pg_sz	= 0;
	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz	= 0;
	caps->mpt_buf_pg_sz	= 0;
	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->pbl_ba_pg_sz	= 2;
	caps->pbl_buf_pg_sz	= 0;
	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
	caps->mtt_ba_pg_sz	= 0;
	caps->mtt_buf_pg_sz	= 0;
	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
	caps->cqe_ba_pg_sz	= 0;
	caps->cqe_buf_pg_sz	= 0;
	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz	= 0;
	caps->srqwqe_buf_pg_sz	= 0;
	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz	= 0;
	caps->idx_buf_pg_sz	= 0;
	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
	caps->eqe_ba_pg_sz	= 0;
	caps->eqe_buf_pg_sz	= 0;
	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz	= 0;
	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
				  HNS_ROCE_CAP_FLAG_RECORD_DB |
				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	if (hr_dev->pci_dev->revision == 0x21)
		caps->flags |= HNS_ROCE_CAP_FLAG_MW |
			       HNS_ROCE_CAP_FLAG_FRMR;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srqs		= HNS_ROCE_V2_MAX_SRQ;
	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;

	/* Revision-0x21-only features and their timer/SCCC table geometry */
	if (hr_dev->pci_dev->revision == 0x21) {
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
			       HNS_ROCE_CAP_FLAG_SRQ |
			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
		caps->qpc_timer_ba_pg_sz  = 0;
		caps->qpc_timer_buf_pg_sz = 0;
		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
		caps->cqc_timer_ba_pg_sz  = 0;
		caps->cqc_timer_buf_pg_sz = 0;
		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;

		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
		caps->sccc_ba_pg_sz	= 0;
		caps->sccc_buf_pg_sz    = 0;
		caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;
	}

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}
1642
/*
 * Program a link table (TSQ or TPQ) into hardware via a chained
 * two-descriptor command: desc[0] carries the table base/depth/page size
 * and the head entry, desc[1] the tail entry and tail pointer.
 *
 * NOTE(review): the tail-pointer computation reads entry[page_num - 2],
 * which assumes the table has at least two pages. The sizing formulas in
 * hns_roce_init_link_table() appear to guarantee npages >= 2 — confirm
 * before reusing this with a smaller table.
 */
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		/* Chain the descriptors: NEXT set on all but the last */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			/* Table DMA base, depth, page size, and head entry */
			req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
			req_a->base_addr_h = (link_tbl->table.map >> 32) &
					     0xffffffff;
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_DEPTH_M,
				       CFG_LLM_QUE_DEPTH_S,
				       link_tbl->npages);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_PGSZ_M,
				       CFG_LLM_QUE_PGSZ_S,
				       link_tbl->pg_sz);
			req_a->head_ba_l = entry[0].blk_ba0;
			req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
			roce_set_field(req_a->head_ptr,
				       CFG_LLM_HEAD_PTR_M,
				       CFG_LLM_HEAD_PTR_S, 0);
		} else {
			/* Tail entry address and the next-ptr of its
			 * predecessor (the hardware tail pointer).
			 */
			req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
			roce_set_field(req_b->tail_ba_h,
				       CFG_LLM_TAIL_BA_H_M,
				       CFG_LLM_TAIL_BA_H_S,
				       entry[page_num - 1].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_BA1_M);
			roce_set_field(req_b->tail_ptr,
				       CFG_LLM_TAIL_PTR_M,
				       CFG_LLM_TAIL_PTR_S,
				       (entry[page_num - 2].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
		}
	}
	/* INIT_EN kicks the hardware to load the table */
	roce_set_field(req_a->depth_pgsz_init_en,
		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
1721
/*
 * Allocate the DMA pages backing a link table (TSQ or TPQ), build the
 * linked-list of entries (each entry records its page's DMA address split
 * across blk_ba0/blk_ba1 plus the index of the next entry), then hand the
 * table to hardware via hns_roce_config_link_table().
 *
 * All failure paths return -ENOMEM after unwinding partial allocations.
 */
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
				    enum hns_roce_link_table_type type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	struct device *dev = hr_dev->dev;
	u32 buf_chk_sz;
	dma_addr_t t;
	int func_num = 1;
	int pg_num_a;
	int pg_num_b;
	int pg_num;
	int size;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
		pg_num_b = 2 * 4 * func_num + 2;
		break;
	default:
		return -EINVAL;
	}

	pg_num = max(pg_num_a, pg_num_b);
	size = pg_num * sizeof(struct hns_roce_link_table_entry);

	/* The entry array itself must be DMA-visible: hardware walks it */
	link_tbl->table.buf = dma_alloc_coherent(dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf)
		goto out;

	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
				    GFP_KERNEL);
	if (!link_tbl->pg_list)
		goto err_kcalloc_failed;

	entry = link_tbl->table.buf;
	for (i = 0; i < pg_num; ++i) {
		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
							      &t, GFP_KERNEL);
		if (!link_tbl->pg_list[i].buf)
			goto err_alloc_buf_failed;

		link_tbl->pg_list[i].map = t;
		memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);

		/* DMA address split: bits 12..43 in ba0, bits 44+ in ba1 */
		entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
		roce_set_field(entry[i].blk_ba1_nxt_ptr,
			       HNS_ROCE_LINK_TABLE_BA1_M,
			       HNS_ROCE_LINK_TABLE_BA1_S,
			       t >> 44);

		/* Link each entry to its successor; the last keeps nxt_ptr 0 */
		if (i < (pg_num - 1))
			roce_set_field(entry[i].blk_ba1_nxt_ptr,
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M,
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S,
				       i + 1);
	}
	link_tbl->npages = pg_num;
	link_tbl->pg_sz = buf_chk_sz;

	return hns_roce_config_link_table(hr_dev, type);

err_alloc_buf_failed:
	/* Free only the pages allocated so far (indices 0..i-1) */
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, buf_chk_sz,
				  link_tbl->pg_list[i].buf,
				  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

err_kcalloc_failed:
	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);

out:
	return -ENOMEM;
}
1810
1811static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1812 struct hns_roce_link_table *link_tbl)
1813{
1814 struct device *dev = hr_dev->dev;
1815 int size;
1816 int i;
1817
1818 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1819
1820 for (i = 0; i < link_tbl->npages; ++i)
1821 if (link_tbl->pg_list[i].buf)
1822 dma_free_coherent(dev, link_tbl->pg_sz,
1823 link_tbl->pg_list[i].buf,
1824 link_tbl->pg_list[i].map);
1825 kfree(link_tbl->pg_list);
1826
1827 dma_free_coherent(dev, size, link_tbl->table.buf,
1828 link_tbl->table.map);
1829}
1830
/*
 * hw_v2 init hook: set up the TSQ and TPQ link tables, then pin every
 * QPC/CQC timer BT page (their counts were read by
 * hns_roce_query_pf_timer_resource(); both are zero when the revision
 * does not support timers, making the loops no-ops).
 *
 * On failure, previously acquired resources are released in reverse order.
 */
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int qpc_count, cqc_count;
	int ret, i;


	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
		goto err_tpq_init_failed;
	}


	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
	     qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
		if (ret) {
			dev_err(hr_dev->dev, "QPC Timer get failed\n");
			goto err_qpc_timer_failed;
		}
	}


	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
	     cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
		if (ret) {
			dev_err(hr_dev->dev, "CQC Timer get failed\n");
			goto err_cqc_timer_failed;
		}
	}

	return 0;

err_cqc_timer_failed:
	/* Only the entries acquired before the failure are released */
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

	hns_roce_free_link_table(hr_dev, &priv->tpq);

err_tpq_init_failed:
	hns_roce_free_link_table(hr_dev, &priv->tsq);

	return ret;
}
1889
1890static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1891{
1892 struct hns_roce_v2_priv *priv = hr_dev->priv;
1893
1894 hns_roce_free_link_table(hr_dev, &priv->tpq);
1895 hns_roce_free_link_table(hr_dev, &priv->tsq);
1896}
1897
1898static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
1899{
1900 struct hns_roce_cmq_desc desc;
1901 struct hns_roce_mbox_status *mb_st =
1902 (struct hns_roce_mbox_status *)desc.data;
1903 enum hns_roce_cmd_return_status status;
1904
1905 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
1906
1907 status = hns_roce_cmq_send(hr_dev, &desc, 1);
1908 if (status)
1909 return status;
1910
1911 return cpu_to_le32(mb_st->mb_status_hw_run);
1912}
1913
1914static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1915{
1916 u32 status = hns_roce_query_mbox_status(hr_dev);
1917
1918 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1919}
1920
1921static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1922{
1923 u32 status = hns_roce_query_mbox_status(hr_dev);
1924
1925 return status & HNS_ROCE_HW_MB_STATUS_MASK;
1926}
1927
1928static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
1929 u64 out_param, u32 in_modifier, u8 op_modifier,
1930 u16 op, u16 token, int event)
1931{
1932 struct hns_roce_cmq_desc desc;
1933 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
1934
1935 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
1936
1937 mb->in_param_l = cpu_to_le64(in_param);
1938 mb->in_param_h = cpu_to_le64(in_param) >> 32;
1939 mb->out_param_l = cpu_to_le64(out_param);
1940 mb->out_param_h = cpu_to_le64(out_param) >> 32;
1941 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
1942 mb->token_event_en = cpu_to_le32(event << 16 | token);
1943
1944 return hns_roce_cmq_send(hr_dev, &desc, 1);
1945}
1946
1947static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1948 u64 out_param, u32 in_modifier, u8 op_modifier,
1949 u16 op, u16 token, int event)
1950{
1951 struct device *dev = hr_dev->dev;
1952 unsigned long end;
1953 int ret;
1954
1955 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1956 while (hns_roce_v2_cmd_pending(hr_dev)) {
1957 if (time_after(jiffies, end)) {
1958 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
1959 (int)end);
1960 return -EAGAIN;
1961 }
1962 cond_resched();
1963 }
1964
1965 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
1966 op_modifier, op, token, event);
1967 if (ret)
1968 dev_err(dev, "Post mailbox fail(%d)\n", ret);
1969
1970 return ret;
1971}
1972
1973static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1974 unsigned long timeout)
1975{
1976 struct device *dev = hr_dev->dev;
1977 unsigned long end = 0;
1978 u32 status;
1979
1980 end = msecs_to_jiffies(timeout) + jiffies;
1981 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1982 cond_resched();
1983
1984 if (hns_roce_v2_cmd_pending(hr_dev)) {
1985 dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1986 return -ETIMEDOUT;
1987 }
1988
1989 status = hns_roce_v2_cmd_complete(hr_dev);
1990 if (status != 0x1) {
1991 if (status == CMD_RST_PRC_EBUSY)
1992 return status;
1993
1994 dev_err(dev, "mailbox status 0x%x!\n", status);
1995 return -EBUSY;
1996 }
1997
1998 return 0;
1999}
2000
/*
 * Write one SGID table entry (index, type, and the 128-bit GID split into
 * four little-endian words) to hardware.
 *
 * NOTE(review): the GID raw bytes are read through u32 casts — this assumes
 * gid->raw is at least 4-byte aligned and relies on type punning; confirm
 * union ib_gid's layout guarantees before changing the access pattern.
 */
static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
				      int gid_index, const union ib_gid *gid,
				      enum hns_roce_sgid_type sgid_type)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_sgid_tb *sgid_tb =
		(struct hns_roce_cfg_sgid_tb *)desc.data;
	u32 *p;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);

	roce_set_field(sgid_tb->table_idx_rsv,
		       CFG_SGID_TB_TABLE_IDX_M,
		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
	roce_set_field(sgid_tb->vf_sgid_type_rsv,
		       CFG_SGID_TB_VF_SGID_TYPE_M,
		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);

	/* GID bytes 0..15 go out as four LE words: low, mid-low, mid-high, high */
	p = (u32 *)&gid->raw[0];
	sgid_tb->vf_sgid_l = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[4];
	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[8];
	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[0xc];
	sgid_tb->vf_sgid_h = cpu_to_le32(*p);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
2033
2034static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2035 int gid_index, const union ib_gid *gid,
2036 const struct ib_gid_attr *attr)
2037{
2038 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2039 int ret;
2040
2041 if (!gid || !attr)
2042 return -EINVAL;
2043
2044 if (attr->gid_type == IB_GID_TYPE_ROCE)
2045 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2046
2047 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2048 if (ipv6_addr_v4mapped((void *)gid))
2049 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2050 else
2051 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2052 }
2053
2054 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2055 if (ret)
2056 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2057
2058 return ret;
2059}
2060
2061static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2062 u8 *addr)
2063{
2064 struct hns_roce_cmq_desc desc;
2065 struct hns_roce_cfg_smac_tb *smac_tb =
2066 (struct hns_roce_cfg_smac_tb *)desc.data;
2067 u16 reg_smac_h;
2068 u32 reg_smac_l;
2069
2070 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2071
2072 reg_smac_l = *(u32 *)(&addr[0]);
2073 reg_smac_h = *(u16 *)(&addr[4]);
2074
2075 memset(smac_tb, 0, sizeof(*smac_tb));
2076 roce_set_field(smac_tb->tb_idx_rsv,
2077 CFG_SMAC_TB_IDX_M,
2078 CFG_SMAC_TB_IDX_S, phy_port);
2079 roce_set_field(smac_tb->vf_smac_h_rsv,
2080 CFG_SMAC_TB_VF_SMAC_H_M,
2081 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2082 smac_tb->vf_smac_l = reg_smac_l;
2083
2084 return hns_roce_cmq_send(hr_dev, &desc, 1);
2085}
2086
2087static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2088 struct hns_roce_mr *mr)
2089{
2090 struct sg_dma_page_iter sg_iter;
2091 u64 page_addr;
2092 u64 *pages;
2093 int i;
2094
2095 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2096 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2097 roce_set_field(mpt_entry->byte_48_mode_ba,
2098 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2099 upper_32_bits(mr->pbl_ba >> 3));
2100
2101 pages = (u64 *)__get_free_page(GFP_KERNEL);
2102 if (!pages)
2103 return -ENOMEM;
2104
2105 i = 0;
2106 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2107 page_addr = sg_page_iter_dma_address(&sg_iter);
2108 pages[i] = page_addr >> 6;
2109
2110
2111 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2112 goto found;
2113 i++;
2114 }
2115found:
2116 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2117 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2118 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2119
2120 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2121 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2122 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2123 roce_set_field(mpt_entry->byte_64_buf_pa1,
2124 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2125 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2126 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2127
2128 free_page((unsigned long)pages);
2129
2130 return 0;
2131}
2132
/*
 * Write a complete MPT (memory protection table) entry into the mailbox
 * buffer for a new MR: state, hop/page-size geometry, PD, access-flag
 * bits, key, VA and length. DMA MRs stop there; other MR types also get
 * their PBL fields filled by set_mtpt_pbl().
 */
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	/* Translate IB access flags into the per-bit enables of byte 8 */
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));

	/* PA mode for DMA MRs (no translation); always mark inner PA valid */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ? 0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	/* DMA MRs have no PBL to program */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	ret = set_mtpt_pbl(mpt_entry, mr);

	return ret;
}
2186
/*
 * Update an existing MPT entry for ib_rereg_user_mr().
 * Only the pieces selected by @flags (IB_MR_REREG_PD / _ACCESS / _TRANS)
 * are rewritten; the software mr state is updated to match.
 *
 * Returns 0 on success, or the error from set_mtpt_pbl() when the
 * translation (iova/size/PBL) is being replaced.
 */
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		/* New virtual window (64-bit values split across fields) */
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mr->iova = iova;
		mr->size = size;

		ret = set_mtpt_pbl(mpt_entry, mr);
	}

	return ret;
}
2233
/*
 * Fill an MPT entry for a fast-register MR (IB_WR_REG_MR flow).
 * The entry is written in FREE state with FRE enabled so a posted
 * fast-register WQE can later activate it; always returns 0.
 */
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	/* FRE: fast-register enable; BPD: bind to PD */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);

	/* PBL base address is recorded shifted right by 3 and split 32/32 */
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	return 0;
}
2275
/*
 * Fill an MPT entry for a memory window (MW).
 * The entry starts in FREE state; a type-2 window additionally requires
 * a QP binding (BQP set). The hardware lkey slot carries the MW rkey.
 * Always returns 0.
 */
static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mw->pdn);
	/* HNS_ROCE_HOP_NUM_0 is a sentinel meaning "no hop" -> program 0 */
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : mw->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	/* MR_MW=1 marks the entry as a window; BQP only for type-2 MWs */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	mpt_entry->lkey = cpu_to_le32(mw->rkey);

	return 0;
}
2315
2316static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2317{
2318 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2319 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2320}
2321
2322static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2323{
2324 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2325
2326
2327 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2328 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2329}
2330
2331static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2332{
2333 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2334}
2335
2336static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2337{
2338 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2339}
2340
2341static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2342{
2343 u32 bitmap_num;
2344 int bit_num;
2345
2346
2347 spin_lock(&srq->lock);
2348
2349 bitmap_num = wqe_index / (sizeof(u64) * 8);
2350 bit_num = wqe_index % (sizeof(u64) * 8);
2351 srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
2352 srq->tail++;
2353
2354 spin_unlock(&srq->lock);
2355}
2356
2357static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2358{
2359 *hr_cq->set_ci_db = cons_index & 0xffffff;
2360}
2361
/*
 * Remove all CQEs belonging to QP @qpn from the CQ (caller holds the CQ
 * lock). Matching CQEs are dropped; non-matching CQEs are compacted
 * toward the tail, keeping each destination slot's owner bit intact.
 * For receive CQEs of an SRQ-attached QP, the SRQ WQE is also recycled.
 */
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	/* Find the last software-owned CQE, bounded to one full ring */
	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Walk backwards from the producer to the consumer index, sliding
	 * CQEs of other QPs up by the number freed so far.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			if (srq &&
			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
				wqe_index = roce_get_field(cqe->byte_4,
						V2_CQE_BYTE_4_WQE_INDX_M,
						V2_CQE_BYTE_4_WQE_INDX_S);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			/* Preserve the destination slot's owner bit */
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure the compacted CQEs are visible before the
		 * consumer index update reaches the hardware.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
2415
/* Locked wrapper around __hns_roce_v2_cq_clean(). */
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2423
/*
 * Build a CQ context in the mailbox buffer for the CREATE_CQC command.
 * @mtts: first two MTT entries (current and next CQE block addresses).
 * @dma_handle: DMA address of the MTT base for this CQ.
 * @nent: number of CQEs (power of two); @vector: completion EQ number.
 *
 * NOTE(review): cpu_to_le32() is applied both to whole words and to
 * values passed into roce_set_field() on words that are also written
 * raw — the mixed endianness handling looks inconsistent on big-endian;
 * verify against the roce_set_field() definition before changing.
 */
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, vector);
	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	/* Current CQE block address, split low/high around PAGE_ADDR_SHIFT */
	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	cq_context->cqe_cur_blk_addr =
				cpu_to_le32(cq_context->cqe_cur_blk_addr);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	/* Next CQE block address plus BA/buffer page size configuration */
	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* MTT base address is stored shifted right by 3, split 32/32 */
	cq_context->cqe_ba = (u32)(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	/* Doorbell record address is stored shifted right by 1 */
	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = hr_cq->db.dma >> 32;

	/* Default interrupt coalescing: burst count and period */
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
2498
2499static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2500 enum ib_cq_notify_flags flags)
2501{
2502 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2503 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2504 u32 notification_flag;
2505 u32 doorbell[2];
2506
2507 doorbell[0] = 0;
2508 doorbell[1] = 0;
2509
2510 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2511 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2512
2513
2514
2515
2516 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2517 hr_cq->cqn);
2518 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2519 HNS_ROCE_V2_CQ_DB_NTR);
2520 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2521 V2_CQ_DB_PARAMETER_CONS_IDX_S,
2522 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2523 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2524 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2525 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2526 notification_flag);
2527
2528 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2529
2530 return 0;
2531}
2532
2533static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2534 struct hns_roce_qp **cur_qp,
2535 struct ib_wc *wc)
2536{
2537 struct hns_roce_rinl_sge *sge_list;
2538 u32 wr_num, wr_cnt, sge_num;
2539 u32 sge_cnt, data_len, size;
2540 void *wqe_buf;
2541
2542 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2543 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2544 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2545
2546 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2547 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2548 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2549 data_len = wc->byte_len;
2550
2551 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2552 size = min(sge_list[sge_cnt].len, data_len);
2553 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2554
2555 data_len -= size;
2556 wqe_buf += size;
2557 }
2558
2559 if (data_len) {
2560 wc->status = IB_WC_LOC_LEN_ERR;
2561 return -EAGAIN;
2562 }
2563
2564 return 0;
2565}
2566
/*
 * Consume one CQE and translate it into an ib_wc.
 * @cur_qp caches the QP of the previous CQE; it is re-looked-up only
 * when the QPN changes. Returns 0 when a work completion was produced,
 * -EAGAIN when the CQ is empty (or an inline-receive copy overflowed),
 * -EINVAL for a CQE referencing an unknown QPN, or the result of
 * moving the QP to the error state on a fatal completion status.
 */
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_srq *srq = NULL;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct ib_qp_attr attr;
	int attr_mask;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;

	/* Read the CQE body only after the ownership check above */
	rmb();

	/* S_R bit: 0 -> send completion, 1 -> receive completion */
	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sq_signal_bits is set, completions may be
			 * reported for only some WQEs; advance the tail
			 * past any unsignaled WQEs up to this one.
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_hr_srq((*cur_qp)->ibqp.srq);
		wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
						     V2_CQE_BYTE_4_WQE_INDX_M,
						     V2_CQE_BYTE_4_WQE_INDX_S));
		wc->wr_id = srq->wrid[wqe_ctr];
		hns_roce_free_srq_wqe(srq, wqe_ctr);
	} else {
		/* Ordinary RQ: WQEs complete in posting order */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	/* Map the hardware completion status onto ib_wc_status */
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* Fatal (non-flush) error: move the QP to the error state */
	if ((wc->status != IB_WC_SUCCESS) &&
	    (wc->status != IB_WC_WR_FLUSH_ERR)) {
		attr_mask = IB_QP_STATE;
		attr.qp_state = IB_QPS_ERR;
		return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
					     &attr, attr_mask,
					     (*cur_qp)->state, IB_QPS_ERR);
	}

	/* Flushed CQEs carry no further information */
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		return 0;

	if (is_send) {
		wc->wc_flags = 0;
		/* Decode the SQ opcode carried in the CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_SQ_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_SQ_OPCODE_BIND_MW:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
	} else {
		/* Receive completion: byte count, opcode, immediate data */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);

		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
					V2_CQE_BYTE_4_OPCODE_S);
		switch (opcode & 0x1f) {
		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Inline receive: copy the payload into the posted SGEs */
		if ((wc->qp->qp_type == IB_QPT_RC ||
		     wc->qp->qp_type == IB_QPT_UC) &&
		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
			if (ret)
				return -EAGAIN;
		}

		/* Address/VLAN metadata carried by the CQE */
		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
					    V2_CQE_BYTE_32_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
						V2_CQE_BYTE_32_RMT_QPN_M,
						V2_CQE_BYTE_32_RMT_QPN_S);
		wc->slid = 0;
		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
					      V2_CQE_BYTE_32_GRH_S) ?
					      IB_WC_GRH : 0);
		wc->port_num = roce_get_field(cqe->byte_32,
				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
		wc->pkey_index = 0;
		memcpy(wc->smac, cqe->smac, 4);
		wc->smac[4] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_4_M,
					     V2_CQE_BYTE_28_SMAC_4_S);
		wc->smac[5] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_5_M,
					     V2_CQE_BYTE_28_SMAC_5_S);
		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
							  V2_CQE_BYTE_28_VID_M,
							  V2_CQE_BYTE_28_VID_S);
		} else {
			wc->vlan_id = 0xffff;
		}

		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		wc->network_hdr_type = roce_get_field(cqe->byte_28,
						    V2_CQE_BYTE_28_PORT_TYPE_M,
						    V2_CQE_BYTE_28_PORT_TYPE_S);
	}

	return 0;
}
2842
2843static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2844 struct ib_wc *wc)
2845{
2846 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2847 struct hns_roce_qp *cur_qp = NULL;
2848 unsigned long flags;
2849 int npolled;
2850
2851 spin_lock_irqsave(&hr_cq->lock, flags);
2852
2853 for (npolled = 0; npolled < num_entries; ++npolled) {
2854 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2855 break;
2856 }
2857
2858 if (npolled) {
2859
2860 wmb();
2861 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2862 }
2863
2864 spin_unlock_irqrestore(&hr_cq->lock, flags);
2865
2866 return npolled;
2867}
2868
/*
 * Program a base-address (BT) entry for a multi-hop HEM table via a
 * mailbox command. @step_idx selects which hop level's BT is written
 * (the command opcode is offset by it). For the last hop the HEM
 * chunk's own address is written; for intermediate hops the cached
 * L0/L1 BT dma address is used.
 *
 * Returns 0 for tables that don't use mhop or aren't mailbox-managed,
 * otherwise the mailbox command result.
 *
 * NOTE(review): when the last-step HEM spans several chunks, ret is
 * overwritten each iteration, so only the final chunk's status is
 * returned — confirm whether earlier failures should abort the loop.
 */
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;
	u16 op = 0xff;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	chunk_ba_num = mhop.bt_chunk_size / 8;

	/* Flatten the (l0, l1, l2) indices into a linear HEM index */
	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	/* Base opcode per table type; step_idx is added below */
	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
		break;
	case HEM_TYPE_SCCC:
		op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
		break;
	case HEM_TYPE_QPC_TIMER:
		op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
		break;
	case HEM_TYPE_CQC_TIMER:
		op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be written by mailbox!\n",
			 table->type);
		return 0;
	}

	/* SCCC only programs step 0 */
	if (table->type == HEM_TYPE_SCCC && step_idx)
		return 0;

	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (table->type == HEM_TYPE_SCCC)
		obj = mhop.l0_idx;

	if (check_whether_last_step(hop_num, step_idx)) {
		/* Last hop: write each chunk of the HEM itself */
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);

			/* configure the ba, tag, and op */
			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
						obj, 0, op,
						HNS_ROCE_CMD_TIMEOUT_MSECS);
		}
	} else {
		/* Intermediate hop: write the cached L0/L1 BT address */
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		/* configure the ba, tag, and op */
		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
2973
/*
 * Tear down a HEM BT entry through a DESTROY_*_BT mailbox command.
 * Counterpart of hns_roce_v2_set_hem(). SCCC/QPC_TIMER/CQC_TIMER tables
 * need no destroy command and return 0 immediately, as do tables not
 * managed via mhop/mailbox.
 */
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret = 0;
	u16 op = 0xff;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	/* Base destroy opcode per table type; step_idx is added below */
	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
		break;
	case HEM_TYPE_SCCC:
	case HEM_TYPE_QPC_TIMER:
	case HEM_TYPE_CQC_TIMER:
		/* no destroy command for these types (early return below) */
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
			 table->type);
		return 0;
	}

	if (table->type == HEM_TYPE_SCCC ||
	    table->type == HEM_TYPE_QPC_TIMER ||
	    table->type == HEM_TYPE_CQC_TIMER)
		return 0;

	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* configure the tag and op */
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
3027
/*
 * Issue the MODIFY_QPC mailbox command with the prepared QP context.
 * @context points to a {context, qpc_mask} pair, which is why twice
 * sizeof(*context) is copied into the mailbox buffer.
 * @mtt, @cur_state and @new_state are unused here; kept for the shared
 * hw-ops interface.
 */
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtt *mtt,
				 enum ib_qp_state cur_state,
				 enum ib_qp_state new_state,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* copy both the new context and its update mask */
	memcpy(mailbox->buf, context, sizeof(*context) * 2);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				HNS_ROCE_CMD_MODIFY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
3052
3053static void set_access_flags(struct hns_roce_qp *hr_qp,
3054 struct hns_roce_v2_qp_context *context,
3055 struct hns_roce_v2_qp_context *qpc_mask,
3056 const struct ib_qp_attr *attr, int attr_mask)
3057{
3058 u8 dest_rd_atomic;
3059 u32 access_flags;
3060
3061 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3062 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3063
3064 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3065 attr->qp_access_flags : hr_qp->atomic_rd_en;
3066
3067 if (!dest_rd_atomic)
3068 access_flags &= IB_ACCESS_REMOTE_WRITE;
3069
3070 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3071 !!(access_flags & IB_ACCESS_REMOTE_READ));
3072 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3073
3074 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3075 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3076 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3077
3078 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3079 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3080 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3081}
3082
3083static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3084 const struct ib_qp_attr *attr,
3085 int attr_mask,
3086 struct hns_roce_v2_qp_context *context,
3087 struct hns_roce_v2_qp_context *qpc_mask)
3088{
3089 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3090 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3091
3092
3093
3094
3095
3096
3097
3098 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3099 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3100 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3101 V2_QPC_BYTE_4_TST_S, 0);
3102
3103 if (ibqp->qp_type == IB_QPT_GSI)
3104 roce_set_field(context->byte_4_sqpn_tst,
3105 V2_QPC_BYTE_4_SGE_SHIFT_M,
3106 V2_QPC_BYTE_4_SGE_SHIFT_S,
3107 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3108 else
3109 roce_set_field(context->byte_4_sqpn_tst,
3110 V2_QPC_BYTE_4_SGE_SHIFT_M,
3111 V2_QPC_BYTE_4_SGE_SHIFT_S,
3112 hr_qp->sq.max_gs > 2 ?
3113 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3114
3115 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3116 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3117
3118 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3119 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3120 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3121 V2_QPC_BYTE_4_SQPN_S, 0);
3122
3123 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3124 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3125 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3126 V2_QPC_BYTE_16_PD_S, 0);
3127
3128 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3129 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3130 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3131 V2_QPC_BYTE_20_RQWS_S, 0);
3132
3133 roce_set_field(context->byte_20_smac_sgid_idx,
3134 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3135 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3136 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3137 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3138
3139 roce_set_field(context->byte_20_smac_sgid_idx,
3140 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3141 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3142 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
3143 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3144 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3145 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3146
3147
3148 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3149 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3150 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3151 V2_QPC_BYTE_24_VLAN_ID_S, 0);
3152
3153
3154
3155
3156
3157
3158 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3159 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3160 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3161 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3162
3163 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3164 V2_QPC_BYTE_60_TEMPID_S, 0);
3165
3166 roce_set_field(qpc_mask->byte_60_qpst_tempid,
3167 V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3168 0);
3169 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3170 V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3171 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3172 V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3173 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3174 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3175
3176 if (hr_qp->rdb_en) {
3177 roce_set_bit(context->byte_68_rq_db,
3178 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3179 roce_set_bit(qpc_mask->byte_68_rq_db,
3180 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3181 }
3182
3183 roce_set_field(context->byte_68_rq_db,
3184 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3185 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3186 ((u32)hr_qp->rdb.dma) >> 1);
3187 roce_set_field(qpc_mask->byte_68_rq_db,
3188 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3189 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3190 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
3191 qpc_mask->rq_db_record_addr = 0;
3192
3193 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3194 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3195 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3196
3197 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3198 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3199 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3200 V2_QPC_BYTE_80_RX_CQN_S, 0);
3201 if (ibqp->srq) {
3202 roce_set_field(context->byte_76_srqn_op_en,
3203 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3204 to_hr_srq(ibqp->srq)->srqn);
3205 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3206 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3207 roce_set_bit(context->byte_76_srqn_op_en,
3208 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3209 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3210 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3211 }
3212
3213 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3214 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3215 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3216 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3217 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3218 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3219
3220 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3221 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3222
3223 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3224 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3225
3226 roce_set_field(qpc_mask->byte_104_rq_sge,
3227 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3228 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3229
3230 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3231 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3232 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3233 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3234 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3235 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3236 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3237
3238 qpc_mask->rq_rnr_timer = 0;
3239 qpc_mask->rx_msg_len = 0;
3240 qpc_mask->rx_rkey_pkt_info = 0;
3241 qpc_mask->rx_va = 0;
3242
3243 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3244 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3245 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3246 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3247
3248 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3249 0);
3250 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3251 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3252 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3253 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3254
3255 roce_set_field(qpc_mask->byte_144_raq,
3256 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3257 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3258 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3259 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3260 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3261
3262 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3263 V2_QPC_BYTE_148_RQ_MSN_S, 0);
3264 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3265 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3266
3267 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3268 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3269 roce_set_field(qpc_mask->byte_152_raq,
3270 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3271 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3272
3273 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3274 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3275
3276 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3277 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3278 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3279 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3280 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3281 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3282
3283 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3284 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3285 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3286 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3287 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3288 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3289 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3290 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3291 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3292 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3293 roce_set_field(qpc_mask->byte_168_irrl_idx,
3294 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3295 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3296
3297 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3298 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3299 roce_set_field(qpc_mask->byte_172_sq_psn,
3300 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3301 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3302
3303 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3304 0);
3305
3306 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3307 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3308
3309 roce_set_field(qpc_mask->byte_176_msg_pktn,
3310 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3311 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3312 roce_set_field(qpc_mask->byte_176_msg_pktn,
3313 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3314 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3315
3316 roce_set_field(qpc_mask->byte_184_irrl_idx,
3317 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3318 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3319
3320 qpc_mask->cur_sge_offset = 0;
3321
3322 roce_set_field(qpc_mask->byte_192_ext_sge,
3323 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3324 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3325 roce_set_field(qpc_mask->byte_192_ext_sge,
3326 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3327 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3328
3329 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3330 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3331
3332 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3333 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3334 roce_set_field(qpc_mask->byte_200_sq_max,
3335 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3336 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3337
3338 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3339 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3340
3341 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3342 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3343
3344 qpc_mask->sq_timer = 0;
3345
3346 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3347 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3348 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3349 roce_set_field(qpc_mask->byte_232_irrl_sge,
3350 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3351 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3352
3353 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3354 0);
3355 roce_set_bit(qpc_mask->byte_232_irrl_sge,
3356 V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3357 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3358 0);
3359
3360 qpc_mask->irrl_cur_sge_offset = 0;
3361
3362 roce_set_field(qpc_mask->byte_240_irrl_tail,
3363 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3364 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3365 roce_set_field(qpc_mask->byte_240_irrl_tail,
3366 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3367 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3368 roce_set_field(qpc_mask->byte_240_irrl_tail,
3369 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3370 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3371
3372 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3373 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3374 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3375 0);
3376 roce_set_field(qpc_mask->byte_248_ack_psn,
3377 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3378 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3379 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3380 0);
3381 roce_set_bit(qpc_mask->byte_248_ack_psn,
3382 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3383 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3384 0);
3385
3386 hr_qp->access_flags = attr->qp_access_flags;
3387 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3388 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3389 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3390 V2_QPC_BYTE_252_TX_CQN_S, 0);
3391
3392 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3393 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3394
3395 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3396 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3397 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3398 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3399 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3400 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3401}
3402
/*
 * Program the QP context for the INIT -> INIT transition.
 *
 * Hardware semantics: a field written into @context only takes effect
 * when the corresponding field in @qpc_mask is cleared to 0, so every
 * @context write below is paired with a clearing write to @qpc_mask.
 */
static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/* Transport service type of this QP */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/*
	 * Extended SGE shift: GSI QPs always use the extended SGE space;
	 * other QPs only when sq.max_gs > 2 (smaller SGE counts fit in
	 * the WQE itself, so the shift is programmed as 0).
	 */
	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	/*
	 * Remote access enables (RRE/RWE/ATE): take them from @attr when
	 * this modify changes them, otherwise re-program the values
	 * cached on the QP.
	 */
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	} else {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	}

	/* SQ depth as a log2 shift */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	/* RQ depth: 0 for XRC QPs or when an SRQ is attached (no own RQ) */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	/* Protection domain number */
	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	/* Receive and send completion queue numbers */
	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	/* Enable SRQ mode and record the SRQ number when one is attached */
	if (ibqp->srq) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
	}

	/* Source (local) QP number */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	if (attr_mask & IB_QP_DEST_QPN) {
		/*
		 * NOTE(review): the local QPN is written into the DQPN
		 * field here, while the INIT->RTR path programs
		 * attr->dest_qp_num instead — confirm against the hip08
		 * QPC specification whether this is intentional.
		 */
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}
}
3522
/*
 * Program the QP context for the INIT -> RTR (Ready To Receive)
 * transition: WQE/SGE base addresses from the MTT table, IRRL/TRRL
 * base addresses, path information (SGID index, DMAC, MTU) and
 * receive-side bookkeeping.
 *
 * A field in @context only takes effect when the matching field in
 * @qpc_mask is cleared to 0, so context writes are paired with mask
 * clears throughout.
 *
 * Returns 0 on success or -EINVAL when an address lookup fails or an
 * unsupported attribute (alternate path) is requested.
 */
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle_3;
	dma_addr_t dma_handle_2;
	dma_addr_t dma_handle;
	u32 page_size;
	u8 port_num;
	u64 *mtts_3;
	u64 *mtts_2;
	u64 *mtts;
	u8 *dmac;
	u8 *smac;
	int port;

	/* Look up the MTT entries describing the QP buffer */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		return -EINVAL;
	}

	/* Look up the IRRL (initiator resource list) for this QPN */
	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				     hr_qp->qpn, &dma_handle_2);
	if (!mtts_2) {
		dev_err(dev, "qp irrl_table find failed\n");
		return -EINVAL;
	}

	/* Look up the TRRL (target resource list) for this QPN */
	mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
				     hr_qp->qpn, &dma_handle_3);
	if (!mtts_3) {
		dev_err(dev, "qp trrl_table find failed\n");
		return -EINVAL;
	}

	/* Alternate path is not supported by this hardware generation */
	if (attr_mask & IB_QP_ALT_PATH) {
		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
		return -EINVAL;
	}

	dmac = (u8 *)attr->ah_attr.roce.dmac;
	/* WQE/SGE base address: low 32 bits of (dma_handle >> 3) here,
	 * upper bits go into byte_12 below.
	 */
	context->wqe_sge_ba = (u32)(dma_handle >> 3);
	qpc_mask->wqe_sge_ba = 0;

	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);

	/* MTT hop counts for SQ/SGE/RQ address translation;
	 * HNS_ROCE_HOP_NUM_0 means direct addressing and is encoded as 0.
	 */
	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.mtt_hop_num);
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       hr_dev->caps.mtt_hop_num : 0);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.mtt_hop_num);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);

	/* Base-address and buffer page sizes for the WQE/SGE region */
	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
		       hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
		       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);

	/* Current and next RQ WQE block addresses, split into low 32 bits
	 * (dedicated field) and upper bits (byte_92 / byte_104).
	 */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
				    >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_cur_blk_addr = 0;

	roce_set_field(context->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
		       mtts[hr_qp->rq.offset / page_size]
		       >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);

	context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
				    >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_nxt_blk_addr = 0;

	roce_set_field(context->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
		       mtts[hr_qp->rq.offset / page_size + 1]
		       >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);

	/* TRRL base address, spread across byte_132 / trrl_ba / byte_140 */
	roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, 0);
	context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
	qpc_mask->trrl_ba = 0;
	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S,
		       (u32)(dma_handle_3 >> (32 + 16 + 4)));
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S, 0);

	/* IRRL base address, spread across irrl_ba / byte_208 */
	context->irrl_ba = (u32)(dma_handle_2 >> 6);
	qpc_mask->irrl_ba = 0;
	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S,
		       dma_handle_2 >> (32 + 6));
	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S, 0);

	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);

	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     hr_qp->sq_signal_bits);
	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     0);

	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;

	smac = (u8 *)hr_dev->dev_addr[port];
	/* Enable loopback when the destination MAC is our own, or when
	 * the device-wide loopback indicator is set.
	 */
	if (ether_addr_equal_unaligned(dmac, smac) ||
	    hr_dev->loop_idc == 0x1) {
		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}

	/* Source GID index in the per-port GID table */
	port_num = rdma_ah_get_port_num(&attr->ah_attr);
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M,
		       V2_QPC_BYTE_20_SGID_IDX_S,
		       hns_get_gid_index(hr_dev, port_num - 1,
					 grh->sgid_index));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M,
		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
	/* Destination MAC: bytes 0-3 in the dmac field, bytes 4-5 in
	 * byte_52.
	 */
	memcpy(&(context->dmac), dmac, 4);
	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
	qpc_mask->dmac = 0;
	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, 0);

	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);

	/* GSI/UD QPs are fixed at 4K MTU; others take the requested MTU */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
	else if (attr_mask & IB_QP_PATH_MTU)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);

	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
		       V2_QPC_BYTE_24_MTU_S, 0);

	roce_set_field(context->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);

	/* Mask-only clears: let hardware reset these receive-side fields */
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);

	context->rq_rnr_timer = 0;
	qpc_mask->rq_rnr_timer = 0;

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);

	return 0;
}
3768
/*
 * Program the QP context for the RTR -> RTS (Ready To Send)
 * transition: SQ / extended-SGE block addresses from the MTT table and
 * clearing of the send-side retry/ack bookkeeping fields.
 *
 * A field in @context only takes effect when the matching field in
 * @qpc_mask is cleared to 0; writes with a mask clear alone tell the
 * hardware to reset that field.
 *
 * Returns 0 on success or -EINVAL when the QP buffer lookup fails or
 * unsupported attributes (alternate path / path migration) are given.
 */
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr, int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u32 page_size;
	u64 *mtts;

	/* Look up the MTT entries describing the QP buffer */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		return -EINVAL;
	}

	/* Alternate path and path migration are not supported */
	if ((attr_mask & IB_QP_ALT_PATH) ||
	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
		return -EINVAL;
	}

	/* Current SQ WQE block address: low 32 bits in the dedicated
	 * field, upper bits in byte_168.
	 */
	context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	/* Extended SGE block address: only meaningful for GSI QPs or
	 * when sq.max_gs > 2 (otherwise programmed as 0).
	 */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->sq_cur_sge_blk_addr =
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
				      ((u32)(mtts[hr_qp->sge.offset / page_size]
				      >> PAGE_ADDR_SHIFT)) : 0;
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       (mtts[hr_qp->sge.offset / page_size] >>
		       (32 + PAGE_ADDR_SHIFT)) : 0);
	qpc_mask->sq_cur_sge_blk_addr = 0;
	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);

	context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->rx_sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);

	/* Mask-only clears below: reset IRRL / ack / retry state so the
	 * hardware starts the send side from a clean slate.
	 */
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);

	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	/* Initial limit sequence number */
	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0x100);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	return 0;
}
3884
3885static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
3886 enum ib_qp_state new_state)
3887{
3888
3889 if ((cur_state != IB_QPS_RESET &&
3890 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
3891 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
3892 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
3893 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
3894 return true;
3895
3896 return false;
3897
3898}
3899
3900static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3901 const struct ib_qp_attr *attr,
3902 int attr_mask, enum ib_qp_state cur_state,
3903 enum ib_qp_state new_state)
3904{
3905 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3906 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3907 struct hns_roce_v2_qp_context *context;
3908 struct hns_roce_v2_qp_context *qpc_mask;
3909 struct device *dev = hr_dev->dev;
3910 int ret = -EINVAL;
3911
3912 context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
3913 if (!context)
3914 return -ENOMEM;
3915
3916 qpc_mask = context + 1;
3917
3918
3919
3920
3921
3922
3923 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
3924 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3925 memset(qpc_mask, 0, sizeof(*qpc_mask));
3926 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3927 qpc_mask);
3928 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3929 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3930 qpc_mask);
3931 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3932 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3933 qpc_mask);
3934 if (ret)
3935 goto out;
3936 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3937 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3938 qpc_mask);
3939 if (ret)
3940 goto out;
3941 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
3942
3943 ;
3944 } else {
3945 dev_err(dev, "Illegal state for QP!\n");
3946 ret = -EINVAL;
3947 goto out;
3948 }
3949
3950
3951 if (new_state == IB_QPS_ERR) {
3952 roce_set_field(context->byte_160_sq_ci_pi,
3953 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3954 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
3955 hr_qp->sq.head);
3956 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3957 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3958 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3959
3960 if (!ibqp->srq) {
3961 roce_set_field(context->byte_84_rq_ci_pi,
3962 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3963 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
3964 hr_qp->rq.head);
3965 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3966 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3967 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3968 }
3969 }
3970
3971 if (attr_mask & IB_QP_AV) {
3972 const struct ib_global_route *grh =
3973 rdma_ah_read_grh(&attr->ah_attr);
3974 const struct ib_gid_attr *gid_attr = NULL;
3975 int is_roce_protocol;
3976 u16 vlan = 0xffff;
3977 u8 ib_port;
3978 u8 hr_port;
3979
3980 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
3981 hr_qp->port + 1;
3982 hr_port = ib_port - 1;
3983 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
3984 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
3985
3986 if (is_roce_protocol) {
3987 gid_attr = attr->ah_attr.grh.sgid_attr;
3988 ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
3989 if (ret)
3990 goto out;
3991 }
3992
3993 if (vlan < VLAN_CFI_MASK) {
3994 roce_set_bit(context->byte_76_srqn_op_en,
3995 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
3996 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3997 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
3998 roce_set_bit(context->byte_168_irrl_idx,
3999 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4000 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4001 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4002 }
4003
4004 roce_set_field(context->byte_24_mtu_tc,
4005 V2_QPC_BYTE_24_VLAN_ID_M,
4006 V2_QPC_BYTE_24_VLAN_ID_S, vlan);
4007 roce_set_field(qpc_mask->byte_24_mtu_tc,
4008 V2_QPC_BYTE_24_VLAN_ID_M,
4009 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4010
4011 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4012 dev_err(hr_dev->dev,
4013 "sgid_index(%u) too large. max is %d\n",
4014 grh->sgid_index,
4015 hr_dev->caps.gid_table_len[hr_port]);
4016 ret = -EINVAL;
4017 goto out;
4018 }
4019
4020 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4021 dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4022 ret = -EINVAL;
4023 goto out;
4024 }
4025
4026 roce_set_field(context->byte_52_udpspn_dmac,
4027 V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
4028 (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
4029 0 : 0x12b7);
4030
4031 roce_set_field(qpc_mask->byte_52_udpspn_dmac,
4032 V2_QPC_BYTE_52_UDPSPN_M,
4033 V2_QPC_BYTE_52_UDPSPN_S, 0);
4034
4035 roce_set_field(context->byte_20_smac_sgid_idx,
4036 V2_QPC_BYTE_20_SGID_IDX_M,
4037 V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
4038
4039 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4040 V2_QPC_BYTE_20_SGID_IDX_M,
4041 V2_QPC_BYTE_20_SGID_IDX_S, 0);
4042
4043 roce_set_field(context->byte_24_mtu_tc,
4044 V2_QPC_BYTE_24_HOP_LIMIT_M,
4045 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4046 roce_set_field(qpc_mask->byte_24_mtu_tc,
4047 V2_QPC_BYTE_24_HOP_LIMIT_M,
4048 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4049
4050 if (hr_dev->pci_dev->revision == 0x21 &&
4051 gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4052 roce_set_field(context->byte_24_mtu_tc,
4053 V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4054 grh->traffic_class >> 2);
4055 else
4056 roce_set_field(context->byte_24_mtu_tc,
4057 V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4058 grh->traffic_class);
4059 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4060 V2_QPC_BYTE_24_TC_S, 0);
4061 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4062 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4063 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4064 V2_QPC_BYTE_28_FL_S, 0);
4065 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4066 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4067 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4068 V2_QPC_BYTE_28_SL_S,
4069 rdma_ah_get_sl(&attr->ah_attr));
4070 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4071 V2_QPC_BYTE_28_SL_S, 0);
4072 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4073 }
4074
4075 if (attr_mask & IB_QP_TIMEOUT) {
4076 if (attr->timeout < 31) {
4077 roce_set_field(context->byte_28_at_fl,
4078 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4079 attr->timeout);
4080 roce_set_field(qpc_mask->byte_28_at_fl,
4081 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4082 0);
4083 } else {
4084 dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
4085 }
4086 }
4087
4088 if (attr_mask & IB_QP_RETRY_CNT) {
4089 roce_set_field(context->byte_212_lsn,
4090 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4091 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4092 attr->retry_cnt);
4093 roce_set_field(qpc_mask->byte_212_lsn,
4094 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4095 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4096
4097 roce_set_field(context->byte_212_lsn,
4098 V2_QPC_BYTE_212_RETRY_CNT_M,
4099 V2_QPC_BYTE_212_RETRY_CNT_S,
4100 attr->retry_cnt);
4101 roce_set_field(qpc_mask->byte_212_lsn,
4102 V2_QPC_BYTE_212_RETRY_CNT_M,
4103 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4104 }
4105
4106 if (attr_mask & IB_QP_RNR_RETRY) {
4107 roce_set_field(context->byte_244_rnr_rxack,
4108 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4109 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4110 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4111 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4112 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4113
4114 roce_set_field(context->byte_244_rnr_rxack,
4115 V2_QPC_BYTE_244_RNR_CNT_M,
4116 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4117 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4118 V2_QPC_BYTE_244_RNR_CNT_M,
4119 V2_QPC_BYTE_244_RNR_CNT_S, 0);
4120 }
4121
4122 if (attr_mask & IB_QP_SQ_PSN) {
4123 roce_set_field(context->byte_172_sq_psn,
4124 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4125 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4126 roce_set_field(qpc_mask->byte_172_sq_psn,
4127 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4128 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4129
4130 roce_set_field(context->byte_196_sq_psn,
4131 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4132 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4133 roce_set_field(qpc_mask->byte_196_sq_psn,
4134 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4135 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4136
4137 roce_set_field(context->byte_220_retry_psn_msn,
4138 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4139 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4140 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4141 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4142 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4143
4144 roce_set_field(context->byte_224_retry_msg,
4145 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4146 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4147 attr->sq_psn >> 16);
4148 roce_set_field(qpc_mask->byte_224_retry_msg,
4149 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4150 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4151
4152 roce_set_field(context->byte_224_retry_msg,
4153 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4154 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4155 attr->sq_psn);
4156 roce_set_field(qpc_mask->byte_224_retry_msg,
4157 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4158 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4159
4160 roce_set_field(context->byte_244_rnr_rxack,
4161 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4162 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4163 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4164 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4165 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4166 }
4167
4168 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4169 attr->max_dest_rd_atomic) {
4170 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4171 V2_QPC_BYTE_140_RR_MAX_S,
4172 fls(attr->max_dest_rd_atomic - 1));
4173 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4174 V2_QPC_BYTE_140_RR_MAX_S, 0);
4175 }
4176
4177 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4178 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4179 V2_QPC_BYTE_208_SR_MAX_S,
4180 fls(attr->max_rd_atomic - 1));
4181 roce_set_field(qpc_mask->byte_208_irrl,
4182 V2_QPC_BYTE_208_SR_MAX_M,
4183 V2_QPC_BYTE_208_SR_MAX_S, 0);
4184 }
4185
4186 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4187 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4188
4189 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4190 roce_set_field(context->byte_80_rnr_rx_cqn,
4191 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4192 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4193 attr->min_rnr_timer);
4194 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4195 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4196 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4197 }
4198
4199
4200 if (attr_mask & IB_QP_RQ_PSN) {
4201 roce_set_field(context->byte_108_rx_reqepsn,
4202 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4203 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4204 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4205 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4206 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4207
4208 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4209 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4210 roce_set_field(qpc_mask->byte_152_raq,
4211 V2_QPC_BYTE_152_RAQ_PSN_M,
4212 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4213 }
4214
4215 if (attr_mask & IB_QP_QKEY) {
4216 context->qkey_xrcd = attr->qkey;
4217 qpc_mask->qkey_xrcd = 0;
4218 hr_qp->qkey = attr->qkey;
4219 }
4220
4221 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4222 ibqp->srq ? 1 : 0);
4223 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4224 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4225
4226
4227 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4228 V2_QPC_BYTE_60_QP_ST_S, new_state);
4229 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4230 V2_QPC_BYTE_60_QP_ST_S, 0);
4231
4232
4233 ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
4234 context, hr_qp);
4235 if (ret) {
4236 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4237 goto out;
4238 }
4239
4240 hr_qp->state = new_state;
4241
4242 if (attr_mask & IB_QP_ACCESS_FLAGS)
4243 hr_qp->atomic_rd_en = attr->qp_access_flags;
4244
4245 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4246 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4247 if (attr_mask & IB_QP_PORT) {
4248 hr_qp->port = attr->port_num - 1;
4249 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4250 }
4251
4252 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4253 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4254 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4255 if (ibqp->send_cq != ibqp->recv_cq)
4256 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4257 hr_qp->qpn, NULL);
4258
4259 hr_qp->rq.head = 0;
4260 hr_qp->rq.tail = 0;
4261 hr_qp->sq.head = 0;
4262 hr_qp->sq.tail = 0;
4263 hr_qp->sq_next_wqe = 0;
4264 hr_qp->next_sge = 0;
4265 if (hr_qp->rq.wqe_cnt)
4266 *hr_qp->rdb.db_record = 0;
4267 }
4268
4269out:
4270 kfree(context);
4271 return ret;
4272}
4273
4274static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4275{
4276 switch (state) {
4277 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4278 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4279 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4280 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4281 case HNS_ROCE_QP_ST_SQ_DRAINING:
4282 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4283 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4284 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4285 default: return -1;
4286 }
4287}
4288
4289static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4290 struct hns_roce_qp *hr_qp,
4291 struct hns_roce_v2_qp_context *hr_context)
4292{
4293 struct hns_roce_cmd_mailbox *mailbox;
4294 int ret;
4295
4296 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4297 if (IS_ERR(mailbox))
4298 return PTR_ERR(mailbox);
4299
4300 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4301 HNS_ROCE_CMD_QUERY_QPC,
4302 HNS_ROCE_CMD_TIMEOUT_MSECS);
4303 if (ret) {
4304 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4305 goto out;
4306 }
4307
4308 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4309
4310out:
4311 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4312 return ret;
4313}
4314
4315static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4316 int qp_attr_mask,
4317 struct ib_qp_init_attr *qp_init_attr)
4318{
4319 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4320 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4321 struct hns_roce_v2_qp_context *context;
4322 struct device *dev = hr_dev->dev;
4323 int tmp_qp_state;
4324 int state;
4325 int ret;
4326
4327 context = kzalloc(sizeof(*context), GFP_KERNEL);
4328 if (!context)
4329 return -ENOMEM;
4330
4331 memset(qp_attr, 0, sizeof(*qp_attr));
4332 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4333
4334 mutex_lock(&hr_qp->mutex);
4335
4336 if (hr_qp->state == IB_QPS_RESET) {
4337 qp_attr->qp_state = IB_QPS_RESET;
4338 ret = 0;
4339 goto done;
4340 }
4341
4342 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
4343 if (ret) {
4344 dev_err(dev, "query qpc error\n");
4345 ret = -EINVAL;
4346 goto out;
4347 }
4348
4349 state = roce_get_field(context->byte_60_qpst_tempid,
4350 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4351 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4352 if (tmp_qp_state == -1) {
4353 dev_err(dev, "Illegal ib_qp_state\n");
4354 ret = -EINVAL;
4355 goto out;
4356 }
4357 hr_qp->state = (u8)tmp_qp_state;
4358 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4359 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
4360 V2_QPC_BYTE_24_MTU_M,
4361 V2_QPC_BYTE_24_MTU_S);
4362 qp_attr->path_mig_state = IB_MIG_ARMED;
4363 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4364 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4365 qp_attr->qkey = V2_QKEY_VAL;
4366
4367 qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
4368 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4369 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4370 qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
4371 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4372 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4373 qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
4374 V2_QPC_BYTE_56_DQPN_M,
4375 V2_QPC_BYTE_56_DQPN_S);
4376 qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
4377 V2_QPC_BYTE_76_RRE_S)) << 2) |
4378 ((roce_get_bit(context->byte_76_srqn_op_en,
4379 V2_QPC_BYTE_76_RWE_S)) << 1) |
4380 ((roce_get_bit(context->byte_76_srqn_op_en,
4381 V2_QPC_BYTE_76_ATE_S)) << 3);
4382 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4383 hr_qp->ibqp.qp_type == IB_QPT_UC) {
4384 struct ib_global_route *grh =
4385 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4386
4387 rdma_ah_set_sl(&qp_attr->ah_attr,
4388 roce_get_field(context->byte_28_at_fl,
4389 V2_QPC_BYTE_28_SL_M,
4390 V2_QPC_BYTE_28_SL_S));
4391 grh->flow_label = roce_get_field(context->byte_28_at_fl,
4392 V2_QPC_BYTE_28_FL_M,
4393 V2_QPC_BYTE_28_FL_S);
4394 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
4395 V2_QPC_BYTE_20_SGID_IDX_M,
4396 V2_QPC_BYTE_20_SGID_IDX_S);
4397 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
4398 V2_QPC_BYTE_24_HOP_LIMIT_M,
4399 V2_QPC_BYTE_24_HOP_LIMIT_S);
4400 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
4401 V2_QPC_BYTE_24_TC_M,
4402 V2_QPC_BYTE_24_TC_S);
4403
4404 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
4405 }
4406
4407 qp_attr->port_num = hr_qp->port + 1;
4408 qp_attr->sq_draining = 0;
4409 qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
4410 V2_QPC_BYTE_208_SR_MAX_M,
4411 V2_QPC_BYTE_208_SR_MAX_S);
4412 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
4413 V2_QPC_BYTE_140_RR_MAX_M,
4414 V2_QPC_BYTE_140_RR_MAX_S);
4415 qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
4416 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4417 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4418 qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
4419 V2_QPC_BYTE_28_AT_M,
4420 V2_QPC_BYTE_28_AT_S);
4421 qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
4422 V2_QPC_BYTE_212_RETRY_CNT_M,
4423 V2_QPC_BYTE_212_RETRY_CNT_S);
4424 qp_attr->rnr_retry = context->rq_rnr_timer;
4425
4426done:
4427 qp_attr->cur_qp_state = qp_attr->qp_state;
4428 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4429 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4430
4431 if (!ibqp->uobject) {
4432 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4433 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4434 } else {
4435 qp_attr->cap.max_send_wr = 0;
4436 qp_attr->cap.max_send_sge = 0;
4437 }
4438
4439 qp_init_attr->cap = qp_attr->cap;
4440
4441out:
4442 mutex_unlock(&hr_qp->mutex);
4443 kfree(context);
4444 return ret;
4445}
4446
4447static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4448 struct hns_roce_qp *hr_qp,
4449 struct ib_udata *udata)
4450{
4451 struct hns_roce_cq *send_cq, *recv_cq;
4452 struct device *dev = hr_dev->dev;
4453 int ret;
4454
4455 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4456
4457 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4458 hr_qp->state, IB_QPS_RESET);
4459 if (ret) {
4460 dev_err(dev, "modify QP %06lx to ERR failed.\n",
4461 hr_qp->qpn);
4462 return ret;
4463 }
4464 }
4465
4466 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4467 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4468
4469 hns_roce_lock_cqs(send_cq, recv_cq);
4470
4471 if (!udata) {
4472 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4473 to_hr_srq(hr_qp->ibqp.srq) : NULL);
4474 if (send_cq != recv_cq)
4475 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4476 }
4477
4478 hns_roce_qp_remove(hr_dev, hr_qp);
4479
4480 hns_roce_unlock_cqs(send_cq, recv_cq);
4481
4482 hns_roce_qp_free(hr_dev, hr_qp);
4483
4484
4485 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4486 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4487 (hr_qp->ibqp.qp_type == IB_QPT_UD))
4488 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4489
4490 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
4491
4492 if (udata) {
4493 struct hns_roce_ucontext *context =
4494 rdma_udata_to_drv_context(
4495 udata,
4496 struct hns_roce_ucontext,
4497 ibucontext);
4498
4499 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4500 hns_roce_db_unmap_user(context, &hr_qp->sdb);
4501
4502 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4503 hns_roce_db_unmap_user(context, &hr_qp->rdb);
4504 ib_umem_release(hr_qp->umem);
4505 } else {
4506 kfree(hr_qp->sq.wrid);
4507 kfree(hr_qp->rq.wrid);
4508 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4509 if (hr_qp->rq.wqe_cnt)
4510 hns_roce_free_db(hr_dev, &hr_qp->rdb);
4511 }
4512
4513 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4514 hr_qp->rq.wqe_cnt) {
4515 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4516 kfree(hr_qp->rq_inl_buf.wqe_list);
4517 }
4518
4519 return 0;
4520}
4521
4522static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4523{
4524 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4525 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4526 int ret;
4527
4528 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4529 if (ret) {
4530 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
4531 return ret;
4532 }
4533
4534 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4535 kfree(hr_to_hr_sqp(hr_qp));
4536 else
4537 kfree(hr_qp);
4538
4539 return 0;
4540}
4541
/*
 * hns_roce_v2_qp_flow_control_init() - reset and clear the SCC (congestion
 * control) context of @hr_qp, then poll the firmware until it reports the
 * clear is complete.
 *
 * Serialized by qp_table.scc_mutex. Returns 0 on success, a negative errno
 * on command failure, or -ETIMEDOUT if the done flag never appears.
 */
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* Reset the SCC context so any stale state is discarded. */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* Ask the firmware to clear the SCC context of this QP. */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* Poll the clr_done flag, sleeping 20 ms between queries. */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		msleep(20);
	}

	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}
4594
/*
 * hns_roce_v2_modify_cq() - update the moderation parameters of a CQ
 * (maximum coalesced CQE count and coalescing period) with the
 * MODIFY_CQC mailbox command.
 *
 * Only the two byte_56 fields are unmasked, so the hardware leaves all
 * other CQC fields untouched. Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* The mailbox carries the new context followed by a mask context. */
	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	/* Start with an all-ones mask, then clear the fields to be updated. */
	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       cq_count);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       0);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       cq_period);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       0);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_MODIFY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");

	return ret;
}
4635
/*
 * hns_roce_set_qps_to_err() - move the QP identified by @qpn into the ERROR
 * state after an asynchronous work-queue error event.
 *
 * For userspace QPs, the queue head indices are first refreshed from the
 * user doorbell records (sdb/rdb); without an sdb record the flush cannot
 * be performed and the event is only logged.
 */
static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
{
	struct hns_roce_qp *hr_qp;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (!hr_qp) {
		dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
		return;
	}

	if (hr_qp->ibqp.uobject) {
		if (hr_qp->sdb_en == 1) {
			/* Pick up the latest heads written by userspace. */
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
			if (hr_qp->rdb_en == 1)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
			return;
		}
	}

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;
	ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
				    hr_qp->state, IB_QPS_ERR);
	if (ret)
		dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
			qpn);
}
4668
/*
 * hns_roce_irq_work_handle() - workqueue handler that reports an async
 * event outside IRQ context and, for fatal work-queue errors, moves the
 * affected QP into the ERROR state.
 *
 * Frees the hns_roce_work item allocated by hns_roce_v2_init_irq_work().
 */
static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
		container_of(work, struct hns_roce_work, work);
	struct device *dev = irq_work->hr_dev->dev;
	u32 qpn = irq_work->qpn;
	u32 cqn = irq_work->cqn;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		dev_info(dev, "Path migrated succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		dev_warn(dev, "Path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		dev_info(dev, "Communication established.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		dev_warn(dev, "Send queue drained.\n");
		break;
	/* The three WQ error events also force the QP to the ERROR state. */
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_err(dev, "Invalid request local work queue 0x%x error.\n",
			qpn);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		dev_warn(dev, "SRQ limit reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		dev_warn(dev, "SRQ last wqe reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		dev_err(dev, "SRQ catas error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_err(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		dev_warn(dev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		dev_warn(dev, "Function level reset.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}
4732
4733static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4734 struct hns_roce_eq *eq,
4735 u32 qpn, u32 cqn)
4736{
4737 struct hns_roce_work *irq_work;
4738
4739 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4740 if (!irq_work)
4741 return;
4742
4743 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4744 irq_work->hr_dev = hr_dev;
4745 irq_work->qpn = qpn;
4746 irq_work->cqn = cqn;
4747 irq_work->event_type = eq->event_type;
4748 irq_work->sub_type = eq->sub_type;
4749 queue_work(hr_dev->irq_workq, &(irq_work->work));
4750}
4751
4752static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4753{
4754 struct hns_roce_dev *hr_dev = eq->hr_dev;
4755 u32 doorbell[2];
4756
4757 doorbell[0] = 0;
4758 doorbell[1] = 0;
4759
4760 if (eq->type_flag == HNS_ROCE_AEQ) {
4761 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4762 HNS_ROCE_V2_EQ_DB_CMD_S,
4763 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4764 HNS_ROCE_EQ_DB_CMD_AEQ :
4765 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4766 } else {
4767 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4768 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4769
4770 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4771 HNS_ROCE_V2_EQ_DB_CMD_S,
4772 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4773 HNS_ROCE_EQ_DB_CMD_CEQ :
4774 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4775 }
4776
4777 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4778 HNS_ROCE_V2_EQ_DB_PARA_S,
4779 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4780
4781 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4782}
4783
4784static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4785{
4786 u32 buf_chk_sz;
4787 unsigned long off;
4788
4789 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4790 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4791
4792 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4793 off % buf_chk_sz);
4794}
4795
4796static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4797{
4798 u32 buf_chk_sz;
4799 unsigned long off;
4800
4801 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4802
4803 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4804
4805 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4806 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4807 off % buf_chk_sz);
4808 else
4809 return (struct hns_roce_aeqe *)((u8 *)
4810 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4811}
4812
4813static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4814{
4815 struct hns_roce_aeqe *aeqe;
4816
4817 if (!eq->hop_num)
4818 aeqe = get_aeqe_v2(eq, eq->cons_index);
4819 else
4820 aeqe = mhop_get_aeqe(eq, eq->cons_index);
4821
4822 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4823 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4824}
4825
/*
 * hns_roce_v2_aeq_int() - drain the asynchronous event queue.
 *
 * Decodes each new AEQE, dispatches it to the QP/SRQ/CQ/command event
 * handlers and schedules deferred reporting work. Finally updates the EQ
 * doorbell. Returns nonzero when at least one AEQE was handled.
 */
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqe_found = 0;
	int event_type;
	int sub_type;
	u32 srqn;
	u32 qpn;
	u32 cqn;

	while ((aeqe = next_aeqe_sw_v2(eq))) {

		/* Make sure we read the AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		/* All three numbers share the same field; only one is valid
		 * for a given event type.
		 */
		qpn = roce_get_field(aeqe->event.qp_event.qp,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		cqn = roce_get_field(aeqe->event.cq_event.cq,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		srqn = roce_get_field(aeqe->event.srq_event.srq,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_event(hr_dev, qpn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, srqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, cqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			/* Mailbox completion: wake the waiting command. */
			hns_roce_cmd_event(hr_dev,
					le16_to_cpu(aeqe->event.cmd.token),
					aeqe->event.cmd.status,
					le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		/* Remember the event for the deferred work item below. */
		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		if (eq->cons_index > (2 * eq->entries - 1)) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
	}

	set_eq_cons_index_v2(eq);
	return aeqe_found;
}
4913
4914static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4915{
4916 u32 buf_chk_sz;
4917 unsigned long off;
4918
4919 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4920 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4921
4922 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4923 off % buf_chk_sz);
4924}
4925
4926static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4927{
4928 u32 buf_chk_sz;
4929 unsigned long off;
4930
4931 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4932
4933 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4934
4935 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4936 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4937 off % buf_chk_sz);
4938 else
4939 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4940 buf_chk_sz]) + off % buf_chk_sz);
4941}
4942
4943static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4944{
4945 struct hns_roce_ceqe *ceqe;
4946
4947 if (!eq->hop_num)
4948 ceqe = get_ceqe_v2(eq, eq->cons_index);
4949 else
4950 ceqe = mhop_get_ceqe(eq, eq->cons_index);
4951
4952 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4953 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4954}
4955
/*
 * hns_roce_v2_ceq_int() - drain the completion event queue, forwarding
 * each CQ number to the CQ completion handler, then ring the EQ doorbell.
 * Returns nonzero when at least one CEQE was handled.
 */
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ceqe *ceqe;
	int ceqe_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw_v2(eq))) {

		/* Make sure we read the CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp,
				     HNS_ROCE_V2_CEQE_COMP_CQN_M,
				     HNS_ROCE_V2_CEQE_COMP_CQN_S);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = 1;

		if (eq->cons_index > (2 * eq->entries - 1)) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v2(eq);

	return ceqe_found;
}
4990
4991static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4992{
4993 struct hns_roce_eq *eq = eq_ptr;
4994 struct hns_roce_dev *hr_dev = eq->hr_dev;
4995 int int_work = 0;
4996
4997 if (eq->type_flag == HNS_ROCE_CEQ)
4998
4999 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5000 else
5001
5002 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5003
5004 return IRQ_RETVAL(int_work);
5005}
5006
/*
 * hns_roce_v2_msix_interrupt_abn() - MSI-X handler for the abnormal
 * interrupt vector.
 *
 * Reads the abnormal interrupt status, acknowledges whichever error bit is
 * set by writing it back (write-1-to-clear pattern), re-enables the
 * abnormal interrupt, and for an AEQ overflow additionally requests a
 * function-level reset through the hnae3 framework.
 */
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		struct pci_dev *pdev = hr_dev->pci_dev;
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
		const struct hnae3_ae_ops *ops = ae_dev->ops;

		dev_err(dev, "AEQ overflow!\n");

		/* Acknowledge the overflow bit. */
		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		/* AEQ overflow is fatal; ask hnae3 for a function reset. */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
		dev_err(dev, "BUS ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
		dev_err(dev, "OTHER ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else
		dev_err(dev, "There is no abnormal irq found!\n");

	return IRQ_RETVAL(int_work);
}
5065
5066static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5067 int eq_num, int enable_flag)
5068{
5069 int i;
5070
5071 if (enable_flag == EQ_ENABLE) {
5072 for (i = 0; i < eq_num; i++)
5073 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5074 i * EQ_REG_OFFSET,
5075 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5076
5077 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5078 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5079 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5080 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5081 } else {
5082 for (i = 0; i < eq_num; i++)
5083 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5084 i * EQ_REG_OFFSET,
5085 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5086
5087 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5088 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5089 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5090 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5091 }
5092}
5093
5094static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5095{
5096 struct device *dev = hr_dev->dev;
5097 int ret;
5098
5099 if (eqn < hr_dev->caps.num_comp_vectors)
5100 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5101 0, HNS_ROCE_CMD_DESTROY_CEQC,
5102 HNS_ROCE_CMD_TIMEOUT_MSECS);
5103 else
5104 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5105 0, HNS_ROCE_CMD_DESTROY_AEQC,
5106 HNS_ROCE_CMD_TIMEOUT_MSECS);
5107 if (ret)
5108 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5109}
5110
/* Free the multi-hop EQE buffers and base-address tables allocated by
 * hns_roce_mhop_alloc_eq().  The layout mirrors the allocator: an L0
 * table whose entries point either directly at buffer pages (1 hop) or
 * at L1 tables whose entries point at buffer pages (2 hops).  The last
 * buffer page may be short (only the remaining EQEs).
 */
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	u64 idx;
	u64 size;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	int eqe_alloc;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);

	/* hop_num = 0: the whole EQ is one contiguous buffer, which the
	 * allocator stored in bt_l0/l0_dma.
	 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		dma_free_coherent(dev, (unsigned int)(eq->entries *
				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
		return;
	}

	/* hop_num = 1 or 2: free the L0 base-address table first */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	if (mhop_num == 1) {
		for (i = 0; i < eq->l0_last_num; i++) {
			/* the final buffer only holds the leftover EQEs */
			if (i == eq->l0_last_num - 1) {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
				dma_free_coherent(dev, size, eq->buf[i],
						  eq->buf_dma[i]);
				break;
			}
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
		}
	} else if (mhop_num == 2) {
		for (i = 0; i < eq->l0_last_num; i++) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			/* bt_chk_sz / 8 = 64-bit BA entries per L1 table */
			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * (bt_chk_sz / 8) + j;
				/* last (row, column): short tail buffer */
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
					dma_free_coherent(dev, size,
							  eq->buf[idx],
							  eq->buf_dma[idx]);
					break;
				}
				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}
	kfree(eq->buf_dma);
	kfree(eq->buf);
	kfree(eq->l1_dma);
	kfree(eq->bt_l1);
	eq->buf_dma = NULL;
	eq->buf = NULL;
	eq->l1_dma = NULL;
	eq->bt_l1 = NULL;
}
5181
5182static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5183 struct hns_roce_eq *eq)
5184{
5185 u32 buf_chk_sz;
5186
5187 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5188
5189 if (hr_dev->caps.eqe_hop_num) {
5190 hns_roce_mhop_free_eq(hr_dev, eq);
5191 return;
5192 }
5193
5194 if (eq->buf_list)
5195 dma_free_coherent(hr_dev->dev, buf_chk_sz,
5196 eq->buf_list->buf, eq->buf_list->map);
5197}
5198
/* Build the EQ context (EQC) for the CREATE_{A,C}EQC mailbox command in
 * @mb_buf, and initialise the software-side EQ state (doorbell address,
 * consumer index, buffer geometry) that the interrupt handlers use.
 */
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* hop_num == 0 uses the linear buffer, otherwise the L0 BT */
	if (!eq->hop_num)
		eq->eqe_ba = eq->buf_list->map;
	else
		eq->eqe_ba = eq->l0_dma;

	/* set eqc state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQ_ST_M,
		       HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQN_M,
		       HNS_ROCE_EQC_EQN_S, eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S,
		       HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S,
		       HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer,
		       HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set bt_ba [34:3] */
	roce_set_field(eqc->eqe_ba0,
		       HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set bt_ba [64:35] */
	roce_set_field(eqc->eqe_ba1,
		       HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_SHIFT_M,
		       HNS_ROCE_EQC_SHIFT_S, eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S,
		       HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S,
		       HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nex_eqe_ba[43:12] */
	roce_set_field(eqc->nxt_eqe_ba0,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nex_eqe_ba[63:44] */
	roce_set_field(eqc->nxt_eqe_ba1,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
5347
/* Allocate the multi-hop EQE buffers for @eq.
 *
 * hop_num == 0: one contiguous buffer (stored in bt_l0/l0_dma).
 * hop_num == 1: an L0 base-address table whose 64-bit entries point at
 *               up to bt_chk_sz/8 buffer pages.
 * hop_num == 2: an L0 table pointing at L1 tables, each of whose
 *               entries points at one buffer page.
 *
 * The last buffer page is sized to hold only the remaining EQEs.  On
 * failure, everything allocated so far is unwound; returns 0 on success,
 * -EINVAL for impossible geometry or -ENOMEM on allocation failure.
 */
static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	int eq_alloc_done = 0;
	int eq_buf_cnt = 0;
	int eqe_alloc;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	u64 size;
	u64 idx;
	int ba_num;
	int bt_num;
	int record_i;
	int record_j;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);

	/* number of buffer pages and of base-address tables needed */
	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
		  / buf_chk_sz;
	bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);

	/* hop_num = 0: everything must fit in a single buffer page */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		if (eq->entries > buf_chk_sz / eq->eqe_size) {
			dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
				eq->entries);
			return -EINVAL;
		}
		eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
					       &(eq->l0_dma), GFP_KERNEL);
		if (!eq->bt_l0)
			return -ENOMEM;

		eq->cur_eqe_ba = eq->l0_dma;
		eq->nxt_eqe_ba = 0;

		memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);

		return 0;
	}

	eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
	if (!eq->buf_dma)
		return -ENOMEM;
	eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
	if (!eq->buf)
		goto err_kcalloc_buf;

	if (mhop_num == 2) {
		eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
		if (!eq->l1_dma)
			goto err_kcalloc_l1_dma;

		eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
		if (!eq->bt_l1)
			goto err_kcalloc_bt_l1;
	}

	/* alloc L0 BT */
	eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
	if (!eq->bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 1) {
		/* NOTE(review): this only warns and keeps going — the loop
		 * below still stops after ba_num pages; confirm this
		 * geometry cannot actually occur.
		 */
		if (ba_num > (bt_chk_sz / 8))
			dev_err(dev, "ba_num %d is too large for 1 hop\n",
				ba_num);

		/* alloc buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
			/* last page: only the leftover EQEs */
			if (eq_buf_cnt + 1 < ba_num) {
				size = buf_chk_sz;
			} else {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
			}
			eq->buf[i] = dma_alloc_coherent(dev, size,
							&(eq->buf_dma[i]),
							GFP_KERNEL);
			if (!eq->buf[i])
				goto err_dma_alloc_buf;

			/* publish the page address in the L0 BT */
			*(eq->bt_l0 + i) = eq->buf_dma[i];

			eq_buf_cnt++;
			if (eq_buf_cnt >= ba_num)
				break;
		}
		eq->cur_eqe_ba = eq->buf_dma[0];
		eq->nxt_eqe_ba = eq->buf_dma[1];

	} else if (mhop_num == 2) {
		/* alloc L1 BT and buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
			eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
							  &(eq->l1_dma[i]),
							  GFP_KERNEL);
			if (!eq->bt_l1[i])
				goto err_dma_alloc_l1;
			*(eq->bt_l0 + i) = eq->l1_dma[i];

			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * bt_chk_sz / 8 + j;
				/* last page: only the leftover EQEs */
				if (eq_buf_cnt + 1 < ba_num) {
					size = buf_chk_sz;
				} else {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
				}
				eq->buf[idx] = dma_alloc_coherent(dev, size,
								  &(eq->buf_dma[idx]),
								  GFP_KERNEL);
				if (!eq->buf[idx])
					goto err_dma_alloc_buf;

				/* publish the page address in the L1 BT */
				*(eq->bt_l1[i] + j) = eq->buf_dma[idx];

				eq_buf_cnt++;
				if (eq_buf_cnt >= ba_num) {
					eq_alloc_done = 1;
					break;
				}
			}

			if (eq_alloc_done)
				break;
		}
		eq->cur_eqe_ba = eq->buf_dma[0];
		eq->nxt_eqe_ba = eq->buf_dma[1];
	}

	/* remember how far the loops got, for hns_roce_mhop_free_eq() */
	eq->l0_last_num = i + 1;
	if (mhop_num == 2)
		eq->l1_last_num = j + 1;

	return 0;

err_dma_alloc_l1:
	/* L1 BT alloc failed at row i: rows < i are fully populated */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	eq->bt_l0 = NULL;
	eq->l0_dma = 0;
	for (i -= 1; i >= 0; i--) {
		dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
				  eq->l1_dma[i]);

		for (j = 0; j < bt_chk_sz / 8; j++) {
			idx = i * bt_chk_sz / 8 + j;
			dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
					  eq->buf_dma[idx]);
		}
	}
	goto err_dma_alloc_l0;

err_dma_alloc_buf:
	/* buffer alloc failed at (i, j): free everything before it */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	eq->bt_l0 = NULL;
	eq->l0_dma = 0;

	if (mhop_num == 1)
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
	else if (mhop_num == 2) {
		record_i = i;
		record_j = j;
		for (; i >= 0; i--) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / 8; j++) {
				/* in the failing row, stop at column j */
				if (i == record_i && j >= record_j)
					break;

				idx = i * bt_chk_sz / 8 + j;
				dma_free_coherent(dev, buf_chk_sz,
						  eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}

err_dma_alloc_l0:
	kfree(eq->bt_l1);
	eq->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(eq->l1_dma);
	eq->l1_dma = NULL;

err_kcalloc_l1_dma:
	kfree(eq->buf);
	eq->buf = NULL;

err_kcalloc_buf:
	kfree(eq->buf_dma);
	eq->buf_dma = NULL;

	return -ENOMEM;
}
5555
5556static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5557 struct hns_roce_eq *eq,
5558 unsigned int eq_cmd)
5559{
5560 struct device *dev = hr_dev->dev;
5561 struct hns_roce_cmd_mailbox *mailbox;
5562 u32 buf_chk_sz = 0;
5563 int ret;
5564
5565
5566 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5567 if (IS_ERR(mailbox))
5568 return PTR_ERR(mailbox);
5569
5570 if (!hr_dev->caps.eqe_hop_num) {
5571 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5572
5573 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5574 GFP_KERNEL);
5575 if (!eq->buf_list) {
5576 ret = -ENOMEM;
5577 goto free_cmd_mbox;
5578 }
5579
5580 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5581 &(eq->buf_list->map),
5582 GFP_KERNEL);
5583 if (!eq->buf_list->buf) {
5584 ret = -ENOMEM;
5585 goto err_alloc_buf;
5586 }
5587
5588 } else {
5589 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5590 if (ret) {
5591 ret = -ENOMEM;
5592 goto free_cmd_mbox;
5593 }
5594 }
5595
5596 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5597
5598 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5599 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5600 if (ret) {
5601 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5602 goto err_cmd_mbox;
5603 }
5604
5605 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5606
5607 return 0;
5608
5609err_cmd_mbox:
5610 if (!hr_dev->caps.eqe_hop_num)
5611 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5612 eq->buf_list->map);
5613 else {
5614 hns_roce_mhop_free_eq(hr_dev, eq);
5615 goto free_cmd_mbox;
5616 }
5617
5618err_alloc_buf:
5619 kfree(eq->buf_list);
5620
5621free_cmd_mbox:
5622 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5623
5624 return ret;
5625}
5626
/* Create all event queues (CEQs then AEQs), enable their interrupts and
 * register the irq handlers.
 *
 * Index layout (other = num_other_vectors, aeq = num_aeq_vectors,
 * comp = num_comp_vectors):
 *   hr_dev->irq[]:       [0, other) abnormal, [other, other+aeq) AEQ,
 *                        [other+aeq, ...) CEQ
 *   eq_table->eq[]:      [0, comp) CEQ, [comp, comp+aeq) AEQ
 *   hr_dev->irq_names[]: "hns-abn-*", then "hns-aeq-*", then "hns-ceq-*"
 */
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i, j, k;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_failed_kzalloc;
		}
	}

	/* create eq: CEQs occupy eq[0..comp), AEQs eq[comp..eq_num) */
	for (j = 0; j < eq_num; j++) {
		eq = &eq_table->eq[j];
		eq->hr_dev = hr_dev;
		eq->eqn = j;
		if (j < comp_num) {
			/* CEQ; its irq vector comes after other + aeq */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ number j - comp_num; irq right after "other" */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	/* irq names are in abn/aeq/ceq order, matching hr_dev->irq[] */
	for (k = 0; k < irq_num; k++)
		if (k < other_num)
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
		else if (k < (other_num + aeq_num))
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
				 k - other_num);
		else
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
				 k - other_num - aeq_num);

	for (k = 0; k < irq_num; k++) {
		if (k < other_num)
			ret = request_irq(hr_dev->irq[k],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[k], hr_dev);

		/* CEQ c = k - other_num lives at eq[c]; its name is at
		 * other_num + aeq_num + c = k + aeq_num.
		 */
		else if (k < (other_num + comp_num))
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k + aeq_num],
					  &eq_table->eq[k - other_num]);
		/* AEQ a = k - other_num - comp_num; its name is at
		 * other_num + a = k - comp_num.
		 */
		else
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k - comp_num],
					  &eq_table->eq[k - other_num]);
		if (ret) {
			dev_err(dev, "Request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	hr_dev->irq_workq =
		create_singlethread_workqueue("hns_roce_irq_workqueue");
	if (!hr_dev->irq_workq) {
		dev_err(dev, "Create irq workqueue failed!\n");
		ret = -ENOMEM;
		goto err_request_irq_fail;
	}

	return 0;

err_request_irq_fail:
	/* k is the first index that failed (or irq_num on wq failure) */
	for (k -= 1; k >= 0; k--)
		if (k < other_num)
			free_irq(hr_dev->irq[k], hr_dev);
		else
			free_irq(eq_table->eq[k - other_num].irq,
				 &eq_table->eq[k - other_num]);

err_create_eq_fail:
	for (j -= 1; j >= 0; j--)
		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);

err_failed_kzalloc:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);
	kfree(eq_table->eq);

	return ret;
}
5761
5762static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5763{
5764 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5765 int irq_num;
5766 int eq_num;
5767 int i;
5768
5769 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5770 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5771
5772
5773 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5774
5775 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5776 free_irq(hr_dev->irq[i], hr_dev);
5777
5778 for (i = 0; i < eq_num; i++) {
5779 hns_roce_v2_destroy_eqc(hr_dev, i);
5780
5781 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5782
5783 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5784 }
5785
5786 for (i = 0; i < irq_num; i++)
5787 kfree(hr_dev->irq_names[i]);
5788
5789 kfree(eq_table->eq);
5790
5791 flush_workqueue(hr_dev->irq_workq);
5792 destroy_workqueue(hr_dev->irq_workq);
5793}
5794
5795static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5796 struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5797 u32 cqn, void *mb_buf, u64 *mtts_wqe,
5798 u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5799 dma_addr_t dma_handle_idx)
5800{
5801 struct hns_roce_srq_context *srq_context;
5802
5803 srq_context = mb_buf;
5804 memset(srq_context, 0, sizeof(*srq_context));
5805
5806 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5807 SRQC_BYTE_4_SRQ_ST_S, 1);
5808
5809 roce_set_field(srq_context->byte_4_srqn_srqst,
5810 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5811 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5812 (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5813 hr_dev->caps.srqwqe_hop_num));
5814 roce_set_field(srq_context->byte_4_srqn_srqst,
5815 SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5816 ilog2(srq->max));
5817
5818 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5819 SRQC_BYTE_4_SRQN_S, srq->srqn);
5820
5821 roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5822 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5823
5824 roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5825 SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5826
5827 srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5828
5829 roce_set_field(srq_context->byte_24_wqe_bt_ba,
5830 SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5831 SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5832 cpu_to_le32(dma_handle_wqe >> 35));
5833
5834 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5835 SRQC_BYTE_28_PD_S, pdn);
5836 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5837 SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5838 fls(srq->max_gs - 1));
5839
5840 srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
5841 srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
5842 roce_set_field(srq_context->rsv_idx_bt_ba,
5843 SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5844 SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5845 cpu_to_le32(dma_handle_idx >> 35));
5846
5847 srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
5848 srq_context->idx_cur_blk_addr =
5849 cpu_to_le32(srq_context->idx_cur_blk_addr);
5850 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5851 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5852 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5853 cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
5854 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5855 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5856 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5857 hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5858 hr_dev->caps.idx_hop_num);
5859
5860 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5861 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5862 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5863 hr_dev->caps.idx_ba_pg_sz);
5864 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5865 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5866 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5867 hr_dev->caps.idx_buf_pg_sz);
5868
5869 srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
5870 srq_context->idx_nxt_blk_addr =
5871 cpu_to_le32(srq_context->idx_nxt_blk_addr);
5872 roce_set_field(srq_context->rsv_idxnxtblkaddr,
5873 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5874 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5875 cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
5876 roce_set_field(srq_context->byte_56_xrc_cqn,
5877 SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5878 cqn);
5879 roce_set_field(srq_context->byte_56_xrc_cqn,
5880 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5881 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5882 hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
5883 roce_set_field(srq_context->byte_56_xrc_cqn,
5884 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5885 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5886 hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
5887
5888 roce_set_bit(srq_context->db_record_addr_record_en,
5889 SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5890}
5891
5892static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5893 struct ib_srq_attr *srq_attr,
5894 enum ib_srq_attr_mask srq_attr_mask,
5895 struct ib_udata *udata)
5896{
5897 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5898 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5899 struct hns_roce_srq_context *srq_context;
5900 struct hns_roce_srq_context *srqc_mask;
5901 struct hns_roce_cmd_mailbox *mailbox;
5902 int ret;
5903
5904 if (srq_attr_mask & IB_SRQ_LIMIT) {
5905 if (srq_attr->srq_limit >= srq->max)
5906 return -EINVAL;
5907
5908 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5909 if (IS_ERR(mailbox))
5910 return PTR_ERR(mailbox);
5911
5912 srq_context = mailbox->buf;
5913 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5914
5915 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5916
5917 roce_set_field(srq_context->byte_8_limit_wl,
5918 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5919 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5920 roce_set_field(srqc_mask->byte_8_limit_wl,
5921 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5922 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5923
5924 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5925 HNS_ROCE_CMD_MODIFY_SRQC,
5926 HNS_ROCE_CMD_TIMEOUT_MSECS);
5927 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5928 if (ret) {
5929 dev_err(hr_dev->dev,
5930 "MODIFY SRQ Failed to cmd mailbox.\n");
5931 return ret;
5932 }
5933 }
5934
5935 return 0;
5936}
5937
5938static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5939{
5940 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5941 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5942 struct hns_roce_srq_context *srq_context;
5943 struct hns_roce_cmd_mailbox *mailbox;
5944 int limit_wl;
5945 int ret;
5946
5947 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5948 if (IS_ERR(mailbox))
5949 return PTR_ERR(mailbox);
5950
5951 srq_context = mailbox->buf;
5952 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5953 HNS_ROCE_CMD_QUERY_SRQC,
5954 HNS_ROCE_CMD_TIMEOUT_MSECS);
5955 if (ret) {
5956 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
5957 goto out;
5958 }
5959
5960 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5961 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5962 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5963
5964 attr->srq_limit = limit_wl;
5965 attr->max_wr = srq->max - 1;
5966 attr->max_sge = srq->max_gs;
5967
5968 memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
5969
5970out:
5971 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5972 return ret;
5973}
5974
5975static int find_empty_entry(struct hns_roce_idx_que *idx_que)
5976{
5977 int bit_num;
5978 int i;
5979
5980
5981 for (i = 0; idx_que->bitmap[i] == 0; ++i)
5982 ;
5983 bit_num = ffs(idx_que->bitmap[i]);
5984 idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
5985
5986 return i * sizeof(u64) * 8 + (bit_num - 1);
5987}
5988
5989static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
5990 int cur_idx, int wqe_idx)
5991{
5992 unsigned int *addr;
5993
5994 addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
5995 cur_idx * idx_que->entry_sz);
5996 *addr = wqe_idx;
5997}
5998
5999static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
6000 const struct ib_recv_wr *wr,
6001 const struct ib_recv_wr **bad_wr)
6002{
6003 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6004 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6005 struct hns_roce_v2_wqe_data_seg *dseg;
6006 struct hns_roce_v2_db srq_db;
6007 unsigned long flags;
6008 int ret = 0;
6009 int wqe_idx;
6010 void *wqe;
6011 int nreq;
6012 int ind;
6013 int i;
6014
6015 spin_lock_irqsave(&srq->lock, flags);
6016
6017 ind = srq->head & (srq->max - 1);
6018
6019 for (nreq = 0; wr; ++nreq, wr = wr->next) {
6020 if (unlikely(wr->num_sge > srq->max_gs)) {
6021 ret = -EINVAL;
6022 *bad_wr = wr;
6023 break;
6024 }
6025
6026 if (unlikely(srq->head == srq->tail)) {
6027 ret = -ENOMEM;
6028 *bad_wr = wr;
6029 break;
6030 }
6031
6032 wqe_idx = find_empty_entry(&srq->idx_que);
6033 fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6034 wqe = get_srq_wqe(srq, wqe_idx);
6035 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6036
6037 for (i = 0; i < wr->num_sge; ++i) {
6038 dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6039 dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6040 dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6041 }
6042
6043 if (i < srq->max_gs) {
6044 dseg->len = 0;
6045 dseg->lkey = cpu_to_le32(0x100);
6046 dseg->addr = 0;
6047 }
6048
6049 srq->wrid[wqe_idx] = wr->wr_id;
6050 ind = (ind + 1) & (srq->max - 1);
6051 }
6052
6053 if (likely(nreq)) {
6054 srq->head += nreq;
6055
6056
6057
6058
6059
6060 wmb();
6061
6062 srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
6063 srq_db.parameter = srq->head;
6064
6065 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6066
6067 }
6068
6069 spin_unlock_irqrestore(&srq->lock, flags);
6070
6071 return ret;
6072}
6073
/* Diagnostics (DFX) callbacks for HIP08. */
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};
6077
/* ib_device verbs implemented by the HIP08 backend. */
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};
6087
/* SRQ verbs, registered only when the hardware supports SRQs. */
static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};
6093
/* HIP08 hardware operation table handed to the hns_roce core driver. */
static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
6130
/* RDMA-capable HNAE3 PCI device IDs this driver binds to. */
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6142
/* Populate @hr_dev from the hnae3 @handle provided by the NIC driver:
 * hw ops, register base, netdev, node GUID, irq vectors and command
 * mode.  Always returns 0; kept int for the caller's error pattern.
 */
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				  struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int i;

	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->dfx = &hns_roce_dfx_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	/* send and other doorbells share the same register offset */
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	/* derive the node GUID from the netdev MAC (EUI-48 -> EUI-64) */
	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;

	return 0;
}
6176
/* Allocate and bring up one RoCE device instance bound to @handle.
 * Unwinds with gotos in reverse order: priv allocation, then (for both
 * cfg and core-init failures) priv free + ib device dealloc.
 */
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pci_dev = handle->pdev;
	hr_dev->dev = &handle->pdev->dev;

	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
	if (ret) {
		dev_err(hr_dev->dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	/* publish the instance only after init fully succeeded */
	handle->priv = hr_dev;

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
6219
6220static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6221 bool reset)
6222{
6223 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6224
6225 if (!hr_dev)
6226 return;
6227
6228 handle->priv = NULL;
6229 hns_roce_exit(hr_dev);
6230 kfree(hr_dev->priv);
6231 ib_dealloc_device(&hr_dev->ib_dev);
6232}
6233
6234static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6235{
6236 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6237 const struct pci_device_id *id;
6238 struct device *dev = &handle->pdev->dev;
6239 int ret;
6240
6241 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6242
6243 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6244 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6245 goto reset_chk_err;
6246 }
6247
6248 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6249 if (!id)
6250 return 0;
6251
6252 ret = __hns_roce_hw_v2_init_instance(handle);
6253 if (ret) {
6254 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6255 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6256 if (ops->ae_dev_resetting(handle) ||
6257 ops->get_hw_reset_stat(handle))
6258 goto reset_chk_err;
6259 else
6260 return ret;
6261 }
6262
6263 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6264
6265
6266 return 0;
6267
6268reset_chk_err:
6269 dev_err(dev, "Device is busy in resetting state.\n"
6270 "please retry later.\n");
6271
6272 return -EBUSY;
6273}
6274
6275static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6276 bool reset)
6277{
6278 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6279 return;
6280
6281 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6282
6283 __hns_roce_hw_v2_uninit_instance(handle, reset);
6284
6285 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6286}
6287static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6288{
6289 struct hns_roce_dev *hr_dev;
6290 struct ib_event event;
6291
6292 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6293 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6294 return 0;
6295 }
6296
6297 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6298 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6299
6300 hr_dev = (struct hns_roce_dev *)handle->priv;
6301 if (!hr_dev)
6302 return 0;
6303
6304 hr_dev->active = false;
6305 hr_dev->dis_db = true;
6306
6307 event.event = IB_EVENT_DEVICE_FATAL;
6308 event.device = &hr_dev->ib_dev;
6309 event.element.port_num = 1;
6310 ib_dispatch_event(&event);
6311
6312 return 0;
6313}
6314
/* Reset notification (HNAE3_INIT_CLIENT): the hardware reset is done,
 * re-create the RoCE instance. If the DOWN notification flagged this
 * reset as a direct return (instance was not initialized), skip the
 * reinit entirely.
 */
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* Reinit failed, so the old instance pointer must not be
		 * left behind: clear handle->priv so a later uninit does
		 * not operate on a device that no longer exists
		 * (__hns_roce_hw_v2_uninit_instance() bails out on NULL).
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}
6344
6345static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6346{
6347 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6348 return 0;
6349
6350 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6351 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6352 msleep(100);
6353 __hns_roce_hw_v2_uninit_instance(handle, false);
6354
6355 return 0;
6356}
6357
6358static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6359 enum hnae3_reset_notify_type type)
6360{
6361 int ret = 0;
6362
6363 switch (type) {
6364 case HNAE3_DOWN_CLIENT:
6365 ret = hns_roce_hw_v2_reset_notify_down(handle);
6366 break;
6367 case HNAE3_INIT_CLIENT:
6368 ret = hns_roce_hw_v2_reset_notify_init(handle);
6369 break;
6370 case HNAE3_UNINIT_CLIENT:
6371 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6372 break;
6373 default:
6374 break;
6375 }
6376
6377 return ret;
6378}
6379
/* Callbacks the hnae3 framework invokes on our behalf. */
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

/* Registration record identifying this module as the RoCE client of the
 * HNS3 ethernet driver.
 */
static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};
6391
/* Module load: register with the hnae3 framework, which will call back
 * into init_instance for each matching device.
 */
static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

/* Module unload: unregister; the framework uninits live instances. */
static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}
6401
module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
6410