/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * The real hns opcode values start from 0, so the table below stores
 * "1 + real value". A zero entry then means the IB opcode is not
 * supported, and to_hr_opcode() subtracts 1 to recover the real opcode.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
		[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND, SEND),
	HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ, RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_wqe_frmr_seg *fseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	u64 pbl_ba;

	/* use ib_access_flags */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

	/* The msg_len and inv_key fields are reused to hold the PBL base */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->npages);
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
	roce_set_bit(fseg->mode_buf_pg_sz,
		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to the extended SGE space, the room left in the
	 * current page may not be enough for the whole inline payload, so
	 * the data is copied page by page.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			idx += len / dseg_len;

			i++;
			if (i >= wr->num_sge)
				break;

			left_len_in_pg -= len;
			dseg += len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		} else {
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / dseg_len;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}

static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
			   unsigned int *sge_ind, unsigned int cnt)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int idx = *sge_ind;

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
			idx++;
			cnt--;
		}
		sge++;
	}

	*sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}

static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
			       curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	roce_set_field(rc_sq_wqe->byte_20,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       (*sge_ind) & (qp->sge.sge_cnt - 1));

	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}

	roce_set_field(rc_sq_wqe->byte_16,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "not supported QP(0x%x) type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
			    hr_qp->state == IB_QPS_INIT ||
			    hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

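/* Count the SGEs with a nonzero length in a work request and return the
 * total payload size through @sge_len. Zero-length SGEs are skipped when
 * WQEs are built, so the SGE number written into a WQE must come from this
 * count rather than from wr->num_sge.
 */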
static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}

static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
		      struct hns_roce_ah *ah)
{
	struct ib_device *ib_dev = ah->ibah.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
		       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
		       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
		       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
		       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);

	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
		return -EINVAL;

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);

	ud_sq_wqe->sgid_index = ah->av.gid_index;

	memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
	memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
		       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);

	return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
		     !!(wr->send_flags & IB_SEND_SIGNALED));

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
		     !!(wr->send_flags & IB_SEND_SOLICITED));

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
		       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	roce_set_field(ud_sq_wqe->byte_20,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       curr_idx & (qp->sge.sge_cnt - 1));

	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
		       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

	ret = fill_ud_av(ud_sq_wqe, ah);
	if (ret)
		return ret;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return 0;
}

static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		break;
	case IB_WR_LOCAL_INV:
		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
		fallthrough;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return ret;
}

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ if the QP state gets
	 * into errored mode. Hence, as a workaround to this hardware
	 * limitation, driver needs to assist in flushing. But the flushing
	 * operation uses mailbox to convey the QP state to the hardware and
	 * which can sleep due to the mutex protection around the mailbox
	 * calls. Hence, use the deferred flush for now. Once the QP is in
	 * errored state, the flush needs to be done only once.
	 */
	if (qp->state == IB_QPS_ERR) {
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S, qp->sq.head);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
	}
}

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	u32 nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		/* The owner bit inverts every time the SQ index wraps around */
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		/* Make sure the WQEs are visible before the doorbell rings */
		wmb();
		update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;
	else if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	unsigned long flags;
	void *wqe = NULL;
	u32 wqe_idx;
	int nreq;
	int ret;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
						  hr_qp->ibqp.recv_cq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
				  wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		if (wr->num_sge < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* rq support inline data */
		if (hr_qp->rq_inl_buf.wqe_cnt) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
			hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
							      (u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					      (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		wmb();

		/*
		 * Hip08 hardware cannot flush the WQEs in RQ if the QP state
		 * gets into errored mode. Hence, as a workaround to this
		 * hardware limitation, driver needs to assist in flushing. But
		 * the flushing operation uses mailbox to convey the QP state
		 * to the hardware and which can sleep due to the mutex
		 * protection around the mailbox calls. Hence, use the deferred
		 * flush for now.
		 */
		if (hr_qp->state == IB_QPS_ERR) {
			if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
					      &hr_qp->flush_flag))
				init_flush_work(hr_dev, hr_qp);
		} else {
			*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, unsigned int n)
{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->tail++;

	spin_unlock(&srq->lock);
}

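/* Find and reserve a free slot in the SRQ's index queue bitmap. Returns
 * the reserved WQE index, or -ENOSPC when every entry is in use.
 */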
static int find_empty_entry(struct hns_roce_idx_que *idx_que,
			    unsigned long size)
{
	int wqe_idx;

	if (unlikely(bitmap_full(idx_que->bitmap, size)))
		return -ENOSPC;

	wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

	bitmap_set(idx_que->bitmap, wqe_idx, 1);

	return wqe_idx;
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	unsigned int ind;
	__le32 *srq_idx;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	ind = srq->head & (srq->wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge >= srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
		if (unlikely(wqe_idx < 0)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		if (wr->num_sge < srq->max_gs) {
			/* an invalid lkey marks the end of the SGE list */
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

		srq_idx = get_idx_buf(&srq->idx_que, ind);
		*srq_idx = cpu_to_le32(wqe_idx);

		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->wqe_cnt - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 =
			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
		srq_db.parameter =
			cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifying hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset, we
	 * should exit with error, and then HNAE3_INIT_CLIENT related process
	 * can rollback the operation like notifying hardware to free
	 * resources, HNAE3_INIT_CLIENT related process will exit with error to
	 * notify NIC driver to reschedule soft reset process once again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending cmds, because we can't wait for the result of
	 * a cmq cmd during software reset.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_stage;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables is:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether the hardware device is resetting now.
	 * sw_resetting -- Whether the NIC's software reset process is
	 * running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_cmdq_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}

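/* Number of free descriptors in the command queue ring. One slot is kept
 * unused so that a full ring (head one behind tail) can be distinguished
 * from an empty one (head equal to tail), hence the trailing "- 1".
 */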
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

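/* The CSQ is done when the hardware head pointer has caught up with the
 * driver's next_to_use, i.e. every submitted descriptor has been consumed.
 */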
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this time
	 * which will be used for hardware to write back
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* get the result of hardware write back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = le16_to_cpu(desc[handle].retval);
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}

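/* Reset-aware wrapper around __hns_roce_cmq_send(): when a reset has
 * completed, the command is silently dropped and 0 is returned; while a
 * reset is still in progress, -EBUSY is returned so the caller can retry.
 */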
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	int retval;
	int ret;

	ret = hns_roce_v2_rst_process_cmd(hr_dev);
	if (ret == CMD_RST_PRC_SUCCESS)
		return 0;
	if (ret == CMD_RST_PRC_EBUSY)
		return -EBUSY;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		retval = hns_roce_v2_rst_process_cmd(hr_dev);
		if (retval == CMD_RST_PRC_SUCCESS)
			return 0;
		else if (retval == CMD_RST_PRC_EBUSY)
			return -EBUSY;
	}

	return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

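/* Check whether any reset is relevant to the function-clear process:
 * returns true if a hardware or software reset is in progress, or if a
 * reset has completed since hr_dev->reset_cnt was recorded.
 */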
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
		return true;

	return false;
}

static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
				      int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_cnt;
	unsigned long end;
	bool sw_resetting;
	bool hw_resetting;

	instance_stage = handle->rinfo.instance_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
	} else if (hw_resetting) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (!ops->get_hw_reset_stat(handle)) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after reset.\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (ops->ae_dev_reset_cnt(handle) !=
			    hr_dev->reset_cnt) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after sw reset\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
	} else {
		if (retval && !flag)
			dev_warn(hr_dev->dev,
				 "Func clear read failed, ret = %d.\n", retval);

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	}
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (hns_roce_func_clr_chk_rst(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (hns_roce_func_clr_chk_rst(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	/* 0x3e8: 1000 ns, i.e. a 1us time resolution */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	/* 0x12b7: UDP port 4791, the IANA-assigned RoCEv2 port */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
						PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
						PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	hr_dev->caps.gmv_bt_num = roce_get_field(req_b->gmv_idx_num,
						 PF_RES_DATA_5_PF_GMV_BT_NUM_M,
						 PF_RES_DATA_5_PF_GMV_BT_NUM_S);

	return 0;
}

static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc.data;

	hr_dev->caps.qpc_timer_bt_num =
		roce_get_field(req_a->qpc_timer_bt_idx_num,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
		roce_get_field(req_a->cqc_timer_bt_idx_num,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	roce_set_field(req_a->vf_qpc_bt_idx_num,
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_qpc_bt_idx_num,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);

	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
		       HNS_ROCE_VF_SRQC_BT_NUM);

	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);

	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);

	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
		       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
		       VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);

	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
		       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
		       VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);

	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
		       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
		       VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);

	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
		       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
		       VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);

	roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
	roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
		       HNS_ROCE_VF_SCCC_BT_NUM);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

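/* Default capability values for the device, used as a fallback when the
 * PF capability query command is not supported by the firmware.
 */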
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
	caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_srqs = 0;
	caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;

	caps->qpc_ba_pg_sz = 0;
	caps->qpc_buf_pg_sz = 0;
	caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz = 0;
	caps->srqc_buf_pg_sz = 0;
	caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz = 0;
	caps->cqc_buf_pg_sz = 0;
	caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz = 0;
	caps->mpt_buf_pg_sz = 0;
	caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mtt_ba_pg_sz = 0;
	caps->mtt_buf_pg_sz = 0;
	caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
	caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
	caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
	caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
	caps->cqe_buf_pg_sz = 0;
	caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz = 0;
	caps->srqwqe_buf_pg_sz = 0;
	caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz = 0;
	caps->idx_buf_pg_sz = 0;
	caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
	caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
		      HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
		      HNS_ROCE_CAP_FLAG_RQ_INLINE |
		      HNS_ROCE_CAP_FLAG_RECORD_DB |
		      HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
	caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;

	caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
		       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
		       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

	caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
	caps->qpc_timer_ba_pg_sz = 0;
	caps->qpc_timer_buf_pg_sz = 0;
	caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
	caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
	caps->cqc_timer_ba_pg_sz = 0;
	caps->cqc_timer_buf_pg_sz = 0;
	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

	caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
	caps->sccc_ba_pg_sz = 0;
	caps->sccc_buf_pg_sz = 0;
	caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
		caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
		caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
							  caps->gmv_entry_sz);
		caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->gmv_ba_pg_sz = 0;
		caps->gmv_buf_pg_sz = 0;
		caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
							     caps->gmv_entry_sz);
	}
}

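/* Work out the buffer page size and base-address-table page size (expressed
 * as shifts relative to PAGE_SIZE) needed to cover @obj_num objects of
 * @obj_size bytes, given the hop number of the table's multi-level
 * addressing.
 */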
1922static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
1923 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
1924{
1925 u64 obj_per_chunk;
1926 u64 bt_chunk_size = PAGE_SIZE;
1927 u64 buf_chunk_size = PAGE_SIZE;
1928 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
1929
1930 *buf_page_size = 0;
1931 *bt_page_size = 0;
1932
1933 switch (hop_num) {
1934 case 3:
1935 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1936 (bt_chunk_size / BA_BYTE_LEN) *
1937 (bt_chunk_size / BA_BYTE_LEN) *
1938 obj_per_chunk_default;
1939 break;
1940 case 2:
1941 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1942 (bt_chunk_size / BA_BYTE_LEN) *
1943 obj_per_chunk_default;
1944 break;
1945 case 1:
1946 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1947 obj_per_chunk_default;
1948 break;
1949 case HNS_ROCE_HOP_NUM_0:
1950 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1951 break;
1952 default:
 pr_err("table %u does not support hop_num = %u!\n", hem_type,
 hop_num);
1955 return;
1956 }
1957
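 /*
  * For multi-hop MTT-type tables the computed size is returned as the
  * BA page size; for context tables it is the buffer page size.
  */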
1958 if (hem_type >= HEM_TYPE_MTT)
1959 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1960 else
1961 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1962}
1963
1964static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
1965{
1966 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
1967 struct hns_roce_caps *caps = &hr_dev->caps;
1968 struct hns_roce_query_pf_caps_a *resp_a;
1969 struct hns_roce_query_pf_caps_b *resp_b;
1970 struct hns_roce_query_pf_caps_c *resp_c;
1971 struct hns_roce_query_pf_caps_d *resp_d;
1972 struct hns_roce_query_pf_caps_e *resp_e;
1973 int ctx_hop_num;
1974 int pbl_hop_num;
1975 int ret;
1976 int i;
1977
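 /*
  * The PF capabilities are returned in a chain of five descriptors;
  * every descriptor but the last sets the NEXT flag.
  */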
1978 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
1979 hns_roce_cmq_setup_basic_desc(&desc[i],
1980 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
1981 true);
1982 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
1983 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1984 else
1985 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1986 }
1987
1988 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
1989 if (ret)
1990 return ret;
1991
1992 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
1993 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
1994 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
1995 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
1996 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
1997
1998 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
1999 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2000 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2001 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2002 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
2003 caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
2004 caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
2005 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2006 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2007 caps->num_other_vectors = resp_a->num_other_vectors;
2008 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2009 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2010 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
2011 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2012
2013 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2014 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2015 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2016 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2017 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2018 caps->idx_entry_sz = resp_b->idx_entry_sz;
2019 caps->sccc_sz = resp_b->sccc_sz;
2020 caps->max_mtu = resp_b->max_mtu;
2021 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2022 caps->min_cqes = resp_b->min_cqes;
2023 caps->min_wqes = resp_b->min_wqes;
2024 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2025 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2026 caps->phy_num_uars = resp_b->phy_num_uars;
2027 ctx_hop_num = resp_b->ctx_hop_num;
2028 pbl_hop_num = resp_b->pbl_hop_num;
2029
2030 caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
2031 V2_QUERY_PF_CAPS_C_NUM_PDS_M,
2032 V2_QUERY_PF_CAPS_C_NUM_PDS_S);
2033 caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
2034 V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
2035 V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
2036 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2037 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2038
2039 caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
2040 V2_QUERY_PF_CAPS_C_NUM_CQS_M,
2041 V2_QUERY_PF_CAPS_C_NUM_CQS_S);
2042 caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
2043 V2_QUERY_PF_CAPS_C_MAX_GID_M,
2044 V2_QUERY_PF_CAPS_C_MAX_GID_S);
2045 caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
2046 V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
2047 V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
2048 caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
2049 V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
2050 V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
2051 caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
2052 V2_QUERY_PF_CAPS_C_NUM_QPS_M,
2053 V2_QUERY_PF_CAPS_C_NUM_QPS_S);
2054 caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
2055 V2_QUERY_PF_CAPS_C_MAX_ORD_M,
2056 V2_QUERY_PF_CAPS_C_MAX_ORD_S);
2057 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2058 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2059 caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
2060 V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
2061 V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
2062 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2063 caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
2064 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
2065 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
2066 caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
2067 V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
2068 V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
2069 caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
2070 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
2071 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
2072 caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2073 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
2074 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
2075 caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2076 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
2077 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
2078 caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
2079 V2_QUERY_PF_CAPS_D_RSV_PDS_M,
2080 V2_QUERY_PF_CAPS_D_RSV_PDS_S);
2081 caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
2082 V2_QUERY_PF_CAPS_D_NUM_UARS_M,
2083 V2_QUERY_PF_CAPS_D_NUM_UARS_S);
2084 caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
2085 V2_QUERY_PF_CAPS_D_RSV_QPS_M,
2086 V2_QUERY_PF_CAPS_D_RSV_QPS_S);
2087 caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
2088 V2_QUERY_PF_CAPS_D_RSV_UARS_M,
2089 V2_QUERY_PF_CAPS_D_RSV_UARS_S);
2090 caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2091 V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
2092 V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
2093 caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2094 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
2095 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
2096 caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
2097 V2_QUERY_PF_CAPS_E_RSV_CQS_M,
2098 V2_QUERY_PF_CAPS_E_RSV_CQS_S);
2099 caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
2100 V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
2101 V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
2102 caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
2103 V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
2104 V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
2105 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2106 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2107 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2108 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2109
2110 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2111 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2112 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2113 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2114 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2115 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2116 caps->mtt_ba_pg_sz = 0;
2117 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
2118 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2119 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2120
2121 caps->qpc_hop_num = ctx_hop_num;
2122 caps->srqc_hop_num = ctx_hop_num;
2123 caps->cqc_hop_num = ctx_hop_num;
2124 caps->mpt_hop_num = ctx_hop_num;
2125 caps->mtt_hop_num = pbl_hop_num;
2126 caps->cqe_hop_num = pbl_hop_num;
2127 caps->srqwqe_hop_num = pbl_hop_num;
2128 caps->idx_hop_num = pbl_hop_num;
2129 caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2130 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
2131 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
2132 caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2133 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
2134 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
2135 caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2136 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
2137 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
2138
2139 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2140 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2141 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2142 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2143 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2144 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2145 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2146 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2147 caps->gmv_entry_sz);
2148 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2149 caps->gmv_ba_pg_sz = 0;
2150 caps->gmv_buf_pg_sz = 0;
2151 caps->gid_table_len[0] = caps->gmv_bt_num *
2152 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2153 }
2154
2155 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2156 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2157 HEM_TYPE_QPC);
2158 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2159 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2160 HEM_TYPE_MTPT);
2161 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2162 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2163 HEM_TYPE_CQC);
2164 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
2165 caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
2166 &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
2167
2168 caps->sccc_hop_num = ctx_hop_num;
2169 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2170 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2171
2172 calc_pg_sz(caps->num_qps, caps->sccc_sz,
2173 caps->sccc_hop_num, caps->sccc_bt_num,
2174 &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
2175 HEM_TYPE_SCCC);
2176 calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
2177 caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
2178 &caps->cqc_timer_buf_pg_sz,
2179 &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
2180
2181 calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
2182 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2183 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2184 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2185 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2186 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
2187 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2188
2189 return 0;
2190}
2191
2192static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
2193{
2194 struct hns_roce_cmq_desc desc;
2195 struct hns_roce_cfg_entry_size *cfg_size =
2196 (struct hns_roce_cfg_entry_size *)desc.data;
2197
2198 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2199 false);
2200
2201 cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
2202 cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
2203
2204 return hns_roce_cmq_send(hr_dev, &desc, 1);
2205}
2206
2207static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
2208{
2209 struct hns_roce_cmq_desc desc;
2210 struct hns_roce_cfg_entry_size *cfg_size =
2211 (struct hns_roce_cfg_entry_size *)desc.data;
2212
2213 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2214 false);
2215
2216 cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
2217 cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
2218
2219 return hns_roce_cmq_send(hr_dev, &desc, 1);
2220}
2221
2222static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2223{
2224 int ret;
2225
2226 if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
2227 return 0;
2228
2229 ret = hns_roce_config_qpc_size(hr_dev);
2230 if (ret) {
2231 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2232 return ret;
2233 }
2234
2235 ret = hns_roce_config_sccc_size(hr_dev);
2236 if (ret)
2237 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2238
2239 return ret;
2240}
2241
2242static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2243{
2244 struct hns_roce_caps *caps = &hr_dev->caps;
2245 int ret;
2246
2247 ret = hns_roce_cmq_query_hw_info(hr_dev);
2248 if (ret) {
 dev_err(hr_dev->dev, "failed to query hardware version, ret = %d.\n",
 ret);
2251 return ret;
2252 }
2253
2254 ret = hns_roce_query_fw_ver(hr_dev);
2255 if (ret) {
 dev_err(hr_dev->dev, "failed to query firmware version, ret = %d.\n",
 ret);
2258 return ret;
2259 }
2260
2261 ret = hns_roce_config_global_param(hr_dev);
2262 if (ret) {
 dev_err(hr_dev->dev, "failed to configure global param, ret = %d.\n",
 ret);
2265 return ret;
2266 }
2267
2268
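 /* Get pf resource owned by every pf */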
2269 ret = hns_roce_query_pf_resource(hr_dev);
2270 if (ret) {
 dev_err(hr_dev->dev, "failed to query pf resource, ret = %d.\n",
 ret);
2273 return ret;
2274 }
2275
2276 ret = hns_roce_query_pf_timer_resource(hr_dev);
2277 if (ret) {
2278 dev_err(hr_dev->dev,
2279 "failed to query pf timer resource, ret = %d.\n", ret);
2280 return ret;
2281 }
2282
2283 ret = hns_roce_set_vf_switch_param(hr_dev, 0);
2284 if (ret) {
2285 dev_err(hr_dev->dev,
2286 "failed to set function switch param, ret = %d.\n",
2287 ret);
2288 return ret;
2289 }
2290
2291 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2292 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2293
2294 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2295 caps->pbl_buf_pg_sz = 0;
2296 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2297 caps->eqe_ba_pg_sz = 0;
2298 caps->eqe_buf_pg_sz = 0;
2299 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
2300 caps->tsq_buf_pg_sz = 0;
2301
2302 ret = hns_roce_query_pf_caps(hr_dev);
2303 if (ret)
2304 set_default_caps(hr_dev);
2305
2306 ret = hns_roce_alloc_vf_resource(hr_dev);
2307 if (ret) {
 dev_err(hr_dev->dev, "failed to allocate vf resource, ret = %d.\n",
 ret);
2310 return ret;
2311 }
2312
2313 ret = hns_roce_v2_set_bt(hr_dev);
2314 if (ret) {
 dev_err(hr_dev->dev,
 "failed to configure BT attributes, ret = %d.\n", ret);
2317 return ret;
2318 }
2319
2320
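 /* HIP09 and later allow the QPC and SCCC entry sizes to be configured */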
 return hns_roce_config_entry_size(hr_dev);
2324}
2325
2326static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
2327 enum hns_roce_link_table_type type)
2328{
2329 struct hns_roce_cmq_desc desc[2];
2330 struct hns_roce_cfg_llm_a *req_a =
2331 (struct hns_roce_cfg_llm_a *)desc[0].data;
2332 struct hns_roce_cfg_llm_b *req_b =
2333 (struct hns_roce_cfg_llm_b *)desc[1].data;
2334 struct hns_roce_v2_priv *priv = hr_dev->priv;
2335 struct hns_roce_link_table *link_tbl;
2336 struct hns_roce_link_table_entry *entry;
2337 enum hns_roce_opcode_type opcode;
2338 u32 page_num;
2339 int i;
2340
2341 switch (type) {
2342 case TSQ_LINK_TABLE:
2343 link_tbl = &priv->tsq;
2344 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2345 break;
2346 case TPQ_LINK_TABLE:
2347 link_tbl = &priv->tpq;
2348 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
2349 break;
2350 default:
2351 return -EINVAL;
2352 }
2353
2354 page_num = link_tbl->npages;
2355 entry = link_tbl->table.buf;
2356
2357 for (i = 0; i < 2; i++) {
2358 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
2359
2360 if (i == 0)
2361 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2362 else
2363 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2364 }
2365
2366 req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
2367 req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
2368 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
2369 CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
2370 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
2371 CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
2372 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
2373 CFG_LLM_INIT_EN_S, 1);
2374 req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
2375 req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
2376 roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
2377 0);
2378
2379 req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
2380 roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
2381 CFG_LLM_TAIL_BA_H_S,
2382 entry[page_num - 1].blk_ba1_nxt_ptr &
2383 HNS_ROCE_LINK_TABLE_BA1_M);
2384 roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
2385 (entry[page_num - 2].blk_ba1_nxt_ptr &
2386 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
2387 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
2388
2389 return hns_roce_cmq_send(hr_dev, desc, 2);
2390}
2391
2392static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
2393 enum hns_roce_link_table_type type)
2394{
2395 struct hns_roce_v2_priv *priv = hr_dev->priv;
2396 struct hns_roce_link_table *link_tbl;
2397 struct hns_roce_link_table_entry *entry;
2398 struct device *dev = hr_dev->dev;
2399 u32 buf_chk_sz;
2400 dma_addr_t t;
2401 int func_num = 1;
2402 u32 pg_num_a;
2403 u32 pg_num_b;
2404 u32 pg_num;
2405 u32 size;
2406 int i;
2407
2408 switch (type) {
2409 case TSQ_LINK_TABLE:
2410 link_tbl = &priv->tsq;
2411 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
2412 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
2413 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
2414 break;
2415 case TPQ_LINK_TABLE:
2416 link_tbl = &priv->tpq;
2417 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
2418 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
2419 pg_num_b = 2 * 4 * func_num + 2;
2420 break;
2421 default:
2422 return -EINVAL;
2423 }
2424
2425 pg_num = max(pg_num_a, pg_num_b);
2426 size = pg_num * sizeof(struct hns_roce_link_table_entry);
2427
2428 link_tbl->table.buf = dma_alloc_coherent(dev, size,
2429 &link_tbl->table.map,
2430 GFP_KERNEL);
2431 if (!link_tbl->table.buf)
2432 goto out;
2433
2434 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
2435 GFP_KERNEL);
2436 if (!link_tbl->pg_list)
2437 goto err_kcalloc_failed;
2438
2439 entry = link_tbl->table.buf;
2440 for (i = 0; i < pg_num; ++i) {
2441 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
2442 &t, GFP_KERNEL);
2443 if (!link_tbl->pg_list[i].buf)
2444 goto err_alloc_buf_failed;
2445
2446 link_tbl->pg_list[i].map = t;
2447
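 /*
  * blk_ba0 holds bits [43:12] of the page address; blk_ba1_nxt_ptr
  * holds bits [63:44] plus the pointer to the next entry.
  */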
2448 entry[i].blk_ba0 = (u32)(t >> 12);
2449 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
2450
2451 if (i < (pg_num - 1))
2452 entry[i].blk_ba1_nxt_ptr |=
2453 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
2454 }
2455 link_tbl->npages = pg_num;
2456 link_tbl->pg_sz = buf_chk_sz;
2457
2458 return hns_roce_config_link_table(hr_dev, type);
2459
2460err_alloc_buf_failed:
2461 for (i -= 1; i >= 0; i--)
2462 dma_free_coherent(dev, buf_chk_sz,
2463 link_tbl->pg_list[i].buf,
2464 link_tbl->pg_list[i].map);
2465 kfree(link_tbl->pg_list);
2466
2467err_kcalloc_failed:
2468 dma_free_coherent(dev, size, link_tbl->table.buf,
2469 link_tbl->table.map);
2470
2471out:
2472 return -ENOMEM;
2473}
2474
2475static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2476 struct hns_roce_link_table *link_tbl)
2477{
2478 struct device *dev = hr_dev->dev;
2479 int size;
2480 int i;
2481
2482 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2483
2484 for (i = 0; i < link_tbl->npages; ++i)
2485 if (link_tbl->pg_list[i].buf)
2486 dma_free_coherent(dev, link_tbl->pg_sz,
2487 link_tbl->pg_list[i].buf,
2488 link_tbl->pg_list[i].map);
2489 kfree(link_tbl->pg_list);
2490
2491 dma_free_coherent(dev, size, link_tbl->table.buf,
2492 link_tbl->table.map);
2493}
2494
2495static int get_hem_table(struct hns_roce_dev *hr_dev)
2496{
2497 unsigned int qpc_count;
2498 unsigned int cqc_count;
2499 unsigned int gmv_count;
2500 int ret;
2501 int i;
2502
2503
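 /* Alloc memory for QPC timer buffer space chunks */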
2504 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2505 qpc_count++) {
2506 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2507 qpc_count);
2508 if (ret) {
 dev_err(hr_dev->dev,
 "failed to get QPC timer table, ret = %d.\n", ret);
2510 goto err_qpc_timer_failed;
2511 }
2512 }
2513
2514
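 /* Alloc memory for CQC timer buffer space chunks */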
2515 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2516 cqc_count++) {
2517 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2518 cqc_count);
2519 if (ret) {
 dev_err(hr_dev->dev,
 "failed to get CQC timer table, ret = %d.\n", ret);
2521 goto err_cqc_timer_failed;
2522 }
2523 }
2524
2525
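 /* Alloc memory for GMV (GID/MAC/VLAN) table buffer space chunks */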
2526 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2527 gmv_count++) {
2528 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2529 if (ret) {
2530 dev_err(hr_dev->dev,
2531 "failed to get gmv table, ret = %d.\n", ret);
2532 goto err_gmv_failed;
2533 }
2534 }
2535
2536 return 0;
2537
2538err_gmv_failed:
2539 for (i = 0; i < gmv_count; i++)
2540 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2541
2542err_cqc_timer_failed:
2543 for (i = 0; i < cqc_count; i++)
2544 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2545
2546err_qpc_timer_failed:
2547 for (i = 0; i < qpc_count; i++)
2548 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2549
2550 return ret;
2551}
2552
2553static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2554{
2555 struct hns_roce_v2_priv *priv = hr_dev->priv;
2556 int ret;
2557
2558
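 /* The TSQ link table holds SQ doorbell and ACK doorbell buffers */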
2559 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
2560 if (ret) {
2561 dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
2562 return ret;
2563 }
2564
2565 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
2566 if (ret) {
2567 dev_err(hr_dev->dev, "failed to init TPQ, ret = %d.\n", ret);
2568 goto err_tpq_init_failed;
2569 }
2570
2571 ret = get_hem_table(hr_dev);
2572 if (ret)
2573 goto err_get_hem_table_failed;
2574
2575 return 0;
2576
2577err_get_hem_table_failed:
2578 hns_roce_free_link_table(hr_dev, &priv->tpq);
2579
2580err_tpq_init_failed:
2581 hns_roce_free_link_table(hr_dev, &priv->tsq);
2582
2583 return ret;
2584}
2585
2586static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2587{
2588 struct hns_roce_v2_priv *priv = hr_dev->priv;
2589
2590 hns_roce_function_clear(hr_dev);
2591
2592 hns_roce_free_link_table(hr_dev, &priv->tpq);
2593 hns_roce_free_link_table(hr_dev, &priv->tsq);
2594}
2595
2596static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2597{
2598 struct hns_roce_cmq_desc desc;
2599 struct hns_roce_mbox_status *mb_st =
2600 (struct hns_roce_mbox_status *)desc.data;
2601 int status;
2602
2603 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2604
2605 status = hns_roce_cmq_send(hr_dev, &desc, 1);
2606 if (status)
2607 return status;
2608
2609 return le32_to_cpu(mb_st->mb_status_hw_run);
2610}
2611
2612static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2613{
2614 u32 status = hns_roce_query_mbox_status(hr_dev);
2615
2616 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2617}
2618
2619static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2620{
2621 u32 status = hns_roce_query_mbox_status(hr_dev);
2622
2623 return status & HNS_ROCE_HW_MB_STATUS_MASK;
2624}
2625
2626static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2627 u64 out_param, u32 in_modifier, u8 op_modifier,
2628 u16 op, u16 token, int event)
2629{
2630 struct hns_roce_cmq_desc desc;
2631 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2632
2633 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2634
2635 mb->in_param_l = cpu_to_le32(in_param);
2636 mb->in_param_h = cpu_to_le32(in_param >> 32);
2637 mb->out_param_l = cpu_to_le32(out_param);
2638 mb->out_param_h = cpu_to_le32(out_param >> 32);
2639 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2640 mb->token_event_en = cpu_to_le32(event << 16 | token);
2641
2642 return hns_roce_cmq_send(hr_dev, &desc, 1);
2643}
2644
2645static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2646 u64 out_param, u32 in_modifier, u8 op_modifier,
2647 u16 op, u16 token, int event)
2648{
2649 struct device *dev = hr_dev->dev;
2650 unsigned long end;
2651 int ret;
2652
2653 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2654 while (hns_roce_v2_cmd_pending(hr_dev)) {
2655 if (time_after(jiffies, end)) {
 dev_dbg(dev, "jiffies=%lu end=%lu\n", jiffies, end);
2658 return -EAGAIN;
2659 }
2660 cond_resched();
2661 }
2662
2663 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2664 op_modifier, op, token, event);
2665 if (ret)
 dev_err(dev, "failed to post mailbox, ret = %d.\n", ret);
2667
2668 return ret;
2669}
2670
2671static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2672 unsigned int timeout)
2673{
2674 struct device *dev = hr_dev->dev;
2675 unsigned long end;
2676 u32 status;
2677
2678 end = msecs_to_jiffies(timeout) + jiffies;
2679 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2680 cond_resched();
2681
2682 if (hns_roce_v2_cmd_pending(hr_dev)) {
 dev_err(dev, "mailbox command timed out!\n");
2684 return -ETIMEDOUT;
2685 }
2686
2687 status = hns_roce_v2_cmd_complete(hr_dev);
2688 if (status != 0x1) {
2689 if (status == CMD_RST_PRC_EBUSY)
2690 return status;
2691
2692 dev_err(dev, "mailbox status 0x%x!\n", status);
2693 return -EBUSY;
2694 }
2695
2696 return 0;
2697}
2698
2699static void copy_gid(void *dest, const union ib_gid *gid)
2700{
2701#define GID_SIZE 4
2702 const union ib_gid *src = gid;
2703 __le32 (*p)[GID_SIZE] = dest;
2704 int i;
2705
2706 if (!gid)
2707 src = &zgid;
2708
2709 for (i = 0; i < GID_SIZE; i++)
2710 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
2711}
2712
2713static int config_sgid_table(struct hns_roce_dev *hr_dev,
2714 int gid_index, const union ib_gid *gid,
2715 enum hns_roce_sgid_type sgid_type)
2716{
2717 struct hns_roce_cmq_desc desc;
2718 struct hns_roce_cfg_sgid_tb *sgid_tb =
2719 (struct hns_roce_cfg_sgid_tb *)desc.data;
2720
2721 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2722
2723 roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2724 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2725 roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2726 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2727
2728 copy_gid(&sgid_tb->vf_sgid_l, gid);
2729
2730 return hns_roce_cmq_send(hr_dev, &desc, 1);
2731}
2732
2733static int config_gmv_table(struct hns_roce_dev *hr_dev,
2734 int gid_index, const union ib_gid *gid,
2735 enum hns_roce_sgid_type sgid_type,
2736 const struct ib_gid_attr *attr)
2737{
2738 struct hns_roce_cmq_desc desc[2];
2739 struct hns_roce_cfg_gmv_tb_a *tb_a =
2740 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
2741 struct hns_roce_cfg_gmv_tb_b *tb_b =
2742 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
2743
2744 u16 vlan_id = VLAN_CFI_MASK;
2745 u8 mac[ETH_ALEN] = {};
2746 int ret;
2747
2748 if (gid) {
2749 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
2750 if (ret)
2751 return ret;
2752 }
2753
2754 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2755 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2756
2757 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2758
2759 copy_gid(&tb_a->vf_sgid_l, gid);
2760
2761 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
2762 CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
2763 roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
2764 vlan_id < VLAN_CFI_MASK);
2765 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
2766 CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
2767
2768 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
2769 roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
2770 CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
2771
2772 roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
2773 CFG_GMV_TB_SGID_IDX_S, gid_index);
2774
2775 return hns_roce_cmq_send(hr_dev, desc, 2);
2776}
2777
2778static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2779 int gid_index, const union ib_gid *gid,
2780 const struct ib_gid_attr *attr)
2781{
2782 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2783 int ret;
2784
2785 if (gid) {
2786 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2787 if (ipv6_addr_v4mapped((void *)gid))
2788 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2789 else
2790 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2791 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
2792 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2793 }
2794 }
2795
2796 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
2797 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
2798 else
2799 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2800
2801 if (ret)
2802 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
2803 ret);
2804
2805 return ret;
2806}
2807
2808static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2809 u8 *addr)
2810{
2811 struct hns_roce_cmq_desc desc;
2812 struct hns_roce_cfg_smac_tb *smac_tb =
2813 (struct hns_roce_cfg_smac_tb *)desc.data;
2814 u16 reg_smac_h;
2815 u32 reg_smac_l;
2816
2817 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2818
2819 reg_smac_l = *(u32 *)(&addr[0]);
2820 reg_smac_h = *(u16 *)(&addr[4]);
2821
2822 roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
2823 CFG_SMAC_TB_IDX_S, phy_port);
2824 roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
2825 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2826 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2827
2828 return hns_roce_cmq_send(hr_dev, &desc, 1);
2829}
2830
2831static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
2832 struct hns_roce_v2_mpt_entry *mpt_entry,
2833 struct hns_roce_mr *mr)
2834{
2835 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
2836 struct ib_device *ibdev = &hr_dev->ib_dev;
2837 dma_addr_t pbl_ba;
2838 int i, count;
2839
2840 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
2841 ARRAY_SIZE(pages), &pbl_ba);
2842 if (count < 1) {
2843 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
2844 count);
2845 return -ENOBUFS;
2846 }
2847
2848
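 /* PBL page addresses are recorded in the MPT shifted right by 6 bits */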
2849 for (i = 0; i < count; i++)
2850 pages[i] >>= 6;
2851
2852 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
2853 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
2854 roce_set_field(mpt_entry->byte_48_mode_ba,
2855 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2856 upper_32_bits(pbl_ba >> 3));
2857
2858 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2859 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2860 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2861
2862 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2863 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2864 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2865 roce_set_field(mpt_entry->byte_64_buf_pa1,
2866 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2867 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2868 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
2869
2870 return 0;
2871}
2872
2873static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
2874 void *mb_buf, struct hns_roce_mr *mr,
2875 unsigned long mtpt_idx)
2876{
2877 struct hns_roce_v2_mpt_entry *mpt_entry;
2878 int ret;
2879
2880 mpt_entry = mb_buf;
2881 memset(mpt_entry, 0, sizeof(*mpt_entry));
2882
2883 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2884 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2885 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2886 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2887 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2888 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2889 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2890 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2891 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2892 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2893 V2_MPT_BYTE_4_PD_S, mr->pd);
2894
2895 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2896 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2897 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2898 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2899 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2900 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2901 mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2902 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2903 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2904 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2905 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2906 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2907 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2908
2909 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2910 mr->type == MR_TYPE_MR ? 0 : 1);
2911 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2912 1);
2913
2914 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2915 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2916 mpt_entry->lkey = cpu_to_le32(mr->key);
2917 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2918 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2919
2920 if (mr->type == MR_TYPE_DMA)
2921 return 0;
2922
2923 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2924
2925 return ret;
2926}
2927
2928static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2929 struct hns_roce_mr *mr, int flags,
2930 u32 pdn, int mr_access_flags, u64 iova,
2931 u64 size, void *mb_buf)
2932{
2933 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2934 int ret = 0;
2935
2936 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2937 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2938
2939 if (flags & IB_MR_REREG_PD) {
2940 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2941 V2_MPT_BYTE_4_PD_S, pdn);
2942 mr->pd = pdn;
2943 }
2944
2945 if (flags & IB_MR_REREG_ACCESS) {
2946 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2947 V2_MPT_BYTE_8_BIND_EN_S,
2948 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2949 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2950 V2_MPT_BYTE_8_ATOMIC_EN_S,
2951 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2952 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2953 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2954 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2955 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2956 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2957 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2958 }
2959
2960 if (flags & IB_MR_REREG_TRANS) {
2961 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2962 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2963 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2964 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2965
2966 mr->iova = iova;
2967 mr->size = size;
2968
2969 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2970 }
2971
2972 return ret;
2973}
2974
2975static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
2976 void *mb_buf, struct hns_roce_mr *mr)
2977{
2978 struct ib_device *ibdev = &hr_dev->ib_dev;
2979 struct hns_roce_v2_mpt_entry *mpt_entry;
2980 dma_addr_t pbl_ba = 0;
2981
2982 mpt_entry = mb_buf;
2983 memset(mpt_entry, 0, sizeof(*mpt_entry));
2984
2985 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
2986 ibdev_err(ibdev, "failed to find frmr mtr.\n");
2987 return -ENOBUFS;
2988 }
2989
2990 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2991 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2992 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2993 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2994 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2995 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2996 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2997 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2998 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2999 V2_MPT_BYTE_4_PD_S, mr->pd);
3000
3001 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
3002 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3003 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3004
3005 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
3006 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3007 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
3008 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3009
3010 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3011
3012 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3013 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
3014 V2_MPT_BYTE_48_PBL_BA_H_S,
3015 upper_32_bits(pbl_ba >> 3));
3016
3017 roce_set_field(mpt_entry->byte_64_buf_pa1,
3018 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3019 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3020 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3021
3022 return 0;
3023}
3024
3025static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3026{
3027 struct hns_roce_v2_mpt_entry *mpt_entry;
3028
3029 mpt_entry = mb_buf;
3030 memset(mpt_entry, 0, sizeof(*mpt_entry));
3031
3032 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3033 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3034 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3035 V2_MPT_BYTE_4_PD_S, mw->pdn);
3036 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3037 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
3038 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3039 mw->pbl_hop_num);
3040 roce_set_field(mpt_entry->byte_4_pd_hop_st,
3041 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3042 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3043 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3044
3045 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3046 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3047 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
3048
3049 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3050 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
3051 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3052 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
3053 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3054
3055 roce_set_field(mpt_entry->byte_64_buf_pa1,
3056 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3057 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3058 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3059
3060 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3061
3062 return 0;
3063}
3064
3065static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3066{
3067 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3068}
3069
3070static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3071{
3072 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3073
3074
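 /*
  * A CQE is owned by software when its owner bit differs from the
  * wrap bit of the consumer index.
  */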
3075 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
3076 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
3077}
3078
3079static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
3080{
3081 *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
3082}
3083
3084static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3085 struct hns_roce_srq *srq)
3086{
3087 struct hns_roce_v2_cqe *cqe, *dest;
3088 u32 prod_index;
3089 int nfreed = 0;
3090 int wqe_index;
3091 u8 owner_bit;
3092
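 /*
  * First find the current producer index to know where to start
  * cleaning from. Entries added by hardware after this point cannot
  * belong to the QP being cleaned, as it is already in reset.
  */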
3093 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3094 ++prod_index) {
3095 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3096 break;
3097 }
3103 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3104 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3105 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3106 V2_CQE_BYTE_16_LCL_QPN_S) &
3107 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
3108 if (srq &&
3109 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
3110 wqe_index = roce_get_field(cqe->byte_4,
3111 V2_CQE_BYTE_4_WQE_INDX_M,
3112 V2_CQE_BYTE_4_WQE_INDX_S);
3113 hns_roce_free_srq_wqe(srq, wqe_index);
3114 }
3115 ++nfreed;
3116 } else if (nfreed) {
3117 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3118 hr_cq->ib_cq.cqe);
3119 owner_bit = roce_get_bit(dest->byte_4,
3120 V2_CQE_BYTE_4_OWNER_S);
3121 memcpy(dest, cqe, sizeof(*cqe));
3122 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
3123 owner_bit);
3124 }
3125 }
3126
3127 if (nfreed) {
3128 hr_cq->cons_index += nfreed;
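 /*
  * Make sure the overwritten CQEs are committed before hardware
  * sees the updated consumer index.
  */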
3133 wmb();
3134 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3135 }
3136}
3137
3138static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3139 struct hns_roce_srq *srq)
3140{
3141 spin_lock_irq(&hr_cq->lock);
3142 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3143 spin_unlock_irq(&hr_cq->lock);
3144}
3145
3146static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3147 struct hns_roce_cq *hr_cq, void *mb_buf,
3148 u64 *mtts, dma_addr_t dma_handle)
3149{
3150 struct hns_roce_v2_cq_context *cq_context;
3151
3152 cq_context = mb_buf;
3153 memset(cq_context, 0, sizeof(*cq_context));
3154
3155 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
3156 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
3157 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
3158 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
3159 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
3160 V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
3161 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
3162 V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
3163
3164 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
3165 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
3166
3167 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
3168 V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
3169 HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
3170
3171 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3172 hr_reg_enable(cq_context, CQC_STASH);
3173
3174 cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
3175
3176 roce_set_field(cq_context->byte_16_hop_addr,
3177 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
3178 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
3179 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3180 roce_set_field(cq_context->byte_16_hop_addr,
3181 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
3182 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
3183 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3184
3185 cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
3186 roce_set_field(cq_context->byte_24_pgsz_addr,
3187 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
3188 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
3189 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3190 roce_set_field(cq_context->byte_24_pgsz_addr,
3191 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
3192 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
3193 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3194 roce_set_field(cq_context->byte_24_pgsz_addr,
3195 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
3196 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
3197 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3198
3199 cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
3200
3201 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
3202 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
3203
3204 roce_set_bit(cq_context->byte_44_db_record,
3205 V2_CQC_BYTE_44_DB_RECORD_EN_S,
3206 (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
3207
3208 roce_set_field(cq_context->byte_44_db_record,
3209 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
3210 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
3211 ((u32)hr_cq->db.dma) >> 1);
3212 cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
3213
3214 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3215 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
3216 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3217 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3218 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3219 V2_CQC_BYTE_56_CQ_PERIOD_M,
3220 V2_CQC_BYTE_56_CQ_PERIOD_S,
3221 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3222}
3223
3224static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3225 enum ib_cq_notify_flags flags)
3226{
3227 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3228 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3229 u32 notification_flag;
3230 __le32 doorbell[2];
3231
3232 doorbell[0] = 0;
3233 doorbell[1] = 0;
3234
3235 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3236 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
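
 /*
  * IB_CQ_SOLICITED arms the CQ for solicited completions only;
  * otherwise the CQ is armed for the next completion of any kind.
  */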
3241 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
3242 hr_cq->cqn);
3243 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
3244 HNS_ROCE_V2_CQ_DB_NTR);
3245 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
3246 V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
3247 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
3248 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
3249 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
3250 notification_flag);
3251
3252 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
3253
3254 return 0;
3255}
3256
3257static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3258 struct hns_roce_qp **cur_qp,
3259 struct ib_wc *wc)
3260{
3261 struct hns_roce_rinl_sge *sge_list;
3262 u32 wr_num, wr_cnt, sge_num;
3263 u32 sge_cnt, data_len, size;
3264 void *wqe_buf;
3265
3266 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
3267 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
3268 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
3269
3270 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3271 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3272 wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
3273 data_len = wc->byte_len;
3274
3275 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3276 size = min(sge_list[sge_cnt].len, data_len);
3277 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3278
3279 data_len -= size;
3280 wqe_buf += size;
3281 }
3282
3283 if (unlikely(data_len)) {
3284 wc->status = IB_WC_LOC_LEN_ERR;
3285 return -EAGAIN;
3286 }
3287
3288 return 0;
3289}
3290
3291static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3292 int num_entries, struct ib_wc *wc)
3293{
3294 unsigned int left;
3295 int npolled = 0;
3296
3297 left = wq->head - wq->tail;
3298 if (left == 0)
3299 return 0;
3300
3301 left = min_t(unsigned int, (unsigned int)num_entries, left);
3302 while (npolled < left) {
3303 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3304 wc->status = IB_WC_WR_FLUSH_ERR;
3305 wc->vendor_err = 0;
3306 wc->qp = &hr_qp->ibqp;
3307
3308 wq->tail++;
3309 wc++;
3310 npolled++;
3311 }
3312
3313 return npolled;
3314}
3315
3316static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3317 struct ib_wc *wc)
3318{
3319 struct hns_roce_qp *hr_qp;
3320 int npolled = 0;
3321
3322 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3323 npolled += sw_comp(hr_qp, &hr_qp->sq,
3324 num_entries - npolled, wc + npolled);
3325 if (npolled >= num_entries)
3326 goto out;
3327 }
3328
3329 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3330 npolled += sw_comp(hr_qp, &hr_qp->rq,
3331 num_entries - npolled, wc + npolled);
3332 if (npolled >= num_entries)
3333 goto out;
3334 }
3335
3336out:
3337 return npolled;
3338}
3339
3340static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3341 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3342 struct ib_wc *wc)
3343{
3344 static const struct {
3345 u32 cqe_status;
3346 enum ib_wc_status wc_status;
3347 } map[] = {
3348 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3349 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3350 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3351 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3352 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3353 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3354 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3355 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3356 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3357 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3358 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3359 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3360 IB_WC_RETRY_EXC_ERR },
3361 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3362 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3363 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3364 };
3365
3366 u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3367 V2_CQE_BYTE_4_STATUS_S);
3368 int i;
3369
3370 wc->status = IB_WC_GENERAL_ERR;
3371 for (i = 0; i < ARRAY_SIZE(map); i++)
3372 if (cqe_status == map[i].cqe_status) {
3373 wc->status = map[i].wc_status;
3374 break;
3375 }
3376
3377 if (likely(wc->status == IB_WC_SUCCESS ||
3378 wc->status == IB_WC_WR_FLUSH_ERR))
3379 return;
3380
3381 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3382 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3383 cq->cqe_size, false);
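
 /*
  * GENERAL_ERR is an error type not defined by the IB specification;
  * it does not require moving the QP to the error state, so skip the
  * flush handling below.
  */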
3390 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3391 return;
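
 /*
  * HIP08 hardware cannot flush the WQEs in the SQ/RQ when the QP
  * enters the error state, so the driver assists by flushing. The
  * flush uses a mailbox command that may sleep, so it is deferred to
  * a workqueue rather than run in this (possibly atomic) context.
  */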
3402 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
3403 init_flush_work(hr_dev, qp);
3404}
3405
3406static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3407 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3408{
3409 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3410 struct hns_roce_srq *srq = NULL;
3411 struct hns_roce_v2_cqe *cqe;
3412 struct hns_roce_qp *hr_qp;
3413 struct hns_roce_wq *wq;
3414 int is_send;
3415 u16 wqe_ctr;
3416 u32 opcode;
3417 u32 qpn;
3418 int ret;
3419
3420
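 /* Find the CQE at the current consumer index */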
3421 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3422 if (!cqe)
3423 return -EAGAIN;
3424
3425 ++hr_cq->cons_index;
3426
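 /* Do not read the rest of the CQE until ownership has been confirmed */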
3427 rmb();
3428
3429
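 /* The S_R bit is 0 for SQ completions and 1 for RQ completions */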
3430 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
3431
3432 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3433 V2_CQE_BYTE_16_LCL_QPN_S);
3434
3435 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
3436 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3437 if (unlikely(!hr_qp)) {
3438 ibdev_err(&hr_dev->ib_dev,
3439 "CQ %06lx with entry for unknown QPN %06x\n",
3440 hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
3441 return -EINVAL;
3442 }
3443 *cur_qp = hr_qp;
3444 }
3445
3446 wc->qp = &(*cur_qp)->ibqp;
3447 wc->vendor_err = 0;
3448
3449 if (is_send) {
3450 wq = &(*cur_qp)->sq;
3451 if ((*cur_qp)->sq_signal_bits) {
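 /*
  * When not every WR is signaled, a CQE acknowledges all WQEs up
  * to its index, so move the tail to the WQE this CQE refers to
  * before reading the wr_id.
  */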
3457 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3458 V2_CQE_BYTE_4_WQE_INDX_M,
3459 V2_CQE_BYTE_4_WQE_INDX_S);
3460 wq->tail += (wqe_ctr - (u16)wq->tail) &
3461 (wq->wqe_cnt - 1);
3462 }
3463
3464 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3465 ++wq->tail;
3466 } else if ((*cur_qp)->ibqp.srq) {
3467 srq = to_hr_srq((*cur_qp)->ibqp.srq);
3468 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3469 V2_CQE_BYTE_4_WQE_INDX_M,
3470 V2_CQE_BYTE_4_WQE_INDX_S);
3471 wc->wr_id = srq->wrid[wqe_ctr];
3472 hns_roce_free_srq_wqe(srq, wqe_ctr);
3473 } else {
3474
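 /* A completion on an RQ with no SRQ attached */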
3475 wq = &(*cur_qp)->rq;
3476 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3477 ++wq->tail;
3478 }
3479
3480 get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
3481 if (unlikely(wc->status != IB_WC_SUCCESS))
3482 return 0;
3483
3484 if (is_send) {
3485 wc->wc_flags = 0;
3486
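 /* Map the SQ WQE opcode reported in the CQE to an IB WC opcode */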
3487 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3488 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
3489 case HNS_ROCE_V2_WQE_OP_SEND:
3490 wc->opcode = IB_WC_SEND;
3491 break;
3492 case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
3493 wc->opcode = IB_WC_SEND;
3494 break;
3495 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3496 wc->opcode = IB_WC_SEND;
3497 wc->wc_flags |= IB_WC_WITH_IMM;
3498 break;
3499 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3500 wc->opcode = IB_WC_RDMA_READ;
3501 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3502 break;
3503 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
3504 wc->opcode = IB_WC_RDMA_WRITE;
3505 break;
3506 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3507 wc->opcode = IB_WC_RDMA_WRITE;
3508 wc->wc_flags |= IB_WC_WITH_IMM;
3509 break;
3510 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3511 wc->opcode = IB_WC_LOCAL_INV;
3512 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3513 break;
3514 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3515 wc->opcode = IB_WC_COMP_SWAP;
3516 wc->byte_len = 8;
3517 break;
3518 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3519 wc->opcode = IB_WC_FETCH_ADD;
3520 wc->byte_len = 8;
3521 break;
3522 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3523 wc->opcode = IB_WC_MASKED_COMP_SWAP;
3524 wc->byte_len = 8;
3525 break;
3526 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3527 wc->opcode = IB_WC_MASKED_FETCH_ADD;
3528 wc->byte_len = 8;
3529 break;
3530 case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
3531 wc->opcode = IB_WC_REG_MR;
3532 break;
3533 case HNS_ROCE_V2_WQE_OP_BIND_MW:
3534 wc->opcode = IB_WC_REG_MR;
3535 break;
3536 default:
3537 wc->status = IB_WC_GENERAL_ERR;
3538 break;
3539 }
3540 } else {
3541
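 /* The CQE corresponds to an RQ or SRQ WQE */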
3542 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3543
3544 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3545 V2_CQE_BYTE_4_OPCODE_S);
3546 switch (opcode & 0x1f) {
3547 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3548 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3549 wc->wc_flags = IB_WC_WITH_IMM;
3550 wc->ex.imm_data =
3551 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3552 break;
3553 case HNS_ROCE_V2_OPCODE_SEND:
3554 wc->opcode = IB_WC_RECV;
3555 wc->wc_flags = 0;
3556 break;
3557 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3558 wc->opcode = IB_WC_RECV;
3559 wc->wc_flags = IB_WC_WITH_IMM;
3560 wc->ex.imm_data =
3561 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3562 break;
3563 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3564 wc->opcode = IB_WC_RECV;
3565 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3566 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3567 break;
3568 default:
3569 wc->status = IB_WC_GENERAL_ERR;
3570 break;
3571 }
3572
3573 if ((wc->qp->qp_type == IB_QPT_RC ||
3574 wc->qp->qp_type == IB_QPT_UC) &&
3575 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
3576 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3577 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3578 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
3579 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
3580 if (unlikely(ret))
3581 return -EAGAIN;
3582 }
3583
3584 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3585 V2_CQE_BYTE_32_SL_S);
 /* The remote QPN is 24 bits wide; do not truncate it */
 wc->src_qp = roce_get_field(cqe->byte_32,
 V2_CQE_BYTE_32_RMT_QPN_M,
 V2_CQE_BYTE_32_RMT_QPN_S);
3589 wc->slid = 0;
3590 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
3591 V2_CQE_BYTE_32_GRH_S) ?
3592 IB_WC_GRH : 0);
3593 wc->port_num = roce_get_field(cqe->byte_32,
3594 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
3595 wc->pkey_index = 0;
3596
3597 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3598 wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
3599 V2_CQE_BYTE_28_VID_M,
3600 V2_CQE_BYTE_28_VID_S);
3601 wc->wc_flags |= IB_WC_WITH_VLAN;
3602 } else {
3603 wc->vlan_id = 0xffff;
3604 }
3605
3606 wc->network_hdr_type = roce_get_field(cqe->byte_28,
3607 V2_CQE_BYTE_28_PORT_TYPE_M,
3608 V2_CQE_BYTE_28_PORT_TYPE_S);
3609 }
3610
3611 return 0;
3612}
3613
3614static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3615 struct ib_wc *wc)
3616{
3617 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3618 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3619 struct hns_roce_qp *cur_qp = NULL;
3620 unsigned long flags;
3621 int npolled;
3622
3623 spin_lock_irqsave(&hr_cq->lock, flags);
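
 /*
  * Once the device has entered the UNINIT state the hardware no
  * longer produces CQEs, so complete the outstanding WRs in software
  * with flush errors instead of polling the hardware CQ.
  */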
3632 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3633 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3634 goto out;
3635 }
3636
3637 for (npolled = 0; npolled < num_entries; ++npolled) {
3638 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3639 break;
3640 }
3641
3642 if (npolled) {
3643
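 /*
  * Make sure the CQEs have been consumed before the updated
  * consumer index is visible to hardware.
  */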
3644 wmb();
3645 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3646 }
3647
3648out:
3649 spin_unlock_irqrestore(&hr_cq->lock, flags);
3650
3651 return npolled;
3652}
3653
3654static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3655 int step_idx)
3656{
3657 int op;
3658
3659 if (type == HEM_TYPE_SCCC && step_idx)
3660 return -EINVAL;
3661
3662 switch (type) {
3663 case HEM_TYPE_QPC:
3664 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3665 break;
3666 case HEM_TYPE_MTPT:
3667 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3668 break;
3669 case HEM_TYPE_CQC:
3670 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3671 break;
3672 case HEM_TYPE_SRQC:
3673 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3674 break;
3675 case HEM_TYPE_SCCC:
3676 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3677 break;
3678 case HEM_TYPE_QPC_TIMER:
3679 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3680 break;
3681 case HEM_TYPE_CQC_TIMER:
3682 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3683 break;
3684 default:
3685 dev_warn(hr_dev->dev,
3686		 "table type %u cannot be written through mailbox!\n", type);
3687 return -EINVAL;
3688 }
3689
3690 return op + step_idx;
3691}
3692
3693static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
3694 u32 hem_type, int step_idx)
3695{
3696 struct hns_roce_cmd_mailbox *mailbox;
3697 struct hns_roce_cmq_desc desc;
3698 struct hns_roce_cfg_gmv_bt *gmv_bt =
3699 (struct hns_roce_cfg_gmv_bt *)desc.data;
3700 int ret;
3701 int op;
3702
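	/* GMV tables are configured through the command queue, not a mailbox */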
3703 if (hem_type == HEM_TYPE_GMV) {
3704 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT,
3705 false);
3706
3707 gmv_bt->gmv_ba_l = cpu_to_le32(bt_ba >> HNS_HW_PAGE_SHIFT);
3708 gmv_bt->gmv_ba_h = cpu_to_le32(bt_ba >> (HNS_HW_PAGE_SHIFT +
3709 32));
3710 gmv_bt->gmv_bt_idx = cpu_to_le32(obj /
3711 (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz));
3712
3713 return hns_roce_cmq_send(hr_dev, &desc, 1);
3714 }
3715
3716 op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
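	/* An unsupported table type is skipped silently rather than failed */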
3717 if (op < 0)
3718 return 0;
3719
3720 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3721 if (IS_ERR(mailbox))
3722 return PTR_ERR(mailbox);
3723
3724 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3725 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3726
3727 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3728
3729 return ret;
3730}
3731
3732static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3733 struct hns_roce_hem_table *table, int obj,
3734 int step_idx)
3735{
3736 struct hns_roce_hem_iter iter;
3737 struct hns_roce_hem_mhop mhop;
3738 struct hns_roce_hem *hem;
3739 unsigned long mhop_obj = obj;
3740 int i, j, k;
3741 int ret = 0;
3742 u64 hem_idx = 0;
3743 u64 l1_idx = 0;
3744 u64 bt_ba = 0;
3745 u32 chunk_ba_num;
3746 u32 hop_num;
3747
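	/* Only multi-hop addressed tables need their BAs written to hardware */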
3748 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3749 return 0;
3750
3751 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3752 i = mhop.l0_idx;
3753 j = mhop.l1_idx;
3754 k = mhop.l2_idx;
3755 hop_num = mhop.hop_num;
3756 chunk_ba_num = mhop.bt_chunk_size / 8;
3757
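	/* Flatten the l0/l1/l2 hop indexes into a single HEM chunk index */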
3758 if (hop_num == 2) {
3759 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3760 k;
3761 l1_idx = i * chunk_ba_num + j;
3762 } else if (hop_num == 1) {
3763 hem_idx = i * chunk_ba_num + j;
3764 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3765 hem_idx = i;
3766 }
3767
3768 if (table->type == HEM_TYPE_SCCC)
3769 obj = mhop.l0_idx;
3770
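	/*
	 * The last step writes the addresses of the data pages; earlier
	 * steps write the L0/L1 base address table entries.
	 */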
3771 if (check_whether_last_step(hop_num, step_idx)) {
3772 hem = table->hem[hem_idx];
3773 for (hns_roce_hem_first(hem, &iter);
3774 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3775 bt_ba = hns_roce_hem_addr(&iter);
3776 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
3777 step_idx);
3778 }
3779 } else {
3780 if (step_idx == 0)
3781 bt_ba = table->bt_l0_dma_addr[i];
3782 else if (step_idx == 1 && hop_num == 2)
3783 bt_ba = table->bt_l1_dma_addr[l1_idx];
3784
3785 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
3786 }
3787
3788 return ret;
3789}
3790
3791static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3792 struct hns_roce_hem_table *table, int obj,
3793 int step_idx)
3794{
3795 struct device *dev = hr_dev->dev;
3796 struct hns_roce_cmd_mailbox *mailbox;
3797 int ret;
3798 u16 op = 0xff;
3799
3800 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3801 return 0;
3802
3803 switch (table->type) {
3804 case HEM_TYPE_QPC:
3805 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3806 break;
3807 case HEM_TYPE_MTPT:
3808 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3809 break;
3810 case HEM_TYPE_CQC:
3811 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3812 break;
3813 case HEM_TYPE_SRQC:
3814 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3815 break;
3816 case HEM_TYPE_SCCC:
3817 case HEM_TYPE_QPC_TIMER:
3818 case HEM_TYPE_CQC_TIMER:
3819 case HEM_TYPE_GMV:
3820 return 0;
3821 default:
3822		dev_warn(dev, "table type %u cannot be destroyed through mailbox!\n",
3823 table->type);
3824 return 0;
3825 }
3826
3827 op += step_idx;
3828
3829 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3830 if (IS_ERR(mailbox))
3831 return PTR_ERR(mailbox);
3832
3834 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3835 HNS_ROCE_CMD_TIMEOUT_MSECS);
3836
3837 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3838 return ret;
3839}
3840
3841static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3842 struct hns_roce_v2_qp_context *context,
3843 struct hns_roce_v2_qp_context *qpc_mask,
3844 struct hns_roce_qp *hr_qp)
3845{
3846 struct hns_roce_cmd_mailbox *mailbox;
3847 int qpc_size;
3848 int ret;
3849
3850 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3851 if (IS_ERR(mailbox))
3852 return PTR_ERR(mailbox);
3853
3854	/* The mailbox buffer holds the context followed by its mask */
3855 qpc_size = hr_dev->caps.qpc_sz;
3856 memcpy(mailbox->buf, context, qpc_size);
3857 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
3858
3859 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3860 HNS_ROCE_CMD_MODIFY_QPC,
3861 HNS_ROCE_CMD_TIMEOUT_MSECS);
3862
3863 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3864
3865 return ret;
3866}
3867
3868static void set_access_flags(struct hns_roce_qp *hr_qp,
3869 struct hns_roce_v2_qp_context *context,
3870 struct hns_roce_v2_qp_context *qpc_mask,
3871 const struct ib_qp_attr *attr, int attr_mask)
3872{
3873 u8 dest_rd_atomic;
3874 u32 access_flags;
3875
3876 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3877 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3878
3879 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3880 attr->qp_access_flags : hr_qp->atomic_rd_en;
3881
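	/* With no responder resources, only remote write may stay enabled */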
3882 if (!dest_rd_atomic)
3883 access_flags &= IB_ACCESS_REMOTE_WRITE;
3884
3885 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3886 !!(access_flags & IB_ACCESS_REMOTE_READ));
3887 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3888
3889 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3890 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3891 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3892
3893 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3894 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3895 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3896 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
3897 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3898 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
3899}
3900
3901static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3902 struct hns_roce_v2_qp_context *context,
3903 struct hns_roce_v2_qp_context *qpc_mask)
3904{
3905 roce_set_field(context->byte_4_sqpn_tst,
3906 V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
3907 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
3908 hr_qp->sge.sge_shift));
3909
3910 roce_set_field(context->byte_20_smac_sgid_idx,
3911 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3912 ilog2(hr_qp->sq.wqe_cnt));
3913
3914 roce_set_field(context->byte_20_smac_sgid_idx,
3915 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3916 ilog2(hr_qp->rq.wqe_cnt));
3917}
3918
3919static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3920 const struct ib_qp_attr *attr,
3921 int attr_mask,
3922 struct hns_roce_v2_qp_context *context,
3923 struct hns_roce_v2_qp_context *qpc_mask)
3924{
3925 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3926 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3927
3928	/*
3929	 * For the RESET to INIT transition the whole context mask has
3930	 * already been zeroed (see hns_roce_v2_set_abs_fields), so every
3931	 * field written to the context below takes effect in hardware.
3932	 */
3934 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3935 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3936
3937 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3938 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3939
3940 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3941 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3942
3943 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3944 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3945
3946 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3947
3948	/* The reserved VLAN id 0xfff marks the QP as carrying no VLAN */
3949 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3950 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3951
3952 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
3953 roce_set_bit(context->byte_68_rq_db,
3954 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3955
3956 roce_set_field(context->byte_68_rq_db,
3957 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3958 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3959 ((u32)hr_qp->rdb.dma) >> 1);
3960 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3961
3962 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3963 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3964
3965 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3966 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3967 if (ibqp->srq) {
3968 roce_set_field(context->byte_76_srqn_op_en,
3969 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3970 to_hr_srq(ibqp->srq)->srqn);
3971 roce_set_bit(context->byte_76_srqn_op_en,
3972 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3973 }
3974
3975 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3976
3977 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3978 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3979
3980 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
3981 return;
3982
3983 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3984 hr_reg_enable(&context->ext, QPCEX_STASH);
3985}
3986
3987static void modify_qp_init_to_init(struct ib_qp *ibqp,
3988 const struct ib_qp_attr *attr, int attr_mask,
3989 struct hns_roce_v2_qp_context *context,
3990 struct hns_roce_v2_qp_context *qpc_mask)
3991{
3992 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3993
3994	/*
3995	 * Clear the qpc_mask bit of each field that is modified below so
3996	 * that hardware applies the new value; fields whose mask bits are
3997	 * left set keep their previous contents.
3998	 */
4000 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
4001 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
4002 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
4003 V2_QPC_BYTE_4_TST_S, 0);
4004
4005 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
4006 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
4007 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
4008 V2_QPC_BYTE_16_PD_S, 0);
4009
4010 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
4011 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
4012 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
4013 V2_QPC_BYTE_80_RX_CQN_S, 0);
4014
4015 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
4016 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
4017 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
4018 V2_QPC_BYTE_252_TX_CQN_S, 0);
4019
4020 if (ibqp->srq) {
4021 roce_set_bit(context->byte_76_srqn_op_en,
4022 V2_QPC_BYTE_76_SRQ_EN_S, 1);
4023 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4024 V2_QPC_BYTE_76_SRQ_EN_S, 0);
4025 roce_set_field(context->byte_76_srqn_op_en,
4026 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
4027 to_hr_srq(ibqp->srq)->srqn);
4028 roce_set_field(qpc_mask->byte_76_srqn_op_en,
4029 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
4030 }
4031
4032 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
4033 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
4034 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
4035 V2_QPC_BYTE_4_SQPN_S, 0);
4036
4037 if (attr_mask & IB_QP_DEST_QPN) {
4038 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4039 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
4040 roce_set_field(qpc_mask->byte_56_dqpn_err,
4041 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4042 }
4043}
4044
4045static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4046 struct hns_roce_qp *hr_qp,
4047 struct hns_roce_v2_qp_context *context,
4048 struct hns_roce_v2_qp_context *qpc_mask)
4049{
4050 u64 mtts[MTT_MIN_COUNT] = { 0 };
4051 u64 wqe_sge_ba;
4052 int count;
4053
4054	/* Look up the MTT entries of the RQ buffer */
4055 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4056 MTT_MIN_COUNT, &wqe_sge_ba);
4057 if (hr_qp->rq.wqe_cnt && count < 1) {
4058 ibdev_err(&hr_dev->ib_dev,
4059 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4060 return -EINVAL;
4061 }
4062
4063 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4064 qpc_mask->wqe_sge_ba = 0;
4065
4066	/*
4067	 * The WQE/SGE base address is recorded in 8-byte units: bits [34:3]
4068	 * go into wqe_sge_ba above, and the remaining high bits go into the
4069	 * byte_12 field below, together with the SQ/SGE/RQ hop numbers.
4070	 */
4072 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4073 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
4074 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4075 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
4076
4077 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4078 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
4079 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4080 hr_qp->sq.wqe_cnt));
4081 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4082 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
4083
4084 roce_set_field(context->byte_20_smac_sgid_idx,
4085 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4086 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
4087 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4088 hr_qp->sge.sge_cnt));
4089 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4090 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4091 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
4092
4093 roce_set_field(context->byte_20_smac_sgid_idx,
4094 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4095 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
4096 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4097 hr_qp->rq.wqe_cnt));
4099 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4100 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4101 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
4102
4103 roce_set_field(context->byte_16_buf_ba_pg_sz,
4104 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4105 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
4106 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4107 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4108 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4109 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
4110
4111 roce_set_field(context->byte_16_buf_ba_pg_sz,
4112 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4113 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
4114 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4115 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4116 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4117 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
4118
4119 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4120 qpc_mask->rq_cur_blk_addr = 0;
4121
4122 roce_set_field(context->byte_92_srq_info,
4123 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4124 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
4125 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4126 roce_set_field(qpc_mask->byte_92_srq_info,
4127 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4128 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
4129
4130 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4131 qpc_mask->rq_nxt_blk_addr = 0;
4132
4133 roce_set_field(context->byte_104_rq_sge,
4134 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4135 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
4136 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4137 roce_set_field(qpc_mask->byte_104_rq_sge,
4138 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4139 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
4140
4141 roce_set_field(context->byte_84_rq_ci_pi,
4142 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4143 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
4144 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4145 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4146 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4147
4148 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4149 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
4150 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
4151
4152 return 0;
4153}
4154
4155static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4156 struct hns_roce_qp *hr_qp,
4157 struct hns_roce_v2_qp_context *context,
4158 struct hns_roce_v2_qp_context *qpc_mask)
4159{
4160 struct ib_device *ibdev = &hr_dev->ib_dev;
4161 u64 sge_cur_blk = 0;
4162 u64 sq_cur_blk = 0;
4163 int count;
4164
4165	/* Look up the MTT entry of the first SQ WQE block */
4166 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4167 if (count < 1) {
4168 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4169 hr_qp->qpn);
4170 return -EINVAL;
4171 }
4172 if (hr_qp->sge.sge_cnt > 0) {
4173 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4174 hr_qp->sge.offset,
4175 &sge_cur_blk, 1, NULL);
4176 if (count < 1) {
4177 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4178 hr_qp->qpn);
4179 return -EINVAL;
4180 }
4181 }
4182
4183	/*
4184	 * Program the current SQ and SGE block addresses into both the
4185	 * requester and responder (rx) fields; the matching qpc_mask bits
4186	 * are cleared so hardware accepts the new values.
4187	 */
4189 context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4190 roce_set_field(context->byte_168_irrl_idx,
4191 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4192 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4193 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4194 qpc_mask->sq_cur_blk_addr = 0;
4195 roce_set_field(qpc_mask->byte_168_irrl_idx,
4196 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4197 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
4198
4199 context->sq_cur_sge_blk_addr =
4200 cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
4201 roce_set_field(context->byte_184_irrl_idx,
4202 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4203 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4204 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4205 qpc_mask->sq_cur_sge_blk_addr = 0;
4206 roce_set_field(qpc_mask->byte_184_irrl_idx,
4207 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4208 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4209
4210 context->rx_sq_cur_blk_addr =
4211 cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4212 roce_set_field(context->byte_232_irrl_sge,
4213 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4214 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4215 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4216 qpc_mask->rx_sq_cur_blk_addr = 0;
4217 roce_set_field(qpc_mask->byte_232_irrl_sge,
4218 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4219 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4220
4221 return 0;
4222}
4223
4224static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4225 const struct ib_qp_attr *attr)
4226{
4227 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4228 return IB_MTU_4096;
4229
4230 return attr->path_mtu;
4231}
4232
4233static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4234 const struct ib_qp_attr *attr, int attr_mask,
4235 struct hns_roce_v2_qp_context *context,
4236 struct hns_roce_v2_qp_context *qpc_mask)
4237{
4238 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4239 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4240 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4241 struct ib_device *ibdev = &hr_dev->ib_dev;
4242 dma_addr_t trrl_ba;
4243 dma_addr_t irrl_ba;
4244 enum ib_mtu mtu;
4245 u8 lp_pktn_ini;
4246 u8 port_num;
4247 u64 *mtts;
4248 u8 *dmac;
4249 u8 *smac;
4250 int port;
4251 int ret;
4252
4253 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4254 if (ret) {
4255 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4256 return ret;
4257 }
4258
4259	/* Look up the IRRL table entry of this QP */
4260 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4261 hr_qp->qpn, &irrl_ba);
4262 if (!mtts) {
4263 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4264 return -EINVAL;
4265 }
4266
4267	/* Look up the TRRL table entry of this QP */
4268 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4269 hr_qp->qpn, &trrl_ba);
4270 if (!mtts) {
4271 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4272 return -EINVAL;
4273 }
4274
4275 if (attr_mask & IB_QP_ALT_PATH) {
4276 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4277 attr_mask);
4278 return -EINVAL;
4279 }
4280
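	/* The 16-byte aligned TRRL base address is split across three fields */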
4281 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4282 V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
4283 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4284 V2_QPC_BYTE_132_TRRL_BA_S, 0);
4285 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4286 qpc_mask->trrl_ba = 0;
4287 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4288 V2_QPC_BYTE_140_TRRL_BA_S,
4289 (u32)(trrl_ba >> (32 + 16 + 4)));
4290 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4291 V2_QPC_BYTE_140_TRRL_BA_S, 0);
4292
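	/* The IRRL base address is recorded in 64-byte units */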
4293 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4294 qpc_mask->irrl_ba = 0;
4295 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4296 V2_QPC_BYTE_208_IRRL_BA_S,
4297 irrl_ba >> (32 + 6));
4298 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4299 V2_QPC_BYTE_208_IRRL_BA_S, 0);
4300
4301 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
4302 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
4303
4304 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4305 hr_qp->sq_signal_bits);
4306 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4307 0);
4308
4309 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4310
4311 smac = (u8 *)hr_dev->dev_addr[port];
4312 dmac = (u8 *)attr->ah_attr.roce.dmac;
4313
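	/* Force loopback when the destination MAC is local or loop_idc is set */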
4314 if (ether_addr_equal_unaligned(dmac, smac) ||
4315 hr_dev->loop_idc == 0x1) {
4316 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
4317 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
4318 }
4319
4320 if (attr_mask & IB_QP_DEST_QPN) {
4321 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4322 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
4323 roce_set_field(qpc_mask->byte_56_dqpn_err,
4324 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4325 }
4326
4327	/* Configure the SGID index from the AH attribute */
4328 port_num = rdma_ah_get_port_num(&attr->ah_attr);
4329 roce_set_field(context->byte_20_smac_sgid_idx,
4330 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4331 hns_get_gid_index(hr_dev, port_num - 1,
4332 grh->sgid_index));
4333 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4334 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4335
4336 memcpy(&(context->dmac), dmac, sizeof(u32));
4337 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4338 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
4339 qpc_mask->dmac = 0;
4340 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4341 V2_QPC_BYTE_52_DMAC_S, 0);
4342
4343 mtu = get_mtu(ibqp, attr);
4344 hr_qp->path_mtu = mtu;
4345
4346 if (attr_mask & IB_QP_PATH_MTU) {
4347 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4348 V2_QPC_BYTE_24_MTU_S, mtu);
4349 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4350 V2_QPC_BYTE_24_MTU_S, 0);
4351 }
4352
4353#define MAX_LP_MSG_LEN 65536
4354	/* MTU * (2 ^ lp_pktn_ini) must not be bigger than 64KB (MAX_LP_MSG_LEN) */
4355 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
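	/* e.g. a 4096-byte path MTU gives lp_pktn_ini = ilog2(65536 / 4096) = 4 */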
4356
4357 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4358 V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
4359 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4360 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
4361
4362	/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4363 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4364 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
4365 roce_set_field(qpc_mask->byte_172_sq_psn,
4366 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4367 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
4368
4369 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4370 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
4371 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
4372 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
4373 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4374 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
4375 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
4376
4377 context->rq_rnr_timer = 0;
4378 qpc_mask->rq_rnr_timer = 0;
4379
4380 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
4381 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
4382 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
4383 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
4384
4385	/* The hardware sends 2^lp_sgen_ini segments at a time */
4386 roce_set_field(context->byte_168_irrl_idx,
4387 V2_QPC_BYTE_168_LP_SGEN_INI_M,
4388 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
4389 roce_set_field(qpc_mask->byte_168_irrl_idx,
4390 V2_QPC_BYTE_168_LP_SGEN_INI_M,
4391 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
4392
4393 return 0;
4394}
4395
4396static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4397 const struct ib_qp_attr *attr, int attr_mask,
4398 struct hns_roce_v2_qp_context *context,
4399 struct hns_roce_v2_qp_context *qpc_mask)
4400{
4401 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4402 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4403 struct ib_device *ibdev = &hr_dev->ib_dev;
4404 int ret;
4405
4406	/* Alternate path and path migration are not supported */
4407 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4408		ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error.\n", attr_mask);
4409 return -EINVAL;
4410 }
4411
4412 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4413 if (ret) {
4414 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4415 return ret;
4416 }
4417
4418	/*
4419	 * The fields below are reset to zero. Since the context is already
4420	 * all-zero, only the corresponding qpc_mask bits need clearing.
4421	 */
4423 roce_set_field(qpc_mask->byte_232_irrl_sge,
4424 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4425 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4426
4427 roce_set_field(qpc_mask->byte_240_irrl_tail,
4428 V2_QPC_BYTE_240_RX_ACK_MSN_M,
4429 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4430
4431 roce_set_field(qpc_mask->byte_248_ack_psn,
4432 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4433 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4434 roce_set_bit(qpc_mask->byte_248_ack_psn,
4435 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4436 roce_set_field(qpc_mask->byte_248_ack_psn,
4437 V2_QPC_BYTE_248_IRRL_PSN_M,
4438 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4439
4440 roce_set_field(qpc_mask->byte_240_irrl_tail,
4441 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4442 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4443
4444 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4445 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4446 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4447
4448 roce_set_bit(qpc_mask->byte_248_ack_psn,
4449 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4450
4451 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4452 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4453
4454 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4455 V2_QPC_BYTE_212_LSN_S, 0x100);
4456 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4457 V2_QPC_BYTE_212_LSN_S, 0);
4458
4459 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4460 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4461
4462 return 0;
4463}
4464
4465static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4466{
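	/* Without a caller-supplied flow label, derive one from the QPNs */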
4467 if (!fl)
4468 fl = rdma_calc_flow_label(lqpn, rqpn);
4469
4470 return rdma_flow_label_to_udp_sport(fl);
4471}
4472
4473static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4474 const struct ib_qp_attr *attr,
4475 int attr_mask,
4476 struct hns_roce_v2_qp_context *context,
4477 struct hns_roce_v2_qp_context *qpc_mask)
4478{
4479 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4480 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4481 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4482 struct ib_device *ibdev = &hr_dev->ib_dev;
4483 const struct ib_gid_attr *gid_attr = NULL;
4484 int is_roce_protocol;
4485 u16 vlan_id = 0xffff;
4486 bool is_udp = false;
4487 u8 ib_port;
4488 u8 hr_port;
4489 int ret;
4490
4491 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4492 hr_port = ib_port - 1;
4493 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4494 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4495
4496 if (is_roce_protocol) {
4497 gid_attr = attr->ah_attr.grh.sgid_attr;
4498 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4499 if (ret)
4500 return ret;
4501
4502 if (gid_attr)
4503 is_udp = (gid_attr->gid_type ==
4504 IB_GID_TYPE_ROCE_UDP_ENCAP);
4505 }
4506
4507	/* Only HIP08 needs the vlan_en bits set in the QPC */
4508 if (vlan_id < VLAN_N_VID &&
4509 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4510 roce_set_bit(context->byte_76_srqn_op_en,
4511 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4512 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4513 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4514 roce_set_bit(context->byte_168_irrl_idx,
4515 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4516 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4517 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4518 }
4519
4520 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4521 V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4522 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4523 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4524
4525 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4526 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4527 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4528 return -EINVAL;
4529 }
4530
4531 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4532 ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
4533 return -EINVAL;
4534 }
4535
4536 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4537 V2_QPC_BYTE_52_UDPSPN_S,
4538 is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
4539 attr->dest_qp_num) : 0);
4540
4541 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4542 V2_QPC_BYTE_52_UDPSPN_S, 0);
4543
4544 roce_set_field(context->byte_20_smac_sgid_idx,
4545 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4546 grh->sgid_index);
4547
4548 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4549 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4550
4551 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4552 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4553 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4554 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4555
4556 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4557 V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
4558 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4559 V2_QPC_BYTE_24_TC_S, 0);
4560
4561 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4562 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4563 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4564 V2_QPC_BYTE_28_FL_S, 0);
4565 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4566 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4567
4568 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4569 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4570 ibdev_err(ibdev,
4571 "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
4572 hr_qp->sl, MAX_SERVICE_LEVEL);
4573 return -EINVAL;
4574 }
4575
4576 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4577 V2_QPC_BYTE_28_SL_S, hr_qp->sl);
4578 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4579 V2_QPC_BYTE_28_SL_S, 0);
4580
4581 return 0;
4582}
4583
4584static bool check_qp_state(enum ib_qp_state cur_state,
4585 enum ib_qp_state new_state)
4586{
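	/* Legal QP state transitions; no transition out of SQD or SQE is accepted */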
4587 static const bool sm[][IB_QPS_ERR + 1] = {
4588 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4589 [IB_QPS_INIT] = true },
4590 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4591 [IB_QPS_INIT] = true,
4592 [IB_QPS_RTR] = true,
4593 [IB_QPS_ERR] = true },
4594 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4595 [IB_QPS_RTS] = true,
4596 [IB_QPS_ERR] = true },
4597 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4598 [IB_QPS_RTS] = true,
4599 [IB_QPS_ERR] = true },
4600 [IB_QPS_SQD] = {},
4601 [IB_QPS_SQE] = {},
4602 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
4603 };
4604
4605 return sm[cur_state][new_state];
4606}
4607
4608static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4609 const struct ib_qp_attr *attr,
4610 int attr_mask,
4611 enum ib_qp_state cur_state,
4612 enum ib_qp_state new_state,
4613 struct hns_roce_v2_qp_context *context,
4614 struct hns_roce_v2_qp_context *qpc_mask)
4615{
4616 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4617 int ret = 0;
4618
4619 if (!check_qp_state(cur_state, new_state)) {
4620 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4621 return -EINVAL;
4622 }
4623
4624 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4625 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4626 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4627 qpc_mask);
4628 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4629 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4630 qpc_mask);
4631 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4632 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4633 qpc_mask);
4634 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4635 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4636 qpc_mask);
4637 }
4638
4639 return ret;
4640}
4641
4642static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4643 const struct ib_qp_attr *attr,
4644 int attr_mask,
4645 struct hns_roce_v2_qp_context *context,
4646 struct hns_roce_v2_qp_context *qpc_mask)
4647{
4648 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4649 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4650 int ret = 0;
4651
4652 if (attr_mask & IB_QP_AV) {
4653 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4654 qpc_mask);
4655 if (ret)
4656 return ret;
4657 }
4658
4659 if (attr_mask & IB_QP_TIMEOUT) {
4660 if (attr->timeout < 31) {
4661 roce_set_field(context->byte_28_at_fl,
4662 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4663 attr->timeout);
4664 roce_set_field(qpc_mask->byte_28_at_fl,
4665 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4666 0);
4667 } else {
4668 ibdev_warn(&hr_dev->ib_dev,
4669 "Local ACK timeout shall be 0 to 30.\n");
4670 }
4671 }
4672
4673 if (attr_mask & IB_QP_RETRY_CNT) {
4674 roce_set_field(context->byte_212_lsn,
4675 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4676 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4677 attr->retry_cnt);
4678 roce_set_field(qpc_mask->byte_212_lsn,
4679 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4680 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4681
4682 roce_set_field(context->byte_212_lsn,
4683 V2_QPC_BYTE_212_RETRY_CNT_M,
4684 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
4685 roce_set_field(qpc_mask->byte_212_lsn,
4686 V2_QPC_BYTE_212_RETRY_CNT_M,
4687 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4688 }
4689
4690 if (attr_mask & IB_QP_RNR_RETRY) {
4691 roce_set_field(context->byte_244_rnr_rxack,
4692 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4693 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4694 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4695 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4696 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4697
4698 roce_set_field(context->byte_244_rnr_rxack,
4699 V2_QPC_BYTE_244_RNR_CNT_M,
4700 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4701 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4702 V2_QPC_BYTE_244_RNR_CNT_M,
4703 V2_QPC_BYTE_244_RNR_CNT_S, 0);
4704 }
4705
4707 if (attr_mask & IB_QP_SQ_PSN) {
4708 roce_set_field(context->byte_172_sq_psn,
4709 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4710 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4711 roce_set_field(qpc_mask->byte_172_sq_psn,
4712 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4713 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4714
4715 roce_set_field(context->byte_196_sq_psn,
4716 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4717 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4718 roce_set_field(qpc_mask->byte_196_sq_psn,
4719 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4720 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4721
4722 roce_set_field(context->byte_220_retry_psn_msn,
4723 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4724 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4725 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4726 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4727 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4728
4729 roce_set_field(context->byte_224_retry_msg,
4730 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4731 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4732 attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4733 roce_set_field(qpc_mask->byte_224_retry_msg,
4734 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4735 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4736
4737 roce_set_field(context->byte_224_retry_msg,
4738 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4739 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4740 attr->sq_psn);
4741 roce_set_field(qpc_mask->byte_224_retry_msg,
4742 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4743 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4744
4745 roce_set_field(context->byte_244_rnr_rxack,
4746 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4747 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4748 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4749 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4750 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4751 }
4752
4753 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4754 attr->max_dest_rd_atomic) {
4755 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4756 V2_QPC_BYTE_140_RR_MAX_S,
4757 fls(attr->max_dest_rd_atomic - 1));
4758 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4759 V2_QPC_BYTE_140_RR_MAX_S, 0);
4760 }
4761
4762 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4763 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4764 V2_QPC_BYTE_208_SR_MAX_S,
4765 fls(attr->max_rd_atomic - 1));
4766 roce_set_field(qpc_mask->byte_208_irrl,
4767 V2_QPC_BYTE_208_SR_MAX_M,
4768 V2_QPC_BYTE_208_SR_MAX_S, 0);
4769 }
4770
4771 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4772 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4773
4774 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4775 roce_set_field(context->byte_80_rnr_rx_cqn,
4776 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4777 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4778 attr->min_rnr_timer);
4779 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4780 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4781 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4782 }
4783
4785 if (attr_mask & IB_QP_RQ_PSN) {
4786 roce_set_field(context->byte_108_rx_reqepsn,
4787 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4788 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4789 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4790 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4791 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4792
4793 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4794 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4795 roce_set_field(qpc_mask->byte_152_raq,
4796 V2_QPC_BYTE_152_RAQ_PSN_M,
4797 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4798 }
4799
4800 if (attr_mask & IB_QP_QKEY) {
4801 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4802 qpc_mask->qkey_xrcd = 0;
4803 hr_qp->qkey = attr->qkey;
4804 }
4805
4806 return ret;
4807}
4808
4809static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4810 const struct ib_qp_attr *attr,
4811 int attr_mask)
4812{
4813 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4814 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4815
4816 if (attr_mask & IB_QP_ACCESS_FLAGS)
4817 hr_qp->atomic_rd_en = attr->qp_access_flags;
4818
4819 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4820 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4821 if (attr_mask & IB_QP_PORT) {
4822 hr_qp->port = attr->port_num - 1;
4823 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4824 }
4825}
4826
4827static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4828 const struct ib_qp_attr *attr,
4829 int attr_mask, enum ib_qp_state cur_state,
4830 enum ib_qp_state new_state)
4831{
4832 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4833 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4834 struct hns_roce_v2_qp_context ctx[2];
4835 struct hns_roce_v2_qp_context *context = ctx;
4836 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4837 struct ib_device *ibdev = &hr_dev->ib_dev;
4838 unsigned long sq_flag = 0;
4839 unsigned long rq_flag = 0;
4840 int ret;
4841
4842 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
4843 return -EOPNOTSUPP;
4844
4845	/*
4846	 * The context mask is initialized to all ones below; every field
4847	 * that is modified must have its mask bits cleared to 0 so that
4848	 * hardware applies the value from the context.
4849	 */
4851 memset(context, 0, hr_dev->caps.qpc_sz);
4852 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
4853
4854 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4855 new_state, context, qpc_mask);
4856 if (ret)
4857 goto out;
4858
4859	/* When the QP moves to the error state, flush the SQ and RQ WQEs */
4860 if (new_state == IB_QPS_ERR) {
4861 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4862 hr_qp->state = IB_QPS_ERR;
4863 roce_set_field(context->byte_160_sq_ci_pi,
4864 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4865 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4866 hr_qp->sq.head);
4867 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4868 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4869 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4870 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4871
4872 if (!ibqp->srq) {
4873 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4874 roce_set_field(context->byte_84_rq_ci_pi,
4875 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4876 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4877 hr_qp->rq.head);
4878 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4879 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4880 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4881 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
4882 }
4883 }
4884
4885	/* Configure the optional fields specified in attr_mask */
4886 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4887 qpc_mask);
4888 if (ret)
4889 goto out;
4890
4891 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4892 ibqp->srq ? 1 : 0);
4893 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4894 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4895
4896	/* Every state transition must update the QP_ST field */
4897 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4898 V2_QPC_BYTE_60_QP_ST_S, new_state);
4899 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4900 V2_QPC_BYTE_60_QP_ST_S, 0);
4901
4902	/* Pass the context and its mask to hardware through the mailbox */
4903 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
4904 if (ret) {
4905 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
4906 goto out;
4907 }
4908
4909 hr_qp->state = new_state;
4910
4911 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4912
4913 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4914 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4915 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4916 if (ibqp->send_cq != ibqp->recv_cq)
4917 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4918 hr_qp->qpn, NULL);
4919
4920 hr_qp->rq.head = 0;
4921 hr_qp->rq.tail = 0;
4922 hr_qp->sq.head = 0;
4923 hr_qp->sq.tail = 0;
4924 hr_qp->next_sge = 0;
4925 if (hr_qp->rq.wqe_cnt)
4926 *hr_qp->rdb.db_record = 0;
4927 }
4928
4929out:
4930 return ret;
4931}
4932
4933static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
4934{
4935 static const enum ib_qp_state map[] = {
4936 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
4937 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
4938 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
4939 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
4940 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
4941 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
4942 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
4943 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
4944 };
4945
4946 return (state < ARRAY_SIZE(map)) ? map[state] : -1;
4947}
4948
4949static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4950 struct hns_roce_qp *hr_qp,
4951 struct hns_roce_v2_qp_context *hr_context)
4952{
4953 struct hns_roce_cmd_mailbox *mailbox;
4954 int ret;
4955
4956 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4957 if (IS_ERR(mailbox))
4958 return PTR_ERR(mailbox);
4959
4960 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4961 HNS_ROCE_CMD_QUERY_QPC,
4962 HNS_ROCE_CMD_TIMEOUT_MSECS);
4963 if (ret)
4964 goto out;
4965
4966 memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
4967
4968out:
4969 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4970 return ret;
4971}
4972
4973static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4974 int qp_attr_mask,
4975 struct ib_qp_init_attr *qp_init_attr)
4976{
4977 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4978 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4979 struct hns_roce_v2_qp_context context = {};
4980 struct ib_device *ibdev = &hr_dev->ib_dev;
4981 int tmp_qp_state;
4982 int state;
4983 int ret;
4984
4985 memset(qp_attr, 0, sizeof(*qp_attr));
4986 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4987
4988 mutex_lock(&hr_qp->mutex);
4989
4990 if (hr_qp->state == IB_QPS_RESET) {
4991 qp_attr->qp_state = IB_QPS_RESET;
4992 ret = 0;
4993 goto done;
4994 }
4995
4996 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4997 if (ret) {
4998 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
4999 ret = -EINVAL;
5000 goto out;
5001 }
5002
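	/* Translate the QP state stored in the QPC into an IB state */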
5003 state = roce_get_field(context.byte_60_qpst_tempid,
5004 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
5005 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5006 if (tmp_qp_state == -1) {
5007 ibdev_err(ibdev, "Illegal ib_qp_state\n");
5008 ret = -EINVAL;
5009 goto out;
5010 }
5011 hr_qp->state = (u8)tmp_qp_state;
5012 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5013 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
5014 V2_QPC_BYTE_24_MTU_M,
5015 V2_QPC_BYTE_24_MTU_S);
5016 qp_attr->path_mig_state = IB_MIG_ARMED;
5017 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5018 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5019 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5020
5021 qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
5022 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
5023 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
5024 qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
5025 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
5026 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
5027	qp_attr->dest_qp_num = roce_get_field(context.byte_56_dqpn_err,
5028 V2_QPC_BYTE_56_DQPN_M,
5029 V2_QPC_BYTE_56_DQPN_S);
5030 qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
5031 V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
5032 ((roce_get_bit(context.byte_76_srqn_op_en,
5033 V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
5034 ((roce_get_bit(context.byte_76_srqn_op_en,
5035 V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
5036
5037 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5038 hr_qp->ibqp.qp_type == IB_QPT_UC) {
5039 struct ib_global_route *grh =
5040 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5041
5042 rdma_ah_set_sl(&qp_attr->ah_attr,
5043 roce_get_field(context.byte_28_at_fl,
5044 V2_QPC_BYTE_28_SL_M,
5045 V2_QPC_BYTE_28_SL_S));
5046 grh->flow_label = roce_get_field(context.byte_28_at_fl,
5047 V2_QPC_BYTE_28_FL_M,
5048 V2_QPC_BYTE_28_FL_S);
5049 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
5050 V2_QPC_BYTE_20_SGID_IDX_M,
5051 V2_QPC_BYTE_20_SGID_IDX_S);
5052 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
5053 V2_QPC_BYTE_24_HOP_LIMIT_M,
5054 V2_QPC_BYTE_24_HOP_LIMIT_S);
5055 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
5056 V2_QPC_BYTE_24_TC_M,
5057 V2_QPC_BYTE_24_TC_S);
5058
5059 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5060 }
5061
5062 qp_attr->port_num = hr_qp->port + 1;
5063 qp_attr->sq_draining = 0;
5064 qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
5065 V2_QPC_BYTE_208_SR_MAX_M,
5066 V2_QPC_BYTE_208_SR_MAX_S);
5067 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
5068 V2_QPC_BYTE_140_RR_MAX_M,
5069 V2_QPC_BYTE_140_RR_MAX_S);
5070 qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
5071 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
5072 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
5073 qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
5074 V2_QPC_BYTE_28_AT_M,
5075 V2_QPC_BYTE_28_AT_S);
5076 qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
5077 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
5078 V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
5079 qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
5080 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
5081 V2_QPC_BYTE_244_RNR_NUM_INIT_S);
5082
5083done:
5084 qp_attr->cur_qp_state = qp_attr->qp_state;
5085 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5086 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
5087
5088 if (!ibqp->uobject) {
5089 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5090 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5091 } else {
5092 qp_attr->cap.max_send_wr = 0;
5093 qp_attr->cap.max_send_sge = 0;
5094 }
5095
5096 qp_init_attr->cap = qp_attr->cap;
5097 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5098
5099out:
5100 mutex_unlock(&hr_qp->mutex);
5101 return ret;
5102}
5103
5104static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5105 struct hns_roce_qp *hr_qp,
5106 struct ib_udata *udata)
5107{
5108 struct ib_device *ibdev = &hr_dev->ib_dev;
5109 struct hns_roce_cq *send_cq, *recv_cq;
5110 unsigned long flags;
5111 int ret = 0;
5112
5113 if ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5114 hr_qp->ibqp.qp_type == IB_QPT_UD) &&
5115 hr_qp->state != IB_QPS_RESET) {
5116		/* Modify the QP to the Reset state before destroying it */
5117 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5118 hr_qp->state, IB_QPS_RESET);
5119 if (ret)
5120 ibdev_err(ibdev,
5121 "failed to modify QP to RST, ret = %d.\n",
5122 ret);
5123 }
5124
5125 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5126 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5127
5128 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5129 hns_roce_lock_cqs(send_cq, recv_cq);
5130
5131 if (!udata) {
5132 if (recv_cq)
5133 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5134 (hr_qp->ibqp.srq ?
5135 to_hr_srq(hr_qp->ibqp.srq) :
5136 NULL));
5137
5138 if (send_cq && send_cq != recv_cq)
5139 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5141 }
5142
5143 hns_roce_qp_remove(hr_dev, hr_qp);
5144
5145 hns_roce_unlock_cqs(send_cq, recv_cq);
5146 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5147
5148 return ret;
5149}
5150
5151static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5152{
5153 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5154 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5155 int ret;
5156
5157 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5158 if (ret)
5159 ibdev_err(&hr_dev->ib_dev,
5160 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5161 hr_qp->qpn, ret);
5162
5163 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5164
5165 return 0;
5166}
5167
5168static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5169 struct hns_roce_qp *hr_qp)
5170{
5171 struct ib_device *ibdev = &hr_dev->ib_dev;
5172 struct hns_roce_sccc_clr_done *resp;
5173 struct hns_roce_sccc_clr *clr;
5174 struct hns_roce_cmq_desc desc;
5175 int ret, i;
5176
5177 mutex_lock(&hr_dev->qp_table.scc_mutex);
5178
5179	/* Reset the SCC context */
5180 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5181 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5182 if (ret) {
5183 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5184 goto out;
5185 }
5186
5187	/* Clear the SCC context of this QP */
5188 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5189 clr = (struct hns_roce_sccc_clr *)desc.data;
5190 clr->qpn = cpu_to_le32(hr_qp->qpn);
5191 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5192 if (ret) {
5193 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5194 goto out;
5195 }
5196
5197	/* Poll the clear-done flag until hardware finishes */
5198 resp = (struct hns_roce_sccc_clr_done *)desc.data;
5199 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5200 hns_roce_cmq_setup_basic_desc(&desc,
5201 HNS_ROCE_OPC_QUERY_SCCC, true);
5202 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5203 if (ret) {
5204 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5205 ret);
5206 goto out;
5207 }
5208
5209 if (resp->clr_done)
5210 goto out;
5211
5212 msleep(20);
5213 }
5214
5215	ibdev_err(ibdev, "Querying the SCC clr done flag timed out.\n");
5216 ret = -ETIMEDOUT;
5217
5218out:
5219 mutex_unlock(&hr_dev->qp_table.scc_mutex);
5220 return ret;
5221}
5222
5223static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5224 struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5225 u32 cqn, void *mb_buf, u64 *mtts_wqe,
5226 u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5227 dma_addr_t dma_handle_idx)
5228{
5229 struct hns_roce_srq_context *srq_context;
5230
5231 srq_context = mb_buf;
5232 memset(srq_context, 0, sizeof(*srq_context));
5233
5234 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5235 SRQC_BYTE_4_SRQ_ST_S, 1);
5236
5237 roce_set_field(srq_context->byte_4_srqn_srqst,
5238 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5239 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5240 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5241 srq->wqe_cnt));
5242 roce_set_field(srq_context->byte_4_srqn_srqst,
5243 SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5244 ilog2(srq->wqe_cnt));
5245
5246 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5247 SRQC_BYTE_4_SRQN_S, srq->srqn);
5248
5249 roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5250 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5251
5252 roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5253 SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5254
5255 srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5256
5257 roce_set_field(srq_context->byte_24_wqe_bt_ba,
5258 SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5259 SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5260 dma_handle_wqe >> 35);
5261
5262 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5263 SRQC_BYTE_28_PD_S, pdn);
5264 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5265 SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5266 fls(srq->max_gs - 1));
5267
5268 srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
5269 roce_set_field(srq_context->rsv_idx_bt_ba,
5270 SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5271 SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5272 dma_handle_idx >> 35);
5273
5274 srq_context->idx_cur_blk_addr =
5275 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
5276 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5277 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5278 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5279 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5280 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5281 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5282 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5283 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
5284 srq->wqe_cnt));
5285
5286 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5287 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5288 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5289 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
5290 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5291 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5292 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5293 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
5294
5295 srq_context->idx_nxt_blk_addr =
5296 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
5297 roce_set_field(srq_context->rsv_idxnxtblkaddr,
5298 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5299 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5300 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5301 roce_set_field(srq_context->byte_56_xrc_cqn,
5302 SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5303 cqn);
5304 roce_set_field(srq_context->byte_56_xrc_cqn,
5305 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5306 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5307 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5308 roce_set_field(srq_context->byte_56_xrc_cqn,
5309 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5310 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5311 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5312
5313 roce_set_bit(srq_context->db_record_addr_record_en,
5314 SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5315}
5316
5317static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5318 struct ib_srq_attr *srq_attr,
5319 enum ib_srq_attr_mask srq_attr_mask,
5320 struct ib_udata *udata)
5321{
5322 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5323 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5324 struct hns_roce_srq_context *srq_context;
5325 struct hns_roce_srq_context *srqc_mask;
5326 struct hns_roce_cmd_mailbox *mailbox;
5327 int ret;
5328
5329	/* Resizing SRQs is not supported yet */
5330 if (srq_attr_mask & IB_SRQ_MAX_WR)
5331 return -EINVAL;
5332
5333 if (srq_attr_mask & IB_SRQ_LIMIT) {
5334 if (srq_attr->srq_limit >= srq->wqe_cnt)
5335 return -EINVAL;
5336
5337 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5338 if (IS_ERR(mailbox))
5339 return PTR_ERR(mailbox);
5340
5341 srq_context = mailbox->buf;
5342 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5343
5344 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5345
5346 roce_set_field(srq_context->byte_8_limit_wl,
5347 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5348 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5349 roce_set_field(srqc_mask->byte_8_limit_wl,
5350 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5351 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5352
5353 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5354 HNS_ROCE_CMD_MODIFY_SRQC,
5355 HNS_ROCE_CMD_TIMEOUT_MSECS);
5356 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5357 if (ret) {
5358 ibdev_err(&hr_dev->ib_dev,
5359 "failed to handle cmd of modifying SRQ, ret = %d.\n",
5360 ret);
5361 return ret;
5362 }
5363 }
5364
5365 return 0;
5366}
5367
5368static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5369{
5370 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5371 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5372 struct hns_roce_srq_context *srq_context;
5373 struct hns_roce_cmd_mailbox *mailbox;
5374 int limit_wl;
5375 int ret;
5376
5377 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5378 if (IS_ERR(mailbox))
5379 return PTR_ERR(mailbox);
5380
5381 srq_context = mailbox->buf;
5382 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5383 HNS_ROCE_CMD_QUERY_SRQC,
5384 HNS_ROCE_CMD_TIMEOUT_MSECS);
5385 if (ret) {
5386 ibdev_err(&hr_dev->ib_dev,
5387 "failed to process cmd of querying SRQ, ret = %d.\n",
5388 ret);
5389 goto out;
5390 }
5391
5392 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5393 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5394 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5395
5396 attr->srq_limit = limit_wl;
5397 attr->max_wr = srq->wqe_cnt - 1;
5398 attr->max_sge = srq->max_gs;
5399
5400out:
5401 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5402 return ret;
5403}
5404
5405static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5406{
5407 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5408 struct hns_roce_v2_cq_context *cq_context;
5409 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5410 struct hns_roce_v2_cq_context *cqc_mask;
5411 struct hns_roce_cmd_mailbox *mailbox;
5412 int ret;
5413
5414 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5415 if (IS_ERR(mailbox))
5416 return PTR_ERR(mailbox);
5417
5418 cq_context = mailbox->buf;
5419 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5420
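	/* The mask occupies the second half of the mailbox buffer: only
	 * fields whose mask bits are cleared below are updated by the
	 * MODIFY_CQC command.
	 */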
5421 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5422
5423 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5424 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5425 cq_count);
5426 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5427 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5428 0);
5429 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5430 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5431 cq_period);
5432 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5433 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5434 0);
5435
5436 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5437 HNS_ROCE_CMD_MODIFY_CQC,
5438 HNS_ROCE_CMD_TIMEOUT_MSECS);
5439 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5440 if (ret)
5441 ibdev_err(&hr_dev->ib_dev,
5442 "failed to process cmd when modifying CQ, ret = %d.\n",
5443 ret);
5444
5445 return ret;
5446}
5447
5448static void hns_roce_irq_work_handle(struct work_struct *work)
5449{
5450 struct hns_roce_work *irq_work =
5451 container_of(work, struct hns_roce_work, work);
5452 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5453
5454 switch (irq_work->event_type) {
5455 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "Path migration succeeded.\n");
5457 break;
5458 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5459 ibdev_warn(ibdev, "Path migration failed.\n");
5460 break;
5461 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5462 break;
5463 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5464 ibdev_warn(ibdev, "Send queue drained.\n");
5465 break;
5466 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5468 irq_work->queue_num, irq_work->sub_type);
5469 break;
5470 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5471 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5472 irq_work->queue_num);
5473 break;
5474 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		ibdev_err(ibdev, "Local access violation on work queue 0x%x, sub_event type is: %d\n",
5476 irq_work->queue_num, irq_work->sub_type);
5477 break;
5478 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_warn(ibdev, "SRQ limit reached.\n");
5480 break;
5481 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5483 break;
5484 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catastrophic error.\n");
5486 break;
5487 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		ibdev_err(ibdev, "CQ 0x%x access error.\n", irq_work->queue_num);
5489 break;
5490 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		ibdev_warn(ibdev, "CQ 0x%x overflow.\n", irq_work->queue_num);
5492 break;
5493 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5494 ibdev_warn(ibdev, "DB overflow.\n");
5495 break;
5496 case HNS_ROCE_EVENT_TYPE_FLR:
5497 ibdev_warn(ibdev, "Function level reset.\n");
5498 break;
5499 default:
5500 break;
5501 }
5502
5503 kfree(irq_work);
5504}
5505
5506static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5507 struct hns_roce_eq *eq, u32 queue_num)
5508{
5509 struct hns_roce_work *irq_work;
5510
5511 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5512 if (!irq_work)
5513 return;
5514
5515 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5516 irq_work->hr_dev = hr_dev;
5517 irq_work->event_type = eq->event_type;
5518 irq_work->sub_type = eq->sub_type;
5519 irq_work->queue_num = queue_num;
5520 queue_work(hr_dev->irq_workq, &(irq_work->work));
5521}
5522
5523static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
5524{
5525 struct hns_roce_dev *hr_dev = eq->hr_dev;
5526 __le32 doorbell[2] = {};
5527
5528 if (eq->type_flag == HNS_ROCE_AEQ) {
5529 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5530 HNS_ROCE_V2_EQ_DB_CMD_S,
5531 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5532 HNS_ROCE_EQ_DB_CMD_AEQ :
5533 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5534 } else {
5535 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
5536 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
5537
5538 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5539 HNS_ROCE_V2_EQ_DB_CMD_S,
5540 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5541 HNS_ROCE_EQ_DB_CMD_CEQ :
5542 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5543 }
5544
5545 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
5546 HNS_ROCE_V2_EQ_DB_PARA_S,
5547 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
5548
5549 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
5550}
5551
5552static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5553{
5554 struct hns_roce_aeqe *aeqe;
5555
5556 aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5557 (eq->cons_index & (eq->entries - 1)) *
5558 eq->eqe_size);
5559
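	/* HW toggles the owner bit on each pass through the AEQ, so an
	 * entry is valid for SW only while its owner bit differs from the
	 * wrap parity of cons_index.
	 */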
5560 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5561 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5562}
5563
5564static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5565 struct hns_roce_eq *eq)
5566{
5567 struct device *dev = hr_dev->dev;
5568 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5569 int aeqe_found = 0;
5570 int event_type;
5571 u32 queue_num;
5572 int sub_type;
5573
5574 while (aeqe) {
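		/* Make sure we read the AEQ entry after we have checked the
		 * valid bit of the AEQE.
		 */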
5578 dma_rmb();
5579
5580 event_type = roce_get_field(aeqe->asyn,
5581 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5582 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5583 sub_type = roce_get_field(aeqe->asyn,
5584 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5585 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5586 queue_num = roce_get_field(aeqe->event.queue_event.num,
5587 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5588 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5589
5590 switch (event_type) {
5591 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5592 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5593 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5594 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5595 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5596 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5597 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5598 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5599 hns_roce_qp_event(hr_dev, queue_num, event_type);
5600 break;
5601 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5602 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5603 hns_roce_srq_event(hr_dev, queue_num, event_type);
5604 break;
5605 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5606 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5607 hns_roce_cq_event(hr_dev, queue_num, event_type);
5608 break;
5609 case HNS_ROCE_EVENT_TYPE_MB:
5610 hns_roce_cmd_event(hr_dev,
5611 le16_to_cpu(aeqe->event.cmd.token),
5612 aeqe->event.cmd.status,
5613 le64_to_cpu(aeqe->event.cmd.out_param));
5614 break;
5615 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5616 case HNS_ROCE_EVENT_TYPE_FLR:
5617 break;
5618 default:
5619 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5620 event_type, eq->eqn, eq->cons_index);
5621 break;
5622 }
5623
5624 eq->event_type = event_type;
5625 eq->sub_type = sub_type;
5626 ++eq->cons_index;
5627 aeqe_found = 1;
5628
5629 if (eq->cons_index > (2 * eq->entries - 1))
5630 eq->cons_index = 0;
5631
5632 hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
5633
5634 aeqe = next_aeqe_sw_v2(eq);
5635 }
5636
5637 set_eq_cons_index_v2(eq);
5638 return aeqe_found;
5639}
5640
5641static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5642{
5643 struct hns_roce_ceqe *ceqe;
5644
5645 ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5646 (eq->cons_index & (eq->entries - 1)) *
5647 eq->eqe_size);
5648
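	/* Same owner-bit scheme as the AEQ: the entry is valid only while
	 * its owner bit differs from the wrap parity of cons_index.
	 */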
5649 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5650 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5651}
5652
5653static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5654 struct hns_roce_eq *eq)
5655{
5656 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5657 int ceqe_found = 0;
5658 u32 cqn;
5659
5660 while (ceqe) {
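		/* Make sure we read the CEQ entry after we have checked the
		 * valid bit of the CEQE.
		 */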
5664 dma_rmb();
5665
5666 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5667 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5668
5669 hns_roce_cq_completion(hr_dev, cqn);
5670
5671 ++eq->cons_index;
5672 ceqe_found = 1;
5673
5674 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
5675 eq->cons_index = 0;
5676
5677 ceqe = next_ceqe_sw_v2(eq);
5678 }
5679
5680 set_eq_cons_index_v2(eq);
5681
5682 return ceqe_found;
5683}
5684
5685static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5686{
5687 struct hns_roce_eq *eq = eq_ptr;
5688 struct hns_roce_dev *hr_dev = eq->hr_dev;
5689 int int_work;
5690
5691 if (eq->type_flag == HNS_ROCE_CEQ)
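		/* Completion event interrupt */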
5693 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5694 else
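		/* Asynchronous event interrupt */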
5696 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5697
5698 return IRQ_RETVAL(int_work);
5699}
5700
5701static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5702{
5703 struct hns_roce_dev *hr_dev = dev_id;
5704 struct device *dev = hr_dev->dev;
5705 int int_work = 0;
5706 u32 int_st;
5707 u32 int_en;
5708
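	/* Abnormal interrupt */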
5710 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5711 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5712
5713 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5714 struct pci_dev *pdev = hr_dev->pci_dev;
5715 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5716 const struct hnae3_ae_ops *ops = ae_dev->ops;
5717
5718 dev_err(dev, "AEQ overflow!\n");
5719
5720 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5721 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5722
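		/* Set reset level for reset_event() */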
5724 if (ops->set_default_reset_request)
5725 ops->set_default_reset_request(ae_dev,
5726 HNAE3_FUNC_RESET);
5727 if (ops->reset_event)
5728 ops->reset_event(pdev, NULL);
5729
5730 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5731 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5732
5733 int_work = 1;
5734 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5735 dev_err(dev, "BUS ERR!\n");
5736
5737 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5738 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5739
5740 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5741 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5742
5743 int_work = 1;
5744 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5745 dev_err(dev, "OTHER ERR!\n");
5746
5747 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5748 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5749
5750 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5751 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5752
5753 int_work = 1;
	} else {
		dev_err(dev, "No abnormal irq found.\n");
	}
5756
5757 return IRQ_RETVAL(int_work);
5758}
5759
5760static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5761 int eq_num, int enable_flag)
5762{
5763 int i;
5764
5765 if (enable_flag == EQ_ENABLE) {
5766 for (i = 0; i < eq_num; i++)
5767 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5768 i * EQ_REG_OFFSET,
5769 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5770
5771 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5772 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5773 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5774 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5775 } else {
5776 for (i = 0; i < eq_num; i++)
5777 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5778 i * EQ_REG_OFFSET,
5779 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5780
5781 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5782 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5783 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5784 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5785 }
5786}
5787
5788static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5789{
5790 struct device *dev = hr_dev->dev;
5791 int ret;
5792
5793 if (eqn < hr_dev->caps.num_comp_vectors)
5794 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5795 0, HNS_ROCE_CMD_DESTROY_CEQC,
5796 HNS_ROCE_CMD_TIMEOUT_MSECS);
5797 else
5798 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5799 0, HNS_ROCE_CMD_DESTROY_AEQC,
5800 HNS_ROCE_CMD_TIMEOUT_MSECS);
5801 if (ret)
5802 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5803}
5804
5805static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5806{
5807 hns_roce_mtr_destroy(hr_dev, &eq->mtr);
5808}
5809
5810static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5811 void *mb_buf)
5812{
5813 u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
5814 struct hns_roce_eq_context *eqc;
5815 u64 bt_ba = 0;
5816 int count;
5817
5818 eqc = mb_buf;
5819 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5820
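	/* init eqc */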
5822 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5823 eq->cons_index = 0;
5824 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5825 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5826 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5827 eq->shift = ilog2((unsigned int)eq->entries);
5828
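	/* if not multi-hop, eqe buffer only use one trunk */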
5830 count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
5831 &bt_ba);
5832 if (count < 1) {
5833 dev_err(hr_dev->dev, "failed to find EQE mtr\n");
5834 return -ENOBUFS;
5835 }
5836
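	/* set eqc state */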
5838 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
5839 HNS_ROCE_V2_EQ_STATE_VALID);
5840
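	/* set eqe hop num */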
5842 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
5843 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5844
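	/* set eqc over_ignore */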
5846 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
5847 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5848
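	/* set eqc coalesce */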
5850 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
5851 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5852
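	/* set eqc arm_state */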
5854 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
5855 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5856
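	/* set eqn */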
5858 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
5859 eq->eqn);
5860
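	/* set eqe_cnt */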
5862 roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
5863 HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
5864
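	/* set eqe_ba_pg_sz */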
5866 roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
5867 HNS_ROCE_EQC_BA_PG_SZ_S,
5868 to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
5869
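	/* set eqe_buf_pg_sz */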
5871 roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
5872 HNS_ROCE_EQC_BUF_PG_SZ_S,
5873 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
5874
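	/* set eq_producer_idx */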
5876 roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
5877 HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
5878
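	/* set eq_max_cnt */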
5880 roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
5881 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5882
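	/* set eq_period */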
5884 roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
5885 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5886
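	/* set eqe_report_timer */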
5888 roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
5889 HNS_ROCE_EQC_REPORT_TIMER_S,
5890 HNS_ROCE_EQ_INIT_REPORT_TIMER);
5891
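	/* set eqe_ba [34:3] */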
5893 roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
5894 HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
5895
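	/* set eqe_ba [63:35] */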
5897 roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
5898 HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
5899
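	/* set eq shift */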
5901 roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
5902 eq->shift);
5903
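	/* set eq MSI_IDX */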
5905 roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
5906 HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
5907
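	/* set cur_eqe_ba [27:12] */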
5909 roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5910 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
5911
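	/* set cur_eqe_ba [59:28] */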
5913 roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5914 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
5915
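	/* set cur_eqe_ba [63:60] */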
5917 roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5918 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
5919
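	/* set eq consumer idx */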
5921 roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
5922 HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
5923
5924 roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5925 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
5926
5927 roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5928 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
5929
5930 roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
5931 HNS_ROCE_EQC_EQE_SIZE_S,
5932 eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
5933
5934 return 0;
5935}
5936
5937static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5938{
5939 struct hns_roce_buf_attr buf_attr = {};
5940 int err;
5941
5942 if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
5943 eq->hop_num = 0;
5944 else
5945 eq->hop_num = hr_dev->caps.eqe_hop_num;
5946
5947 buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
5948 buf_attr.region[0].size = eq->entries * eq->eqe_size;
5949 buf_attr.region[0].hopnum = eq->hop_num;
5950 buf_attr.region_count = 1;
5951 buf_attr.fixed_page = true;
5952
5953 err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
5954 hr_dev->caps.eqe_ba_pg_sz +
5955 HNS_HW_PAGE_SHIFT, NULL, 0);
5956 if (err)
5957 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
5958
5959 return err;
5960}
5961
5962static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5963 struct hns_roce_eq *eq,
5964 unsigned int eq_cmd)
5965{
5966 struct hns_roce_cmd_mailbox *mailbox;
5967 int ret;
5968
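	/* Allocate mailbox memory */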
5970 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5971 if (IS_ERR_OR_NULL(mailbox))
5972 return -ENOMEM;
5973
5974 ret = alloc_eq_buf(hr_dev, eq);
5975 if (ret)
5976 goto free_cmd_mbox;
5977
5978 ret = config_eqc(hr_dev, eq, mailbox->buf);
5979 if (ret)
5980 goto err_cmd_mbox;
5981
5982 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5983 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5984 if (ret) {
5985 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
5986 goto err_cmd_mbox;
5987 }
5988
5989 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5990
5991 return 0;
5992
5993err_cmd_mbox:
5994 free_eq_buf(hr_dev, eq);
5995
5996free_cmd_mbox:
5997 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5998
5999 return ret;
6000}
6001
6002static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
6003 int comp_num, int aeq_num, int other_num)
6004{
6005 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6006 int i, j;
6007 int ret;
6008
6009 for (i = 0; i < irq_num; i++) {
6010 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
6011 GFP_KERNEL);
6012 if (!hr_dev->irq_names[i]) {
6013 ret = -ENOMEM;
6014 goto err_kzalloc_failed;
6015 }
6016 }
6017
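	/* irq contains: abnormal + AEQ + CEQ */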
6019 for (j = 0; j < other_num; j++)
6020 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6021 "hns-abn-%d", j);
6022
6023 for (j = other_num; j < (other_num + aeq_num); j++)
6024 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6025 "hns-aeq-%d", j - other_num);
6026
6027 for (j = (other_num + aeq_num); j < irq_num; j++)
6028 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6029 "hns-ceq-%d", j - other_num - aeq_num);
6030
6031 for (j = 0; j < irq_num; j++) {
6032 if (j < other_num)
6033 ret = request_irq(hr_dev->irq[j],
6034 hns_roce_v2_msix_interrupt_abn,
6035 0, hr_dev->irq_names[j], hr_dev);
6037 else if (j < (other_num + comp_num))
6038 ret = request_irq(eq_table->eq[j - other_num].irq,
6039 hns_roce_v2_msix_interrupt_eq,
6040 0, hr_dev->irq_names[j + aeq_num],
6041 &eq_table->eq[j - other_num]);
6042 else
6043 ret = request_irq(eq_table->eq[j - other_num].irq,
6044 hns_roce_v2_msix_interrupt_eq,
6045 0, hr_dev->irq_names[j - comp_num],
6046 &eq_table->eq[j - other_num]);
6047 if (ret) {
6048 dev_err(hr_dev->dev, "Request irq error!\n");
6049 goto err_request_failed;
6050 }
6051 }
6052
6053 return 0;
6054
6055err_request_failed:
6056 for (j -= 1; j >= 0; j--)
6057 if (j < other_num)
6058 free_irq(hr_dev->irq[j], hr_dev);
6059 else
6060 free_irq(eq_table->eq[j - other_num].irq,
6061 &eq_table->eq[j - other_num]);
6062
6063err_kzalloc_failed:
6064 for (i -= 1; i >= 0; i--)
6065 kfree(hr_dev->irq_names[i]);
6066
6067 return ret;
6068}
6069
6070static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
6071{
6072 int irq_num;
6073 int eq_num;
6074 int i;
6075
6076 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6077 irq_num = eq_num + hr_dev->caps.num_other_vectors;
6078
6079 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6080 free_irq(hr_dev->irq[i], hr_dev);
6081
6082 for (i = 0; i < eq_num; i++)
6083 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6084
6085 for (i = 0; i < irq_num; i++)
6086 kfree(hr_dev->irq_names[i]);
6087}
6088
6089static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6090{
6091 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6092 struct device *dev = hr_dev->dev;
6093 struct hns_roce_eq *eq;
6094 unsigned int eq_cmd;
6095 int irq_num;
6096 int eq_num;
6097 int other_num;
6098 int comp_num;
6099 int aeq_num;
6100 int i;
6101 int ret;
6102
6103 other_num = hr_dev->caps.num_other_vectors;
6104 comp_num = hr_dev->caps.num_comp_vectors;
6105 aeq_num = hr_dev->caps.num_aeq_vectors;
6106
6107 eq_num = comp_num + aeq_num;
6108 irq_num = eq_num + other_num;
6109
6110 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6111 if (!eq_table->eq)
6112 return -ENOMEM;
6113
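	/* create eq */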
6115 for (i = 0; i < eq_num; i++) {
6116 eq = &eq_table->eq[i];
6117 eq->hr_dev = hr_dev;
6118 eq->eqn = i;
6119 if (i < comp_num) {
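			/* CEQ */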
6121 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6122 eq->type_flag = HNS_ROCE_CEQ;
6123 eq->entries = hr_dev->caps.ceqe_depth;
6124 eq->eqe_size = hr_dev->caps.ceqe_size;
6125 eq->irq = hr_dev->irq[i + other_num + aeq_num];
6126 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6127 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6128 } else {
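			/* AEQ */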
6130 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6131 eq->type_flag = HNS_ROCE_AEQ;
6132 eq->entries = hr_dev->caps.aeqe_depth;
6133 eq->eqe_size = hr_dev->caps.aeqe_size;
6134 eq->irq = hr_dev->irq[i - comp_num + other_num];
6135 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6136 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6137 }
6138
6139 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6140 if (ret) {
6141 dev_err(dev, "eq create failed.\n");
6142 goto err_create_eq_fail;
6143 }
6144 }
6145
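	/* enable irq */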
6147 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6148
6149 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
6150 aeq_num, other_num);
6151 if (ret) {
6152 dev_err(dev, "Request irq failed.\n");
6153 goto err_request_irq_fail;
6154 }
6155
6156 hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6157 if (!hr_dev->irq_workq) {
6158 dev_err(dev, "Create irq workqueue failed!\n");
6159 ret = -ENOMEM;
6160 goto err_create_wq_fail;
6161 }
6162
6163 return 0;
6164
6165err_create_wq_fail:
6166 __hns_roce_free_irq(hr_dev);
6167
6168err_request_irq_fail:
6169 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6170
6171err_create_eq_fail:
6172 for (i -= 1; i >= 0; i--)
6173 free_eq_buf(hr_dev, &eq_table->eq[i]);
6174 kfree(eq_table->eq);
6175
6176 return ret;
6177}
6178
6179static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6180{
6181 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6182 int eq_num;
6183 int i;
6184
6185 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6186
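	/* Disable irq */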
6188 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6189
6190 __hns_roce_free_irq(hr_dev);
6191
6192 for (i = 0; i < eq_num; i++) {
6193 hns_roce_v2_destroy_eqc(hr_dev, i);
6194
6195 free_eq_buf(hr_dev, &eq_table->eq[i]);
6196 }
6197
6198 kfree(eq_table->eq);
6199
6200 flush_workqueue(hr_dev->irq_workq);
6201 destroy_workqueue(hr_dev->irq_workq);
6202}
6203
6204static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6205 .query_cqc_info = hns_roce_v2_query_cqc_info,
6206};
6207
6208static const struct ib_device_ops hns_roce_v2_dev_ops = {
6209 .destroy_qp = hns_roce_v2_destroy_qp,
6210 .modify_cq = hns_roce_v2_modify_cq,
6211 .poll_cq = hns_roce_v2_poll_cq,
6212 .post_recv = hns_roce_v2_post_recv,
6213 .post_send = hns_roce_v2_post_send,
6214 .query_qp = hns_roce_v2_query_qp,
6215 .req_notify_cq = hns_roce_v2_req_notify_cq,
6216};
6217
6218static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6219 .modify_srq = hns_roce_v2_modify_srq,
6220 .post_srq_recv = hns_roce_v2_post_srq_recv,
6221 .query_srq = hns_roce_v2_query_srq,
6222};
6223
6224static const struct hns_roce_hw hns_roce_hw_v2 = {
6225 .cmq_init = hns_roce_v2_cmq_init,
6226 .cmq_exit = hns_roce_v2_cmq_exit,
6227 .hw_profile = hns_roce_v2_profile,
6228 .hw_init = hns_roce_v2_init,
6229 .hw_exit = hns_roce_v2_exit,
6230 .post_mbox = hns_roce_v2_post_mbox,
6231 .chk_mbox = hns_roce_v2_chk_mbox,
6232 .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6233 .set_gid = hns_roce_v2_set_gid,
6234 .set_mac = hns_roce_v2_set_mac,
6235 .write_mtpt = hns_roce_v2_write_mtpt,
6236 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6237 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6238 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6239 .write_cqc = hns_roce_v2_write_cqc,
6240 .set_hem = hns_roce_v2_set_hem,
6241 .clear_hem = hns_roce_v2_clear_hem,
6242 .modify_qp = hns_roce_v2_modify_qp,
6243 .query_qp = hns_roce_v2_query_qp,
6244 .destroy_qp = hns_roce_v2_destroy_qp,
6245 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6246 .modify_cq = hns_roce_v2_modify_cq,
6247 .post_send = hns_roce_v2_post_send,
6248 .post_recv = hns_roce_v2_post_recv,
6249 .req_notify_cq = hns_roce_v2_req_notify_cq,
6250 .poll_cq = hns_roce_v2_poll_cq,
6251 .init_eq = hns_roce_v2_init_eq_table,
6252 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6253 .write_srqc = hns_roce_v2_write_srqc,
6254 .modify_srq = hns_roce_v2_modify_srq,
6255 .query_srq = hns_roce_v2_query_srq,
6256 .post_srq_recv = hns_roce_v2_post_srq_recv,
6257 .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6258 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6259};
6260
6261static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6262 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6263 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6264 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6265 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6266 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6267 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
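	/* required last entry */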
6269 {0, }
6270};
6271
6272MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6273
6274static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6275 struct hnae3_handle *handle)
6276{
6277 struct hns_roce_v2_priv *priv = hr_dev->priv;
6278 int i;
6279
6280 hr_dev->pci_dev = handle->pdev;
6281 hr_dev->dev = &handle->pdev->dev;
6282 hr_dev->hw = &hns_roce_hw_v2;
6283 hr_dev->dfx = &hns_roce_dfx_hw_v2;
6284 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6285 hr_dev->odb_offset = hr_dev->sdb_offset;
6286
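	/* Get info from NIC driver. */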
6288 hr_dev->reg_base = handle->rinfo.roce_io_base;
6289 hr_dev->caps.num_ports = 1;
6290 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6291 hr_dev->iboe.phy_port[0] = 0;
6292
6293 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6294 hr_dev->iboe.netdevs[0]->dev_addr);
6295
6296 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6297 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6298 i + handle->rinfo.base_vector);
6299
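	/* cmd issue mode: 0 is poll, 1 is event */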
6301 hr_dev->cmd_mod = 1;
6302 hr_dev->loop_idc = 0;
6303
6304 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6305 priv->handle = handle;
6306}
6307
6308static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6309{
6310 struct hns_roce_dev *hr_dev;
6311 int ret;
6312
6313 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6314 if (!hr_dev)
6315 return -ENOMEM;
6316
6317 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6318 if (!hr_dev->priv) {
6319 ret = -ENOMEM;
6320 goto error_failed_kzalloc;
6321 }
6322
6323 hns_roce_hw_v2_get_cfg(hr_dev, handle);
6324
6325 ret = hns_roce_init(hr_dev);
6326 if (ret) {
6327 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6328 goto error_failed_get_cfg;
6329 }
6330
6331 handle->priv = hr_dev;
6332
6333 return 0;
6334
6335error_failed_get_cfg:
6336 kfree(hr_dev->priv);
6337
6338error_failed_kzalloc:
6339 ib_dealloc_device(&hr_dev->ib_dev);
6340
6341 return ret;
6342}
6343
6344static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6345 bool reset)
6346{
6347 struct hns_roce_dev *hr_dev = handle->priv;
6348
6349 if (!hr_dev)
6350 return;
6351
6352 handle->priv = NULL;
6353
6354 hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
6355 hns_roce_handle_device_err(hr_dev);
6356
6357 hns_roce_exit(hr_dev);
6358 kfree(hr_dev->priv);
6359 ib_dealloc_device(&hr_dev->ib_dev);
6360}
6361
6362static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6363{
6364 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6365 const struct pci_device_id *id;
6366 struct device *dev = &handle->pdev->dev;
6367 int ret;
6368
6369 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6370
6371 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6372 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6373 goto reset_chk_err;
6374 }
6375
6376 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6377 if (!id)
6378 return 0;
6379
6380 ret = __hns_roce_hw_v2_init_instance(handle);
6381 if (ret) {
6382 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6383 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6384 if (ops->ae_dev_resetting(handle) ||
6385 ops->get_hw_reset_stat(handle))
6386 goto reset_chk_err;
6387 else
6388 return ret;
6389 }
6390
6391 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6392
6394 return 0;
6395
6396reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "Please retry later.\n");
6399
6400 return -EBUSY;
6401}
6402
6403static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6404 bool reset)
6405{
6406 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6407 return;
6408
6409 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6410
6411 __hns_roce_hw_v2_uninit_instance(handle, reset);
6412
6413 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

6415static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6416{
6417 struct hns_roce_dev *hr_dev;
6418
6419 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6420 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6421 return 0;
6422 }
6423
6424 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6425 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6426
6427 hr_dev = handle->priv;
6428 if (!hr_dev)
6429 return 0;
6430
6431 hr_dev->is_reset = true;
6432 hr_dev->active = false;
6433 hr_dev->dis_db = true;
6434
6435 hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
6436
6437 return 0;
6438}
6439
6440static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6441{
6442 struct device *dev = &handle->pdev->dev;
6443 int ret;
6444
6445 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6446 &handle->rinfo.state)) {
6447 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6448 return 0;
6449 }
6450
6451 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6452
6453 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6454 ret = __hns_roce_hw_v2_init_instance(handle);
6455 if (ret) {
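		/* The HNAE3_INIT_CLIENT reset notification reinitializes the
		 * RoCE engine. If the reinit fails, clear handle->priv to
		 * inform the NIC driver.
		 */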
6460 handle->priv = NULL;
6461 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6462 } else {
6463 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6464 dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6465 }
6466
6467 return ret;
6468}
6469
6470static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6471{
6472 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6473 return 0;
6474
6475 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6476 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6477 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6478 __hns_roce_hw_v2_uninit_instance(handle, false);
6479
6480 return 0;
6481}
6482
6483static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6484 enum hnae3_reset_notify_type type)
6485{
6486 int ret = 0;
6487
6488 switch (type) {
6489 case HNAE3_DOWN_CLIENT:
6490 ret = hns_roce_hw_v2_reset_notify_down(handle);
6491 break;
6492 case HNAE3_INIT_CLIENT:
6493 ret = hns_roce_hw_v2_reset_notify_init(handle);
6494 break;
6495 case HNAE3_UNINIT_CLIENT:
6496 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6497 break;
6498 default:
6499 break;
6500 }
6501
6502 return ret;
6503}
6504
6505static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6506 .init_instance = hns_roce_hw_v2_init_instance,
6507 .uninit_instance = hns_roce_hw_v2_uninit_instance,
6508 .reset_notify = hns_roce_hw_v2_reset_notify,
6509};
6510
6511static struct hnae3_client hns_roce_hw_v2_client = {
6512 .name = "hns_roce_hw_v2",
6513 .type = HNAE3_CLIENT_ROCE,
6514 .ops = &hns_roce_hw_v2_ops,
6515};
6516
6517static int __init hns_roce_hw_v2_init(void)
6518{
6519 return hnae3_register_client(&hns_roce_hw_v2_client);
6520}
6521
6522static void __exit hns_roce_hw_v2_exit(void)
6523{
6524 hnae3_unregister_client(&hns_roce_hw_v2_client);
6525}
6526
6527module_init(hns_roce_hw_v2_init);
6528module_exit(hns_roce_hw_v2_exit);
6529
6530MODULE_LICENSE("Dual BSD/GPL");
6531MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6532MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6533MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6534MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
6535