/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

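/* Result of checking the reset state before talking to hardware. These values
 * are consumed by v2_chk_mbox_is_avail(): OTHERS means no reset is in
 * progress and the command can be issued; SUCCESS means a reset has completed
 * and the command is faked as successful without touching hardware; EBUSY
 * means a reset is still in flight and the caller should report busy.
 */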
enum {
	CMD_RST_PRC_OTHERS,
	CMD_RST_PRC_SUCCESS,
	CMD_RST_PRC_EBUSY,
};

static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
				   struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The hns wr opcode's real value starts from 0. To distinguish initialized
 * from uninitialized map entries, 1 is added to the real value when the
 * mapping is defined, and 1 is subtracted again when the value is read back
 * in to_hr_opcode().
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND, SEND),
	HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ, RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_wqe_frmr_seg *fseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	u64 pbl_ba;

	/* use ib_access_flags */
	hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
	hr_reg_write_bool(fseg, FRMR_ATOMIC,
			  wr->access & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
	hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
	hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

	/* The msg_len and inv_key fields are reused to carry the PBL BA */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
	hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
	hr_reg_clear(fseg, FRMR_BLK_MODE);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to extended sge space, the left length in the
	 * current page may not be long enough for the inline data, so one
	 * sge may be split across several pages: copy what fits into the
	 * current page, then continue in the next extended sge page.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			idx += len / dseg_len;

			i++;
			if (i >= wr->num_sge)
				break;

			left_len_in_pg -= len;
			dseg += len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		} else {
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / dseg_len;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}

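/* Scatter valid SGEs into the extended SGE region of the SQ. The region is a
 * ring of qp->sge.sge_cnt entries, so the write index wraps with a mask;
 * zero-length SGEs are skipped and do not consume an entry.
 */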
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
			   unsigned int *sge_ind, unsigned int cnt)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int idx = *sge_ind;

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
			idx++;
			cnt--;
		}
		sge++;
	}

	*sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}

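/* Inline data that fits in HNS_ROCE_V2_MAX_RC_INL_INN_SZ bytes is embedded
 * directly after the RC WQE header; larger payloads are copied into the
 * extended SGE space and only the consumed SGE count is recorded in the WQE.
 */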
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
			       curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	roce_set_field(rc_sq_wqe->byte_20,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       (*sge_ind) & (qp->sge.sge_cnt - 1));

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
		     !!(wr->send_flags & IB_SEND_INLINE));
	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}

	roce_set_field(rc_sq_wqe->byte_16,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "not supported QP(0x%x) type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
			    hr_qp->state == IB_QPS_INIT ||
			    hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}

static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
		      struct hns_roce_ah *ah)
{
	struct ib_device *ib_dev = ah->ibah.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
		       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
		       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
		       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
		       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);

	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
		return -EINVAL;

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);

	ud_sq_wqe->sgid_index = ah->av.gid_index;

	memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
	memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
		       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);

	return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
		     !!(wr->send_flags & IB_SEND_SIGNALED));

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
		     !!(wr->send_flags & IB_SEND_SOLICITED));

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
		       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	roce_set_field(ud_sq_wqe->byte_20,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       curr_idx & (qp->sge.sge_cnt - 1));

	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
		       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

	ret = fill_ud_av(ud_sq_wqe, ah);
	if (ret)
		return ret;

	qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return 0;
}

static int set_rc_opcode(struct hns_roce_dev *hr_dev,
			 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;
	int ret = 0;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		else
			ret = -EOPNOTSUPP;
		break;
	case IB_WR_LOCAL_INV:
		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
		fallthrough;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		ret = -EINVAL;
	}

	if (unlikely(ret))
		return ret;

	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return ret;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return ret;
}

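/* Ring the SQ doorbell to publish the new producer index to hardware. If the
 * QP has entered the error state, generate a flush CQE instead of touching
 * the doorbell register.
 */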
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
		hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
		hr_reg_write(&sq_db, DB_PI, qp->sq.head);
		hr_reg_write(&sq_db, DB_SL, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
	}
}

static inline void update_rq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
			*qp->rdb.db_record =
				qp->rq.head & V2_DB_PRODUCER_IDX_M;
		} else {
			struct hns_roce_v2_db rq_db = {};

			hr_reg_write(&rq_db, DB_TAG, qp->qpn);
			hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
			hr_reg_write(&rq_db, DB_PI, qp->rq.head);

			hns_roce_write64(hr_dev, (__le32 *)&rq_db,
					 qp->rq.db_reg);
		}
	}
}

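/* Write a whole 64-byte WQE to the doorbell area as eight 64-bit stores.
 * This is the "direct WQE" fast path and must be skipped while the doorbell
 * is disabled or the hardware is resetting.
 */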
static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
			      u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int i;

	if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
		for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
			writeq_relaxed(*(val + i), dest + i);
}

static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
		       void *wqe)
{
#define HNS_ROCE_SL_SHIFT 2
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

	/* All kinds of DirectWQE have the same header field layout */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
		       V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
		       V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
		       qp->sl >> HNS_ROCE_SL_SHIFT);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
		       V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);

	hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	u32 nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
			write_dwqe(hr_dev, qp, wqe);
		else
			update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	}

	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;

	if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}

static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
				 u32 max_sge, bool rsv)
{
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	u32 i, cnt;

	for (i = 0, cnt = 0; i < wr->num_sge; i++) {
		/* Skip zero-length sge */
		if (!wr->sg_list[i].length)
			continue;
		set_data_seg_v2(dseg + cnt, wr->sg_list + i);
		cnt++;
	}

	/* Fill a reserved sge to make hw stop reading remaining segments */
	if (rsv) {
		dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
		dseg[cnt].addr = 0;
		dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
	} else {
		/* Clear remaining segments to make ROCEE stop reading */
		if (cnt < max_sge)
			memset(dseg + cnt, 0,
			       (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
	}
}

static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
			u32 wqe_idx, u32 max_sge)
{
	struct hns_roce_rinl_sge *sge_list;
	void *wqe = NULL;
	u32 i;

	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);

	/* When RQ inline is enabled, record the posted buffers so that
	 * received inline data can be copied back into them on completion.
	 */
	if (hr_qp->rq_inl_buf.wqe_cnt) {
		sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
		for (i = 0; i < wr->num_sge; i++) {
			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
			sge_list[i].len = wr->sg_list[i].length;
		}
	}
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 wqe_idx, nreq, max_sge;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
						  hr_qp->ibqp.recv_cq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > max_sge)) {
			ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
				  wr->num_sge, max_sge);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
		fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		update_rq_db(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
}

static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}

static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
				const struct ib_recv_wr *wr)
{
	struct ib_device *ib_dev = srq->ibsrq.device;

	if (unlikely(wr->num_sge > max_sge)) {
		ibdev_err(ib_dev,
			  "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
			  wr->num_sge, max_sge);
		return -EINVAL;
	}

	if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to check srqwq status, srqwq is full.\n");
		return -ENOMEM;
	}

	return 0;
}

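/* SRQ WQEs can complete out of order, so free slots are tracked in a bitmap:
 * allocation takes the first zero bit, and hns_roce_free_srq_wqe() clears it
 * again when the WQE completes.
 */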
static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 pos;

	pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
	if (unlikely(pos == srq->wqe_cnt))
		return -ENOSPC;

	bitmap_set(idx_que->bitmap, pos, 1);
	*wqe_idx = pos;
	return 0;
}

static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	unsigned int head;
	__le32 *buf;

	head = idx_que->head & (srq->wqe_cnt - 1);

	buf = get_idx_buf(idx_que, head);
	*buf = cpu_to_le32(wqe_idx);

	idx_que->head++;
}

static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
{
	hr_reg_write(db, DB_TAG, srq->srqn);
	hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
	hr_reg_write(db, DB_PI, srq->idx_que.head);
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	u32 max_sge;
	u32 wqe_idx;
	void *wqe;
	u32 nreq;

	spin_lock_irqsave(&srq->lock, flags);

	max_sge = srq->max_gs - srq->rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ret = check_post_srq_valid(srq, max_sge, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}

		ret = get_srq_wqe_idx(srq, &wqe_idx);
		if (unlikely(ret)) {
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe_buf(srq, wqe_idx);
		fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
		fill_wqe_idx(srq, wqe_idx);
		srq->wrid[wqe_idx] = wr->wr_id;
	}

	if (likely(nreq)) {
		update_srq_db(&srq_db, srq);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifying hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long val;
	int ret;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can rollback the operation like notifying hardware
	 * to free resources, HNAE3_INIT_CLIENT related process will exit with
	 * error to notify NIC driver to reschedule soft reset process once
	 * again.
	 */
	hr_dev->dis_db = true;

	ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
				val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
				HW_RESET_TIMEOUT_US, false, handle);
	if (!ret)
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
				    struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_stage;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are described
	 * as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);

	hw_resetting = ops->get_cmdq_stat(handle);
	if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);

	sw_resetting = ops->ae_dev_resetting(handle);
	if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return CMD_RST_PRC_OTHERS;
}

static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
		return true;

	if (ops->get_hw_reset_stat(handle))
		return true;

	if (ops->ae_dev_resetting(handle))
		return true;

	return false;
}

static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	u32 status;

	if (hr_dev->is_reset)
		status = CMD_RST_PRC_SUCCESS;
	else
		status = check_aedev_reset_status(hr_dev, priv->handle);

	*busy = (status == CMD_RST_PRC_EBUSY);

	return status == CMD_RST_PRC_OTHERS;
}

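/* The command queue (CMQ) is a ring of DMA-coherent descriptors shared with
 * firmware: the driver advances the producer index (PI) register as it
 * enqueues descriptors, and firmware advances the consumer index (CI) as it
 * executes them.
 */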
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = dma_alloc_coherent(hr_dev->dev, size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_free_coherent(hr_dev->dev,
			  ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			  ring->desc, ring->desc_dma_addr);

	ring->desc_dma_addr = 0;
}

static int init_csq(struct hns_roce_dev *hr_dev,
		    struct hns_roce_v2_cmq_ring *csq)
{
	dma_addr_t dma;
	int ret;

	csq->desc_num = CMD_CSQ_DESC_NUM;
	spin_lock_init(&csq->lock);
	csq->flag = TYPE_CSQ;
	csq->head = 0;

	ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
	if (ret)
		return ret;

	dma = csq->desc_dma_addr;
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
		   (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

	/* Make sure to write CI first and then PI */
	roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);

	return 0;
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	ret = init_csq(hr_dev, &priv->cmq.csq);
	if (ret)
		dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return tail == priv->cmq.csq.head;
}

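/* Copy the descriptors into the CSQ ring, kick the PI register, then busy-poll
 * the CI register until firmware has consumed everything or the timeout
 * expires. On success the descriptors are read back so callers can inspect
 * the per-descriptor return value.
 */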
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	u32 timeout = 0;
	u16 desc_ret;
	u32 tail;
	int ret;
	int i;

	spin_lock_bh(&csq->lock);

	tail = csq->head;

	for (i = 0; i < num; i++) {
		csq->desc[csq->head++] = desc[i];
		if (csq->head == csq->desc_num)
			csq->head = 0;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);

	do {
		if (hns_roce_cmq_csq_done(hr_dev))
			break;
		udelay(1);
	} while (++timeout < priv->cmq.tx_timeout);

	if (hns_roce_cmq_csq_done(hr_dev)) {
		for (ret = 0, i = 0; i < num; i++) {
			/* check the result of hardware write back */
			desc[i] = csq->desc[tail++];
			if (tail == csq->desc_num)
				tail = 0;

			desc_ret = le16_to_cpu(desc[i].retval);
			if (likely(desc_ret == CMD_EXEC_SUCCESS))
				continue;

			dev_err_ratelimited(hr_dev->dev,
					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
					    desc->opcode, desc_ret);
			ret = -EIO;
		}
	} else {
		/* FW/HW reset or incorrect number of desc */
		tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
		dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
			 csq->head, tail);
		csq->head = tail;

		ret = -EAGAIN;
	}

	spin_unlock_bh(&csq->lock);

	return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	bool busy;
	int ret;

	if (!v2_chk_mbox_is_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		if (!v2_chk_mbox_is_avail(hr_dev, &busy))
			return busy ? -EBUSY : 0;
	}

	return ret;
}

static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
			       dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
	struct hns_roce_cmd_mailbox *mbox;
	int ret;

	mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
	hns_roce_free_cmd_mailbox(hr_dev, mbox);
	return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (!ops->get_hw_reset_stat(handle)) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after reset.\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}

static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (ops->ae_dev_reset_cnt(handle) !=
		    hr_dev->reset_cnt) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after sw reset\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
}

static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
				       int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
		return;
	}

	if (ops->get_hw_reset_stat(handle)) {
		func_clr_hw_resetting_state(hr_dev, handle);
		return;
	}

	if (ops->ae_dev_resetting(handle) &&
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
		func_clr_sw_resetting_state(hr_dev, handle);
		return;
	}

	if (retval && !flag)
		dev_warn(hr_dev->dev,
			 "Func clear read failed, ret = %d.\n", retval);

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}

static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (check_device_is_in_reset(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	resp->rst_funcid_en = cpu_to_le32(vf_id);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (check_device_is_in_reset(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		resp->rst_funcid_en = cpu_to_le32(vf_id);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			if (vf_id == 0)
				hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}

static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *req_a;

	req_a = (struct hns_roce_cmq_req *)desc[0].data;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
	hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
	hns_roce_cmq_send(hr_dev, desc, 2);
}

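/* Clear the functions in reverse order so that VF resources are released
 * before the PF (function 0), which owns them, is finally cleared.
 */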
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = hr_dev->func_num - 1; i >= 0; i--) {
		__hns_roce_function_clear(hr_dev, i);
		if (i != 0)
			hns_roce_free_vf_resource(hr_dev, i);
	}
}

static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
				      false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to clear extended doorbell info, ret = %d.\n",
			  ret);

	return ret;
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		hr_dev->func_num = 1;
		return 0;
	}

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
				      true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		hr_dev->func_num = 1;
		return ret;
	}

	hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
	hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);

	return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 clock_cycles_of_1us;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
	else
		clock_cycles_of_1us = HNS_ROCE_1US_CFG;

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
	hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	enum hns_roce_opcode_type opcode;
	u32 func_num;
	int ret;

	if (is_vf) {
		opcode = HNS_ROCE_OPC_QUERY_VF_RES;
		func_num = 1;
	} else {
		opcode = HNS_ROCE_OPC_QUERY_PF_RES;
		func_num = hr_dev->func_num;
	}

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
	caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
	caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
	caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
	caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
	caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
	caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
	caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;

	if (is_vf) {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
					       func_num;
	} else {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
					       func_num;
	}

	return 0;
}

static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	u32 func_num, qp_num;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
	qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
	caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
	caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	return 0;
}

static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
	caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);

	return 0;
}

static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = load_func_res_caps(hr_dev, is_vf);
	if (ret) {
		dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
			is_vf ? "vf" : "pf");
		return ret;
	}

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		ret = load_ext_cfg_caps(hr_dev, is_vf);
		if (ret)
			dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
				ret, is_vf ? "vf" : "pf");
	}

	return ret;
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = query_func_resource_caps(hr_dev, false);
	if (ret)
		return ret;

	ret = load_pf_timer_res_caps(hr_dev);
	if (ret)
		dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
			ret);

	return ret;
}

static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
	return query_func_resource_caps(hr_dev, true);
}

static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					  u32 vf_id)
{
	struct hns_roce_vf_switch *swt;
	struct hns_roce_cmq_desc desc;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
	u32 vf_id;
	int ret;

	for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
		ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
		if (ret)
			return ret;
	}
	return 0;
}

static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);

	hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);

	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
	hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
			     vf_id * caps->gmv_bt_num);
	} else {
		hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
			     vf_id * caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
			     vf_id * caps->smac_bt_num);
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);

	hr_reg_write(req, EXT_CFG_VF_ID, vf_id);

	hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
	hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	u32 func_num = max_t(u32, 1, hr_dev->func_num);
	u32 vf_id;
	int ret;

	for (vf_id = 0; vf_id < func_num; vf_id++) {
		ret = config_vf_hem_resource(hr_dev, vf_id);
		if (ret) {
			dev_err(hr_dev->dev,
				"failed to config vf-%u hem res, ret = %d.\n",
				vf_id, ret);
			return ret;
		}

		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
			ret = config_vf_ext_resource(hr_dev, vf_id);
			if (ret) {
				dev_err(hr_dev->dev,
					"failed to config vf-%u ext res, ret = %d.\n",
					vf_id, ret);
				return ret;
			}
		}
	}

	return 0;
}

static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);

	hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
		     caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
		     caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
		     to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));

	hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
		     caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
		     caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
		     to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));

	hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
		     caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
		     caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
		     to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));

	hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
		     caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
		     caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
		     to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));

	hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
		     caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
		     caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
		     to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

1927static void set_default_caps(struct hns_roce_dev *hr_dev)
1928{
1929 struct hns_roce_caps *caps = &hr_dev->caps;
1930
1931 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1932 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1933 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1934 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1935 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1936 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1937 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1938 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1939 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1940
1941 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1942 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1943 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1944 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1945 caps->num_comp_vectors = 0;
1946
1947 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1948 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1949 caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1950 caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1951
1952 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1953 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1954 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1955 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1956 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1957 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1958 caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1959 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1960 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1961 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1962 caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
1963 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1964 caps->reserved_lkey = 0;
1965 caps->reserved_pds = 0;
1966 caps->reserved_mrws = 1;
1967 caps->reserved_uars = 0;
1968 caps->reserved_cqs = 0;
1969 caps->reserved_srqs = 0;
1970 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
1971
1972 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1973 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1974 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1975 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1976 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
1977
1978 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1979 caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
1980 caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
1981 caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
1982 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1983 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
1984 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
1985 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1986
1987 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1988 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1989 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
1990 HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
1991
1992 caps->pkey_table_len[0] = 1;
1993 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1994 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1995 caps->local_ca_ack_delay = 0;
1996 caps->max_mtu = IB_MTU_4096;
1997
1998 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
1999 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
2000
2001 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
2002 HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
2003 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
2004
2005 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
2006
2007 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2008 caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
2009 HNS_ROCE_CAP_FLAG_DIRECT_WQE;
2010 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
2011 } else {
2012 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
2013
2014
2015 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2016 caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
2017 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2018 }
2019}
2020
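/*
 * calc_pg_sz() derives the page-size exponents for a HEM table from its hop
 * count. A worked example with illustrative numbers only (assuming
 * PAGE_SIZE = 4K and BA_BYTE_LEN = 8): a hop_num = 2 table with
 * obj_size = 512 covers ctx_bt_num * (4096 / 8) * (4096 / 8) * (4096 / 512)
 * objects per chunk, i.e. ctx_bt_num * 512 * 512 * 8. The value written back
 * is ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk)) - an extra shift on top of
 * the base page size, not a byte count.
 */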
static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
		       u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
{
	u64 obj_per_chunk;
	u64 bt_chunk_size = PAGE_SIZE;
	u64 buf_chunk_size = PAGE_SIZE;
	u64 obj_per_chunk_default = buf_chunk_size / obj_size;

	*buf_page_size = 0;
	*bt_page_size = 0;

	switch (hop_num) {
	case 3:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 2:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 1:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case HNS_ROCE_HOP_NUM_0:
		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
		break;
	default:
		pr_err("table %u does not support hop_num = %u!\n", hem_type,
		       hop_num);
		return;
	}

	if (hem_type >= HEM_TYPE_MTT)
		*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
	else
		*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}

static void set_hem_page_size(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	/* EQ */
	caps->eqe_ba_pg_sz = 0;
	caps->eqe_buf_pg_sz = 0;

	/* Link Table */
	caps->llm_buf_pg_sz = 0;

	/* MR */
	caps->mpt_ba_pg_sz = 0;
	caps->mpt_buf_pg_sz = 0;
	caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
	caps->pbl_buf_pg_sz = 0;
	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
		   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
		   HEM_TYPE_MTPT);

	/* QP */
	caps->qpc_ba_pg_sz = 0;
	caps->qpc_buf_pg_sz = 0;
	caps->qpc_timer_ba_pg_sz = 0;
	caps->qpc_timer_buf_pg_sz = 0;
	caps->sccc_ba_pg_sz = 0;
	caps->sccc_buf_pg_sz = 0;
	caps->mtt_ba_pg_sz = 0;
	caps->mtt_buf_pg_sz = 0;
	calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
		   HEM_TYPE_QPC);

	if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
			   caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
			   &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);

	/* CQ */
	caps->cqc_ba_pg_sz = 0;
	caps->cqc_buf_pg_sz = 0;
	caps->cqc_timer_ba_pg_sz = 0;
	caps->cqc_timer_buf_pg_sz = 0;
	caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
	caps->cqe_buf_pg_sz = 0;
	calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
		   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
		   HEM_TYPE_CQC);
	calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
		   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);

	/* SRQ */
	if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
		caps->srqc_ba_pg_sz = 0;
		caps->srqc_buf_pg_sz = 0;
		caps->srqwqe_ba_pg_sz = 0;
		caps->srqwqe_buf_pg_sz = 0;
		caps->idx_ba_pg_sz = 0;
		caps->idx_buf_pg_sz = 0;
		calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
			   caps->srqc_hop_num, caps->srqc_bt_num,
			   &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
			   HEM_TYPE_SRQC);
		calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
			   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
			   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
		calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
			   caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
			   &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
	}

	/* GMV */
	caps->gmv_ba_pg_sz = 0;
	caps->gmv_buf_pg_sz = 0;
}

/* Apply the settings fixed by the driver on top of the queried or default
 * caps.
 */
static void apply_func_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	/* The following configurations don't need to be queried from
	 * firmware.
	 */
	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;

	caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
	caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

	caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
	caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;

	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;

	if (!caps->num_comp_vectors)
		caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
				(u32)priv->handle->rinfo.num_vectors - 2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;

		/* Fixed HIP09 context/CQE sizes, overriding whatever was set
		 * before this point.
		 */
		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
		caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;

		/* The following configurations are not queried from firmware */
		caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;

		caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->gid_table_len[0] = caps->gmv_bt_num *
					 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);

		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
							  caps->gmv_entry_sz);
	} else {
		u32 func_num = max_t(u32, 1, hr_dev->func_num);

		caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
		caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
		caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
		caps->gid_table_len[0] /= func_num;
	}

	if (hr_dev->is_vf) {
		caps->default_aeq_arm_st = 0x3;
		caps->default_ceq_arm_st = 0x3;
		caps->default_ceq_max_cnt = 0x1;
		caps->default_ceq_period = 0x10;
		caps->default_aeq_max_cnt = 0x1;
		caps->default_aeq_period = 0x10;
	}

	set_hem_page_size(hr_dev);
}

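/*
 * The PF capability query spans HNS_ROCE_QUERY_PF_CAPS_CMD_NUM command
 * descriptors sent as one chained batch: every descriptor except the last
 * carries HNS_ROCE_CMD_FLAG_NEXT, and the five responses a..e are then
 * decoded field by field below.
 */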
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct hns_roce_query_pf_caps_a *resp_a;
	struct hns_roce_query_pf_caps_b *resp_b;
	struct hns_roce_query_pf_caps_c *resp_c;
	struct hns_roce_query_pf_caps_d *resp_d;
	struct hns_roce_query_pf_caps_e *resp_e;
	int ctx_hop_num;
	int pbl_hop_num;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
					      true);
		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
	if (ret)
		return ret;

	resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
	resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;

	caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
	caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
	caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
	caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
	caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
	caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
	caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
	caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
	caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
	caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
	caps->num_aeq_vectors = resp_a->num_aeq_vectors;
	caps->num_other_vectors = resp_a->num_other_vectors;
	caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
	caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
	caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
	caps->cqe_sz = resp_a->cqe_sz;

	caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
	caps->irrl_entry_sz = resp_b->irrl_entry_sz;
	caps->trrl_entry_sz = resp_b->trrl_entry_sz;
	caps->cqc_entry_sz = resp_b->cqc_entry_sz;
	caps->srqc_entry_sz = resp_b->srqc_entry_sz;
	caps->idx_entry_sz = resp_b->idx_entry_sz;
	caps->sccc_sz = resp_b->sccc_sz;
	caps->max_mtu = resp_b->max_mtu;
	caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
	caps->min_cqes = resp_b->min_cqes;
	caps->min_wqes = resp_b->min_wqes;
	caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
	caps->pkey_table_len[0] = resp_b->pkey_table_len;
	caps->phy_num_uars = resp_b->phy_num_uars;
	ctx_hop_num = resp_b->ctx_hop_num;
	pbl_hop_num = resp_b->pbl_hop_num;

	caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_M,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_S);
	caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
	caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
		       HNS_ROCE_CAP_FLAGS_EX_SHIFT;

	caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_M,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_S);
	caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
						V2_QUERY_PF_CAPS_C_MAX_GID_M,
						V2_QUERY_PF_CAPS_C_MAX_GID_S);

	caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
	caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
	caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_M,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_S);
	caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
						V2_QUERY_PF_CAPS_C_MAX_ORD_M,
						V2_QUERY_PF_CAPS_C_MAX_ORD_S);
	caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
	caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
	caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
	caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
					 V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
					 V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
	caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);

	caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
	caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_S);

	caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
	caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
						  V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
						  V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
	caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
						  V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
						  V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
	caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_M,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_S);
	caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_M,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_S);
	caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
					    V2_QUERY_PF_CAPS_D_RSV_QPS_M,
					    V2_QUERY_PF_CAPS_D_RSV_QPS_S);
	caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
					     V2_QUERY_PF_CAPS_D_RSV_UARS_M,
					     V2_QUERY_PF_CAPS_D_RSV_UARS_S);
	caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
					     V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
					     V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
	caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
					     V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
					     V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
	caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
					    V2_QUERY_PF_CAPS_E_RSV_CQS_M,
					    V2_QUERY_PF_CAPS_E_RSV_CQS_S);
	caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
					     V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
					     V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
	caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
	caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
	caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
	caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
	caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);

	caps->qpc_hop_num = ctx_hop_num;
	caps->sccc_hop_num = ctx_hop_num;
	caps->srqc_hop_num = ctx_hop_num;
	caps->cqc_hop_num = ctx_hop_num;
	caps->mpt_hop_num = ctx_hop_num;
	caps->mtt_hop_num = pbl_hop_num;
	caps->cqe_hop_num = pbl_hop_num;
	caps->srqwqe_hop_num = pbl_hop_num;
	caps->idx_hop_num = pbl_hop_num;
	caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
					      V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
					      V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
	caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
					       V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
					       V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
	caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
					      V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
					      V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);

	return 0;
}

static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
				      false);

	hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
	hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		return 0;

	ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
				    caps->qpc_sz);
	if (ret) {
		dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
		return ret;
	}

	ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
				    caps->sccc_sz);
	if (ret)
		dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);

	return ret;
}

static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->func_num = 1;

	set_default_caps(hr_dev);

	ret = hns_roce_query_vf_resource(hr_dev);
	if (ret) {
		dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
		return ret;
	}

	apply_func_caps(hr_dev);

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);

	return ret;
}

static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_query_func_info(hr_dev);
	if (ret) {
		dev_err(dev, "failed to query func info, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(dev, "failed to config global param, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_set_vf_switch_param(hr_dev);
	if (ret) {
		dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_query_pf_caps(hr_dev);
	if (ret)
		set_default_caps(hr_dev);

	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
		return ret;
	}

	apply_func_caps(hr_dev);

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret) {
		dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
		return ret;
	}

	/* Configure the size of QPC, SCCC, etc. */
	return hns_roce_config_entry_size(hr_dev);
}

static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
		return ret;
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	if (hr_dev->is_vf)
		return hns_roce_v2_vf_profile(hr_dev);
	else
		return hns_roce_v2_pf_profile(hr_dev);
}

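/*
 * The extended link table ("LLM") is consumed by hardware as a singly linked
 * list of data pages: each 64-bit entry in the config buffer packs a page
 * address together with the index of the next entry, and the last entry
 * points back to 0 so the list terminates. The exact bit layout is whatever
 * HNS_ROCE_EXT_LLM_ENTRY() encodes in the v2 header.
 */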
static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
{
	u32 i, next_ptr, page_num;
	__le64 *entry = cfg_buf;
	dma_addr_t addr;
	u64 val;

	page_num = data_buf->npages;
	for (i = 0; i < page_num; i++) {
		addr = hns_roce_buf_page(data_buf, i);
		if (i == (page_num - 1))
			next_ptr = 0;
		else
			next_ptr = i + 1;

		val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
		entry[i] = cpu_to_le64(val);
	}
}

static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
			     struct hns_roce_link_table *table)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	struct hns_roce_buf *buf = table->buf;
	enum hns_roce_opcode_type opcode;
	dma_addr_t addr;

	opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);

	hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
	hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
	hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
	hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
	hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);

	addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
	hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
	hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
	hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
	hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);

	addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
	hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
	hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
	hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static struct hns_roce_link_table *
alloc_link_table_buf(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	u32 pg_shift, size, min_size;

	link_tbl = &priv->ext_llm;
	pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
	size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
	min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;

	/* Alloc the data buffer that the link table entries point to */
	size = max(size, min_size);
	link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
	if (IS_ERR(link_tbl->buf))
		return ERR_PTR(-ENOMEM);

	/* Alloc the config table: one 64-bit entry per data page */
	size = link_tbl->buf->npages * sizeof(u64);
	link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf) {
		hns_roce_buf_free(hr_dev, link_tbl->buf);
		return ERR_PTR(-ENOMEM);
	}

	return link_tbl;
}

static void free_link_table_buf(struct hns_roce_dev *hr_dev,
				struct hns_roce_link_table *tbl)
{
	if (tbl->buf) {
		u32 size = tbl->buf->npages * sizeof(u64);

		dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
				  tbl->table.map);
	}

	hns_roce_buf_free(hr_dev, tbl->buf);
}

static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_link_table *link_tbl;
	int ret;

	link_tbl = alloc_link_table_buf(hr_dev);
	if (IS_ERR(link_tbl))
		return -ENOMEM;

	if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
		ret = -EINVAL;
		goto err_alloc;
	}

	config_llm_table(link_tbl->buf, link_tbl->table.buf);
	ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
	if (ret)
		goto err_alloc;

	return 0;

err_alloc:
	free_link_table_buf(hr_dev, link_tbl);
	return ret;
}

static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	free_link_table_buf(hr_dev, &priv->ext_llm);
}

static void free_dip_list(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_dip *hr_dip;
	struct hns_roce_dip *tmp;
	unsigned long flags;

	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

	list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
		list_del(&hr_dip->node);
		kfree(hr_dip);
	}

	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
}

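/*
 * "free mr" support: the driver keeps a reserved PD, CQ and a set of
 * loopback RC QPs. Posting a zero-length RDMA WRITE on each reserved QP and
 * polling the resulting completions is used to flush outstanding hardware
 * access before an MR is actually freed (see hns_roce_v2_dereg_mr(), which
 * triggers this only on HIP08).
 */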
static void free_mr_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
		if (free_mr->rsv_qp[i]) {
			ret = ib_destroy_qp(free_mr->rsv_qp[i]);
			if (ret)
				ibdev_err(&hr_dev->ib_dev,
					  "failed to destroy qp in free mr.\n");

			free_mr->rsv_qp[i] = NULL;
		}
	}

	if (free_mr->rsv_cq) {
		ib_destroy_cq(free_mr->rsv_cq);
		free_mr->rsv_cq = NULL;
	}

	if (free_mr->rsv_pd) {
		ib_dealloc_pd(free_mr->rsv_pd);
		free_mr->rsv_pd = NULL;
	}
}

static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_cq_init_attr cq_init_attr = {};
	struct ib_qp_init_attr qp_init_attr = {};
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;
	int i;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ibdev_err(ibdev, "failed to create pd for free mr.\n");
		return PTR_ERR(pd);
	}
	free_mr->rsv_pd = pd;

	cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
	if (IS_ERR(cq)) {
		ibdev_err(ibdev, "failed to create cq for free mr.\n");
		ret = PTR_ERR(cq);
		goto create_failed;
	}
	free_mr->rsv_cq = cq;

	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.send_cq = free_mr->rsv_cq;
	qp_init_attr.recv_cq = free_mr->rsv_cq;
	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
		qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
		qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
		qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
		qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;

		qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
		if (IS_ERR(qp)) {
			ibdev_err(ibdev, "failed to create qp for free mr.\n");
			ret = PTR_ERR(qp);
			goto create_failed;
		}

		free_mr->rsv_qp[i] = qp;
	}

	return 0;

create_failed:
	free_mr_exit(hr_dev);

	return ret;
}

static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
				 struct ib_qp_attr *attr, int sl_num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	int loopback;
	int mask;
	int ret;

	hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
	hr_qp->free_mr_en = 1;

	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
	if (ret) {
		ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
			  ret);
		return ret;
	}

	loopback = hr_dev->loop_idc;

	/* Force loopback while moving the QP to RTR: each QP targets itself */
	hr_dev->loop_idc = 1;

	mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
	       IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	attr->qp_state = IB_QPS_RTR;
	attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	attr->path_mtu = IB_MTU_256;
	attr->dest_qp_num = hr_qp->qpn;
	attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;

	rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);

	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
	hr_dev->loop_idc = loopback;
	if (ret) {
		ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
			  ret);
		return ret;
	}

	mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
	       IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
	attr->qp_state = IB_QPS_RTS;
	attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
	attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
	attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
	if (ret)
		ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
			  ret);

	return ret;
}

static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
	struct ib_qp_attr attr = {};
	int ret;
	int i;

	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);
	rdma_ah_set_port_num(&attr.ah_attr, 1);

	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
		ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int free_mr_init(struct hns_roce_dev *hr_dev)
{
	int ret;

	ret = free_mr_alloc_res(hr_dev);
	if (ret)
		return ret;

	ret = free_mr_modify_qp(hr_dev);
	if (ret)
		goto err_modify_qp;

	return 0;

err_modify_qp:
	free_mr_exit(hr_dev);

	return ret;
}

static int get_hem_table(struct hns_roce_dev *hr_dev)
{
	unsigned int qpc_count;
	unsigned int cqc_count;
	unsigned int gmv_count;
	int ret;
	int i;

	/* Alloc memory for the GMV (SGID/SMAC/VLAN) table */
	for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
	     gmv_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
		if (ret)
			goto err_gmv_failed;
	}

	if (hr_dev->is_vf)
		return 0;

	/* Alloc memory for QPC Timer buffer space chunk */
	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
	     qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
		if (ret) {
			dev_err(hr_dev->dev, "QPC Timer get failed\n");
			goto err_qpc_timer_failed;
		}
	}

	/* Alloc memory for CQC Timer buffer space chunk */
	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
	     cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
		if (ret) {
			dev_err(hr_dev->dev, "CQC Timer get failed\n");
			goto err_cqc_timer_failed;
		}
	}

	return 0;

err_cqc_timer_failed:
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

err_gmv_failed:
	for (i = 0; i < gmv_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);

	return ret;
}

static void put_hem_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
		hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);

	if (hr_dev->is_vf)
		return;

	for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

	for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
}

static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	int ret;

	/* The hns ROCEE requires the extdb info to be cleared before use */
	ret = hns_roce_clear_extdb_list_info(hr_dev);
	if (ret)
		return ret;

	ret = get_hem_table(hr_dev);
	if (ret)
		return ret;

	if (hr_dev->is_vf)
		return 0;

	ret = hns_roce_init_link_table(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
		goto err_llm_init_failed;
	}

	return 0;

err_llm_init_failed:
	put_hem_table(hr_dev);

	return ret;
}

static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_function_clear(hr_dev);

	if (!hr_dev->is_vf)
		hns_roce_free_link_table(hr_dev);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
		free_dip_list(hr_dev);
}

static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mbox_msg *mbox_msg)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);

	mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
	mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
	mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
	mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
	mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
	mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
					 mbox_msg->token);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

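/*
 * Mailbox wait protocol, as implemented below: repeatedly post QUERY_MB_ST
 * and check MB_ST_HW_RUN_M in the response until the mailbox goes idle or
 * the timeout expires. If v2_chk_mbox_is_avail() reports the device is in a
 * reset flow, the wait is treated as complete rather than failing the
 * caller, since the reset itself flushes the mailbox.
 */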
static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
				 u8 *complete_status)
{
	struct hns_roce_mbox_status *mb_st;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = -EBUSY;
	u32 status;
	bool busy;

	mb_st = (struct hns_roce_mbox_status *)desc.data;
	end = msecs_to_jiffies(timeout) + jiffies;
	while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
		status = 0;
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
					      true);
		ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
		if (!ret) {
			status = le32_to_cpu(mb_st->mb_status_hw_run);
			/* No pending message exists in ROCEE mbox. */
			if (!(status & MB_ST_HW_RUN_M))
				break;
		} else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
			break;
		}

		if (time_after(jiffies, end)) {
			dev_err_ratelimited(hr_dev->dev,
					    "failed to wait for mbox status 0x%x\n",
					    status);
			return -ETIMEDOUT;
		}

		cond_resched();
		ret = -EBUSY;
	}

	if (!ret) {
		*complete_status = (u8)(status & MB_ST_COMPLETE_M);
	} else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
		/* Return success when the device is in a resetting flow */
		ret = 0;
		*complete_status = MB_ST_COMPLETE_M;
	}

	return ret;
}

static int v2_post_mbox(struct hns_roce_dev *hr_dev,
			struct hns_roce_mbox_msg *mbox_msg)
{
	u8 status = 0;
	int ret;

	/* Wait for the mailbox to become idle */
	ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
				    &status);
	if (unlikely(ret)) {
		dev_err_ratelimited(hr_dev->dev,
				    "failed to check post mbox status = 0x%x, ret = %d.\n",
				    status, ret);
		return ret;
	}

	/* Post the new mailbox message */
	ret = hns_roce_mbox_post(hr_dev, mbox_msg);
	if (ret)
		dev_err_ratelimited(hr_dev->dev,
				    "failed to post mailbox, ret = %d.\n", ret);

	return ret;
}

static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
{
	u8 status = 0;
	int ret;

	ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
				    &status);
	if (!ret) {
		if (status != MB_ST_COMPLETE_SUCC)
			return -EBUSY;
	} else {
		dev_err_ratelimited(hr_dev->dev,
				    "failed to check mbox status = 0x%x, ret = %d.\n",
				    status, ret);
	}

	return ret;
}

static void copy_gid(void *dest, const union ib_gid *gid)
{
#define GID_SIZE 4
	const union ib_gid *src = gid;
	__le32 (*p)[GID_SIZE] = dest;
	int i;

	if (!gid)
		src = &zgid;

	for (i = 0; i < GID_SIZE; i++)
		(*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
}

static int config_sgid_table(struct hns_roce_dev *hr_dev,
			     int gid_index, const union ib_gid *gid,
			     enum hns_roce_sgid_type sgid_type)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_sgid_tb *sgid_tb =
		(struct hns_roce_cfg_sgid_tb *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);

	roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
	roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);

	copy_gid(&sgid_tb->vf_sgid_l, gid);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int config_gmv_table(struct hns_roce_dev *hr_dev,
			    int gid_index, const union ib_gid *gid,
			    enum hns_roce_sgid_type sgid_type,
			    const struct ib_gid_attr *attr)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_gmv_tb_a *tb_a =
		(struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
	struct hns_roce_cfg_gmv_tb_b *tb_b =
		(struct hns_roce_cfg_gmv_tb_b *)desc[1].data;

	u16 vlan_id = VLAN_CFI_MASK;
	u8 mac[ETH_ALEN] = {};
	int ret;

	if (gid) {
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
		if (ret)
			return ret;
	}

	hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

	hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);

	copy_gid(&tb_a->vf_sgid_l, gid);

	roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
		       CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
	roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
		     vlan_id < VLAN_CFI_MASK);
	roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
		       CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);

	tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
	roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
		       CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);

	roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
		       CFG_GMV_TB_SGID_IDX_S, gid_index);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

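/*
 * GID entries land in different tables depending on the hardware generation:
 * HIP09 and later use the combined GMV (SGID/MAC/VLAN) table programmed by
 * config_gmv_table(), earlier revisions use the plain SGID table. A NULL gid
 * deletes the entry (copy_gid() then writes zgid).
 */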
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
			       const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
	int ret;

	if (gid) {
		if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			if (ipv6_addr_v4mapped((void *)gid))
				sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
			else
				sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
		} else if (attr->gid_type == IB_GID_TYPE_ROCE) {
			sgid_type = GID_TYPE_FLAG_ROCE_V1;
		}
	}

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
	else
		ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);

	if (ret)
		ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
			  ret);

	return ret;
}

static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       const u8 *addr)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_smac_tb *smac_tb =
		(struct hns_roce_cfg_smac_tb *)desc.data;
	u16 reg_smac_h;
	u32 reg_smac_l;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);

	reg_smac_l = *(u32 *)(&addr[0]);
	reg_smac_h = *(u16 *)(&addr[4]);

	roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
		       CFG_SMAC_TB_IDX_S, phy_port);
	roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
		       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
	smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

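/*
 * MPT address fields are stored pre-shifted: PBL page addresses are written
 * with a >> 6 (64-byte hardware access unit) and the PBL base address with a
 * >> 3 (8-byte unit). For illustration only: a pbl_ba of 0x2345678000 is
 * recorded as the lower/upper 32-bit halves of (0x2345678000 >> 3).
 */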
static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
			struct hns_roce_v2_mpt_entry *mpt_entry,
			struct hns_roce_mr *mr)
{
	u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t pbl_ba;
	int i, count;

	count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
				  ARRAY_SIZE(pages), &pbl_ba);
	if (count < 1) {
		ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
			  count);
		return -ENOBUFS;
	}

	/* Aligned to the hardware address access unit (64 bytes) */
	for (i = 0; i < count; i++)
		pages[i] >>= 6;

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);
	mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
	roce_set_field(mpt_entry->byte_48_mode_ba,
		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(pbl_ba >> 3));

	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));

	return 0;
}

static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
				  void *mb_buf, struct hns_roce_mr *mr)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
	hr_reg_write(mpt_entry, MPT_PD, mr->pd);
	hr_reg_enable(mpt_entry, MPT_L_INV_EN);

	hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
			  mr->access & IB_ACCESS_MW_BIND);
	hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
			  mr->access & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_write_bool(mpt_entry, MPT_RR_EN,
			  mr->access & IB_ACCESS_REMOTE_READ);
	hr_reg_write_bool(mpt_entry, MPT_RW_EN,
			  mr->access & IB_ACCESS_REMOTE_WRITE);
	hr_reg_write_bool(mpt_entry, MPT_LW_EN,
			  mr->access & IB_ACCESS_LOCAL_WRITE);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	if (mr->type != MR_TYPE_MR)
		hr_reg_enable(mpt_entry, MPT_PA);

	if (mr->type == MR_TYPE_DMA)
		return 0;

	if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
		hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);

	hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
	hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);

	ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);

	return ret;
}

static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	u32 mr_access_flags = mr->access;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));

		ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
	}

	return ret;
}

static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
				       void *mb_buf, struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_v2_mpt_entry *mpt_entry;
	dma_addr_t pbl_ba = 0;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
		ibdev_err(ibdev, "failed to find frmr mtr.\n");
		return -ENOBUFS;
	}

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);

	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(pbl_ba >> 3));

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));

	return 0;
}

static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mw->pdn);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
							       mw->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	mpt_entry->lkey = cpu_to_le32(mw->rkey);

	return 0;
}

static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	const struct ib_send_wr *bad_wr;
	struct ib_rdma_wr rdma_wr = {};
	struct ib_send_wr *send_wr;
	int ret;

	send_wr = &rdma_wr.wr;
	send_wr->opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
	if (ret) {
		ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
			  ret);
		return ret;
	}

	return 0;
}

static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
			       struct ib_wc *wc);

static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
	struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	unsigned long end;
	int cqe_cnt = 0;
	int npolled;
	int ret;
	int i;

	/*
	 * If the device initialization is not complete, or it is being
	 * uninstalled, there is no need to execute free mr.
	 */
	if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
	    priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
	    hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
		return;

	mutex_lock(&free_mr->mutex);

	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
		hr_qp = to_hr_qp(free_mr->rsv_qp[i]);

		ret = free_mr_post_send_lp_wqe(hr_qp);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
				  hr_qp->qpn, ret);
			break;
		}

		cqe_cnt++;
	}

	end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
	while (cqe_cnt) {
		npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
		if (npolled < 0) {
			ibdev_err(ibdev,
				  "failed to poll cqe for free mr, remain %d cqe.\n",
				  cqe_cnt);
			goto out;
		}

		if (time_after(jiffies, end)) {
			ibdev_err(ibdev,
				  "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
				  cqe_cnt);
			goto out;
		}
		cqe_cnt -= npolled;
	}

out:
	mutex_unlock(&free_mr->mutex);
}

static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_send_cmd_to_hw(hr_dev);
}

static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
}

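/*
 * Owner-bit scheme, for illustration with cq_depth = 256 (so ib_cq.cqe =
 * 255): bit 8 of the monotonically increasing consumer index n flips on
 * every wrap of the ring. A CQE is valid when its owner bit differs from
 * that wrap bit, so valid CQEs carry owner = 1 on the first pass, owner = 0
 * on the second, and so on.
 */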
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* The CQE is valid when its owner bit differs from the wrap bit of n */
	return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
	       NULL;
}

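/*
 * Two doorbell modes: with HNS_ROCE_CQ_FLAG_RECORD_DB the consumer index is
 * written to a host-memory record that hardware reads; otherwise an explicit
 * 64-bit doorbell is rung through the mapped register.
 */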
static inline void update_cq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_cq *hr_cq)
{
	if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
		*hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
	} else {
		struct hns_roce_v2_db cq_db = {};

		hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
		hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
		hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
		hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);

		hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
	}
}

static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now go backwards through the CQ, removing CQEs that belong to our
	 * QP by overwriting them with later entries, preserving each slot's
	 * owner bit.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
			if (srq && hr_reg_read(cqe, CQE_S_R)) {
				wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = hr_reg_read(dest, CQE_OWNER);
			memcpy(dest, cqe, hr_cq->cqe_size);
			hr_reg_write(dest, CQE_OWNER, owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		update_cq_db(hr_dev, hr_cq);
	}
}

static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}

static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
	hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
	hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
	hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
	hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);

	if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
		hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
		hr_reg_enable(cq_context, CQC_STASH);

	hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts[0]));
	hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[0])));
	hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
		     HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
	hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts[1]));
	hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
	hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
	hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
	hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
			  hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
	hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
		     ((u32)hr_cq->db.dma) >> 1);
	hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
		     hr_cq->db.dma >> 32);
	hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
		     HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	hr_reg_write(cq_context, CQC_CQ_PERIOD,
		     HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}

static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_v2_db cq_db = {};
	u32 notify_flag;

	/*
	 * IB_CQ_SOLICITED requests notification on the next solicited
	 * completion only; otherwise any completion triggers it.
	 */
	notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		      V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;

	hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
	hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
	hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
	hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
	hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);

	hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);

	return 0;
}

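/*
 * RQ inline receive: for small payloads the hardware deposits the data in
 * the receive WQE buffer itself, so the driver copies it out to the SGEs the
 * user posted. If the payload exceeds the posted SGE space, the work
 * completion is failed with IB_WC_LOC_LEN_ERR.
 */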
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp *qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
	wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);

	sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
	data_len = wc->byte_len;

	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	if (unlikely(data_len)) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}

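/*
 * Software completion path, used when hardware can no longer generate CQEs
 * (typically during reset): every WQE still outstanding between wq->tail and
 * wq->head is completed in software with IB_WC_WR_FLUSH_ERR.
 */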
static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
		   int num_entries, struct ib_wc *wc)
{
	unsigned int left;
	int npolled = 0;

	left = wq->head - wq->tail;
	if (left == 0)
		return 0;

	left = min_t(unsigned int, (unsigned int)num_entries, left);
	while (npolled < left) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->qp = &hr_qp->ibqp;

		wq->tail++;
		wc++;
		npolled++;
	}

	return npolled;
}

static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
				  struct ib_wc *wc)
{
	struct hns_roce_qp *hr_qp;
	int npolled = 0;

	list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
		npolled += sw_comp(hr_qp, &hr_qp->sq,
				   num_entries - npolled, wc + npolled);
		if (npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
		npolled += sw_comp(hr_qp, &hr_qp->rq,
				   num_entries - npolled, wc + npolled);
		if (npolled >= num_entries)
			goto out;
	}

out:
	return npolled;
}

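/* Translate the hardware CQE status into an ib_wc status. Any status other
 * than success or flush-in-error is dumped for diagnosis.
 */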
static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
			   struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
			   struct ib_wc *wc)
{
	static const struct {
		u32 cqe_status;
		enum ib_wc_status wc_status;
	} map[] = {
		{ HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
		{ HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
		{ HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
		{ HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
		{ HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
		{ HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
		  IB_WC_RETRY_EXC_ERR },
		{ HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
		{ HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR }
	};

	u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
	int i;

	wc->status = IB_WC_GENERAL_ERR;
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (cqe_status == map[i].cqe_status) {
			wc->status = map[i].wc_status;
			break;
		}

	if (likely(wc->status == IB_WC_SUCCESS ||
		   wc->status == IB_WC_WR_FLUSH_ERR))
		return;

	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
		       cq->cqe_size, false);
	wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);

	/* GENERAL_ERR is a vendor-specific error type that is not defined
	 * by the IB specification, so the QP is not moved to the error
	 * state for it; every other error triggers a flush.
	 */
	if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
		return;

	flush_cqe(hr_dev, qp);
}

static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
		      struct hns_roce_qp **cur_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_qp *hr_qp = *cur_qp;
	u32 qpn;

	qpn = hr_reg_read(cqe, CQE_LCL_QPN);

	if (!hr_qp || qpn != hr_qp->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			ibdev_err(&hr_dev->ib_dev,
				  "CQ %06lx with entry for unknown QPN %06x\n",
				  hr_cq->cqn, qpn);
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	return 0;
}

/* The mapped value is the real value plus one, so that a zero (unset)
 * array entry can be distinguished from a valid mapping to opcode 0.
 */
#define HR_WC_OP_MAP(hr_key, ib_key) \
	[HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key

static const u32 wc_send_op_map[] = {
	HR_WC_OP_MAP(SEND, SEND),
	HR_WC_OP_MAP(SEND_WITH_INV, SEND),
	HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
	HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
	HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
	HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
	HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
	HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
	HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
	HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
	HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
	HR_WC_OP_MAP(BIND_MW, REG_MR),
};

static int to_ib_wc_send_op(u32 hr_opcode)
{
	if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
		return -EINVAL;

	return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
					   -EINVAL;
}

static const u32 wc_recv_op_map[] = {
	HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
	HR_WC_OP_MAP(SEND, RECV),
	HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
	HR_WC_OP_MAP(SEND_WITH_INV, RECV),
};

static int to_ib_wc_recv_op(u32 hr_opcode)
{
	if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
		return -EINVAL;

	return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
					   -EINVAL;
}

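/* Fill in the opcode-dependent fields of a send completion from the CQE. */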
static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
{
	u32 hr_opcode;
	int ib_opcode;

	wc->wc_flags = 0;

	hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
	switch (hr_opcode) {
	case HNS_ROCE_V2_WQE_OP_RDMA_READ:
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		break;
	case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
	case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
	case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
	case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
	case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
		wc->byte_len = 8;
		break;
	default:
		break;
	}

	ib_opcode = to_ib_wc_send_op(hr_opcode);
	if (ib_opcode < 0)
		wc->status = IB_WC_GENERAL_ERR;
	else
		wc->opcode = ib_opcode;
}

static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
				     struct hns_roce_v2_cqe *cqe)
{
	return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
	       (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
	       hr_reg_read(cqe, CQE_RQ_INLINE);
}

static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
{
	struct hns_roce_qp *qp = to_hr_qp(wc->qp);
	u32 hr_opcode;
	int ib_opcode;
	int ret;

	wc->byte_len = le32_to_cpu(cqe->byte_cnt);

	hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
	switch (hr_opcode) {
	case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
	case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
		break;
	case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
		break;
	default:
		wc->wc_flags = 0;
	}

	ib_opcode = to_ib_wc_recv_op(hr_opcode);
	if (ib_opcode < 0)
		wc->status = IB_WC_GENERAL_ERR;
	else
		wc->opcode = ib_opcode;

	if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
		ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
		if (unlikely(ret))
			return ret;
	}

	wc->sl = hr_reg_read(cqe, CQE_SL);
	wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
	wc->slid = 0;
	wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
	wc->port_num = hr_reg_read(cqe, CQE_PORTN);
	wc->pkey_index = 0;

	if (hr_reg_read(cqe, CQE_VID_VLD)) {
		wc->vlan_id = hr_reg_read(cqe, CQE_VID);
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->vlan_id = 0xffff;
	}

	wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);

	return 0;
}

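/* Consume one CQE: advance the consumer index, look up the owning QP, and
 * translate the entry into an ib_wc. Returns -EAGAIN when the CQ is empty.
 */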
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_qp *qp = *cur_qp;
	struct hns_roce_srq *srq = NULL;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_wq *wq;
	int is_send;
	u16 wqe_idx;
	int ret;

	cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;

	/* Memory barrier: read the CQE contents only after its ownership
	 * bit has been checked.
	 */
	rmb();

	ret = get_cur_qp(hr_cq, cqe, &qp);
	if (ret)
		return ret;

	wc->qp = &qp->ibqp;
	wc->vendor_err = 0;

	wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);

	is_send = !hr_reg_read(cqe, CQE_S_R);
	if (is_send) {
		wq = &qp->sq;

		/* If sq_signal_bits is set, completions may be suppressed,
		 * so move the tail up to the WQE this CQE refers to.
		 */
		if (qp->sq_signal_bits)
			wq->tail += (wqe_idx - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;

		fill_send_wc(wc, cqe);
	} else {
		if (qp->ibqp.srq) {
			srq = to_hr_srq(qp->ibqp.srq);
			wc->wr_id = srq->wrid[wqe_idx];
			hns_roce_free_srq_wqe(srq, wqe_idx);
		} else {
			wq = &qp->rq;
			wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
			++wq->tail;
		}

		ret = fill_recv_wc(wc, cqe);
	}

	get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
	if (unlikely(wc->status != IB_WC_SUCCESS))
		return 0;

	return ret;
}

static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
			       struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&hr_cq->lock, flags);

	/* Once the device has entered the UNINIT state (e.g. during a
	 * reset), the hardware can no longer generate CQEs, so the
	 * remaining work requests are completed in software with a
	 * flush status.
	 */
	if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
		npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
			break;
	}

	if (npolled)
		update_cq_db(hr_dev, hr_cq);

out:
	spin_unlock_irqrestore(&hr_cq->lock, flags);

	return npolled;
}

static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
			      u32 step_idx, u8 *mbox_cmd)
{
	u8 cmd;

	switch (type) {
	case HEM_TYPE_QPC:
		cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
		break;
	case HEM_TYPE_SCCC:
		cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
		break;
	case HEM_TYPE_QPC_TIMER:
		cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
		break;
	case HEM_TYPE_CQC_TIMER:
		cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
		break;
	default:
		dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
		return -EINVAL;
	}

	*mbox_cmd = cmd + step_idx;

	return 0;
}

static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
			       dma_addr_t base_addr)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
	u64 addr = to_hr_hw_page_addr(base_addr);

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);

	hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
	hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
	hr_reg_write(req, CFG_GMV_BT_IDX, idx);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
			 dma_addr_t base_addr, u32 hem_type, u32 step_idx)
{
	int ret;
	u8 cmd;

	if (unlikely(hem_type == HEM_TYPE_GMV))
		return config_gmv_ba_to_hw(hr_dev, obj, base_addr);

	if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
		return 0;

	ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
	if (ret < 0)
		return ret;

	return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
}

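/* Program the base address of a HEM table chunk into the hardware. For
 * multi-hop tables, the level-0/level-1 base-address tables are written
 * for the intermediate steps and the data pages for the last step.
 */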
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       u32 step_idx)
{
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	chunk_ba_num = mhop.bt_chunk_size / 8;

	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	if (table->type == HEM_TYPE_SCCC)
		obj = mhop.l0_idx;

	if (check_whether_last_step(hop_num, step_idx)) {
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);
			ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
					    step_idx);
		}
	} else {
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
	}

	return ret;
}

static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table,
				 int tag, u32 step_idx)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	u8 cmd = 0xff;
	int ret;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	switch (table->type) {
	case HEM_TYPE_QPC:
		cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
		break;
	case HEM_TYPE_SCCC:
	case HEM_TYPE_QPC_TIMER:
	case HEM_TYPE_CQC_TIMER:
	case HEM_TYPE_GMV:
		return 0;
	default:
		dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
			 table->type);
		return 0;
	}

	cmd += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int qpc_size;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* The QPC and its mask are laid out back to back in the mailbox
	 * buffer; qpc_sz depends on the hardware revision.
	 */
	qpc_size = hr_dev->caps.qpc_sz;
	memcpy(mailbox->buf, context, qpc_size);
	memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
				HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static void set_access_flags(struct hns_roce_qp *hr_qp,
			     struct hns_roce_v2_qp_context *context,
			     struct hns_roce_v2_qp_context *qpc_mask,
			     const struct ib_qp_attr *attr, int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;

	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
			 attr->max_dest_rd_atomic : hr_qp->resp_depth;

	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
		       attr->qp_access_flags : hr_qp->atomic_rd_en;

	/* With no inbound read/atomic resources, only remote write can be
	 * honoured.
	 */
	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	hr_reg_write_bool(context, QPC_RRE,
			  access_flags & IB_ACCESS_REMOTE_READ);
	hr_reg_clear(qpc_mask, QPC_RRE);

	hr_reg_write_bool(context, QPC_RWE,
			  access_flags & IB_ACCESS_REMOTE_WRITE);
	hr_reg_clear(qpc_mask, QPC_RWE);

	hr_reg_write_bool(context, QPC_ATE,
			  access_flags & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_clear(qpc_mask, QPC_ATE);
	hr_reg_write_bool(context, QPC_EXT_ATE,
			  access_flags & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_clear(qpc_mask, QPC_EXT_ATE);
}

static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
			    struct hns_roce_v2_qp_context *context,
			    struct hns_roce_v2_qp_context *qpc_mask)
{
	hr_reg_write(context, QPC_SGE_SHIFT,
		     to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
					     hr_qp->sge.sge_shift));

	hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));

	hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
}

static inline int get_cqn(struct ib_cq *ib_cq)
{
	return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
}

static inline int get_pdn(struct ib_pd *ib_pd)
{
	return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
}

static void modify_qp_reset_to_init(struct ib_qp *ibqp,
				    const struct ib_qp_attr *attr,
				    int attr_mask,
				    struct hns_roce_v2_qp_context *context,
				    struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 1 at the same time, else set them to 0.
	 */
	hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));

	hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));

	hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));

	set_qpc_wqe_cnt(hr_qp, context, qpc_mask);

	/* No VLAN tag by default: 0xfff marks the VLAN ID as invalid */
	hr_reg_write(context, QPC_VLAN_ID, 0xfff);

	if (ibqp->qp_type == IB_QPT_XRC_TGT) {
		context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);

		hr_reg_enable(context, QPC_XRC_QP_TYPE);
	}

	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
		hr_reg_enable(context, QPC_RQ_RECORD_EN);

	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		hr_reg_enable(context, QPC_OWNER_MODE);

	hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
		     lower_32_bits(hr_qp->rdb.dma) >> 1);
	hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
		     upper_32_bits(hr_qp->rdb.dma));

	if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
		hr_reg_write_bool(context, QPC_RQIE,
				  hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);

	hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));

	if (ibqp->srq) {
		hr_reg_enable(context, QPC_SRQ_EN);
		hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
	}

	hr_reg_enable(context, QPC_FRE);

	hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));

	if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
		return;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
		hr_reg_enable(&context->ext, QPCEX_STASH);
}

static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 1 at the same time, else set them to 0.
	 */
	hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
	hr_reg_clear(qpc_mask, QPC_TST);

	hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
	hr_reg_clear(qpc_mask, QPC_PD);

	hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
	hr_reg_clear(qpc_mask, QPC_RX_CQN);

	hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
	hr_reg_clear(qpc_mask, QPC_TX_CQN);

	if (ibqp->srq) {
		hr_reg_enable(context, QPC_SRQ_EN);
		hr_reg_clear(qpc_mask, QPC_SRQ_EN);
		hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
		hr_reg_clear(qpc_mask, QPC_SRQN);
	}
}

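/* Write the RQ buffer addresses, hop numbers and page sizes of a QP into
 * its context.
 */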
static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_v2_qp_context *context,
			    struct hns_roce_v2_qp_context *qpc_mask)
{
	u64 mtts[MTT_MIN_COUNT] = { 0 };
	u64 wqe_sge_ba;
	int count;

	/* Search qp buf's mtts */
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
				  MTT_MIN_COUNT, &wqe_sge_ba);
	if (hr_qp->rq.wqe_cnt && count < 1) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
		return -EINVAL;
	}

	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
	qpc_mask->wqe_sge_ba = 0;

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 1 at the same time, else set them to 0.
	 */
	hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);

	hr_reg_write(context, QPC_SQ_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
				      hr_qp->sq.wqe_cnt));
	hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);

	hr_reg_write(context, QPC_SGE_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
				      hr_qp->sge.sge_cnt));
	hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);

	hr_reg_write(context, QPC_RQ_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
				      hr_qp->rq.wqe_cnt));
	hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);

	hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);

	hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);

	context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
	qpc_mask->rq_cur_blk_addr = 0;

	hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[0])));
	hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);

	context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
	qpc_mask->rq_nxt_blk_addr = 0;

	hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
	hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);

	return 0;
}

static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_v2_qp_context *context,
			    struct hns_roce_v2_qp_context *qpc_mask)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 sge_cur_blk = 0;
	u64 sq_cur_blk = 0;
	int count;

	/* Search qp buf's mtts */
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
	if (count < 1) {
		ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
			  hr_qp->qpn);
		return -EINVAL;
	}
	if (hr_qp->sge.sge_cnt > 0) {
		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
					  hr_qp->sge.offset,
					  &sge_cur_blk, 1, NULL);
		if (count < 1) {
			ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
				  hr_qp->qpn);
			return -EINVAL;
		}
	}

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 1 at the same time, else set them to 0.
	 */
	hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
		     lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
	hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);

	hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
		     lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
	hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
	hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
	hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);

	hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
		     lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
	hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);

	return 0;
}

static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
				  const struct ib_qp_attr *attr)
{
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		return IB_MTU_4096;

	return attr->path_mtu;
}

static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t trrl_ba;
	dma_addr_t irrl_ba;
	enum ib_mtu ib_mtu;
	const u8 *smac;
	u8 lp_pktn_ini;
	u64 *mtts;
	u8 *dmac;
	u32 port;
	int mtu;
	int ret;

	ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
	if (ret) {
		ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
		return ret;
	}

	/* Search IRRL's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				   hr_qp->qpn, &irrl_ba);
	if (!mtts) {
		ibdev_err(ibdev, "failed to find qp irrl_table.\n");
		return -EINVAL;
	}

	/* Search TRRL's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
				   hr_qp->qpn, &trrl_ba);
	if (!mtts) {
		ibdev_err(ibdev, "failed to find qp trrl_table.\n");
		return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
			  attr_mask);
		return -EINVAL;
	}

	hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
	hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
	context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
	qpc_mask->trrl_ba = 0;
	hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
	hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);

	context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
	qpc_mask->irrl_ba = 0;
	hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
	hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);

	hr_reg_enable(context, QPC_RMT_E2E);
	hr_reg_clear(qpc_mask, QPC_RMT_E2E);

	hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
	hr_reg_clear(qpc_mask, QPC_SIG_TYPE);

	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;

	smac = (const u8 *)hr_dev->dev_addr[port];
	dmac = (u8 *)attr->ah_attr.roce.dmac;
	/* Loop back when the destination MAC is our own, or when loopback
	 * is forced for the device.
	 */
	if (ether_addr_equal_unaligned(dmac, smac) ||
	    hr_dev->loop_idc == 0x1) {
		hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
		hr_reg_clear(qpc_mask, QPC_LBI);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
		hr_reg_clear(qpc_mask, QPC_DQPN);
	}

	memcpy(&(context->dmac), dmac, sizeof(u32));
	hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
	qpc_mask->dmac = 0;
	hr_reg_clear(qpc_mask, QPC_DMAC_H);

	ib_mtu = get_mtu(ibqp, attr);
	hr_qp->path_mtu = ib_mtu;

	mtu = ib_mtu_enum_to_int(ib_mtu);
	if (WARN_ON(mtu <= 0))
		return -EINVAL;
#define MAX_LP_MSG_LEN 16384
	/* MTU * (2 ^ lp_pktn_ini) should not exceed 16KB */
	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
	if (WARN_ON(lp_pktn_ini >= 0xF))
		return -EINVAL;

	if (attr_mask & IB_QP_PATH_MTU) {
		hr_reg_write(context, QPC_MTU, ib_mtu);
		hr_reg_clear(qpc_mask, QPC_MTU);
	}

	hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
	hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);

	/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
	hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
	hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);

	hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
	hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
	hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);

	context->rq_rnr_timer = 0;
	qpc_mask->rq_rnr_timer = 0;

	hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
	hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);

	/* rocee sends 2^lp_sgen_ini segments every time */
	hr_reg_write(context, QPC_LP_SGEN_INI, 3);
	hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);

	return 0;
}

static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr, int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	/* Alternate path and path migration are not supported */
	if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
		ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
		return -EINVAL;
	}

	ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
	if (ret) {
		ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
		return ret;
	}

	/*
	 * Set some fields in context to zero. Because the default values of
	 * all fields in context are zero, we need not set them to 0 again,
	 * but we should clear the relevant fields of context mask.
	 */
	hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);

	hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);

	hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
	hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
	hr_reg_clear(qpc_mask, QPC_IRRL_PSN);

	hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);

	hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);

	hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);

	hr_reg_clear(qpc_mask, QPC_CHECK_FLG);

	hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);

	return 0;
}

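/* Look up (or allocate) the DIP context index used by the DIP congestion
 * control algorithm: QPs targeting the same destination GID share one
 * index.
 */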
static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			   u32 *dip_idx)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
	u32 *head = &hr_dev->qp_table.idx_table.head;
	u32 *tail = &hr_dev->qp_table.idx_table.tail;
	struct hns_roce_dip *hr_dip;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

	spare_idx[*tail] = ibqp->qp_num;
	*tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);

	list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
		if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
			*dip_idx = hr_dip->dip_idx;
			goto out;
		}
	}

	/* If no dgid is found, a new dip and a mapping between dgid and
	 * dip_idx will be created.
	 */
	hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
	if (!hr_dip) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
	hr_dip->dip_idx = *dip_idx = spare_idx[*head];
	*head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
	list_add_tail(&hr_dip->node, &hr_dev->dip_list);

out:
	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
	return ret;
}

enum {
	CONG_DCQCN,
	CONG_WINDOW,
};

enum {
	UNSUPPORT_CONG_LEVEL,
	SUPPORT_CONG_LEVEL,
};

enum {
	CONG_LDCP,
	CONG_HC3,
};

enum {
	DIP_INVALID,
	DIP_VALID,
};

enum {
	WND_LIMIT,
	WND_UNLIMIT,
};

static int check_cong_type(struct ib_qp *ibqp,
			   struct hns_roce_congestion_algorithm *cong_alg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

	/* Different congestion types match different configurations */
	switch (hr_dev->caps.cong_type) {
	case CONG_TYPE_DCQCN:
		cong_alg->alg_sel = CONG_DCQCN;
		cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
		cong_alg->dip_vld = DIP_INVALID;
		cong_alg->wnd_mode_sel = WND_LIMIT;
		break;
	case CONG_TYPE_LDCP:
		cong_alg->alg_sel = CONG_WINDOW;
		cong_alg->alg_sub_sel = CONG_LDCP;
		cong_alg->dip_vld = DIP_INVALID;
		cong_alg->wnd_mode_sel = WND_UNLIMIT;
		break;
	case CONG_TYPE_HC3:
		cong_alg->alg_sel = CONG_WINDOW;
		cong_alg->alg_sub_sel = CONG_HC3;
		cong_alg->dip_vld = DIP_INVALID;
		cong_alg->wnd_mode_sel = WND_LIMIT;
		break;
	case CONG_TYPE_DIP:
		cong_alg->alg_sel = CONG_DCQCN;
		cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
		cong_alg->dip_vld = DIP_VALID;
		cong_alg->wnd_mode_sel = WND_LIMIT;
		break;
	default:
		ibdev_err(&hr_dev->ib_dev,
			  "error type(%u) for congestion selection.\n",
			  hr_dev->caps.cong_type);
		return -EINVAL;
	}

	return 0;
}

static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			   struct hns_roce_v2_qp_context *context,
			   struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_congestion_algorithm cong_field;
	struct ib_device *ibdev = ibqp->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	u32 dip_idx = 0;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
		return 0;

	ret = check_cong_type(ibqp, &cong_field);
	if (ret)
		return ret;

	hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
		     hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
	hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
	hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
	hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
	hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
		     cong_field.alg_sub_sel);
	hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
	hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
	hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
	hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
		     cong_field.wnd_mode_sel);
	hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);

	/* If DIP is disabled, there is no need to set the dip idx */
	if (cong_field.dip_vld == 0)
		return 0;

	ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
	if (ret) {
		ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
		return ret;
	}

	hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
	hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);

	return 0;
}

static int hns_roce_v2_set_path(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr,
				int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	const struct ib_gid_attr *gid_attr = NULL;
	int is_roce_protocol;
	u16 vlan_id = 0xffff;
	bool is_udp = false;
	u8 ib_port;
	u8 hr_port;
	int ret;

	/*
	 * If free_mr_en of the qp is set, the qp comes from the free-mr
	 * path and only performs a loopback operation, so just the service
	 * level needs to be configured here.
	 */
	if (hr_qp->free_mr_en) {
		hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
		hr_reg_clear(qpc_mask, QPC_SL);
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
		return 0;
	}

	ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
	hr_port = ib_port - 1;
	is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
			   rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;

	if (is_roce_protocol) {
		gid_attr = attr->ah_attr.grh.sgid_attr;
		ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
		if (ret)
			return ret;

		if (gid_attr)
			is_udp = (gid_attr->gid_type ==
				  IB_GID_TYPE_ROCE_UDP_ENCAP);
	}

	/* Only HIP08 needs to set the vlan_en bits in QPC */
	if (vlan_id < VLAN_N_VID &&
	    hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		hr_reg_enable(context, QPC_RQ_VLAN_EN);
		hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
		hr_reg_enable(context, QPC_SQ_VLAN_EN);
		hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
	}

	hr_reg_write(context, QPC_VLAN_ID, vlan_id);
	hr_reg_clear(qpc_mask, QPC_VLAN_ID);

	if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
		ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
			  grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
		return -EINVAL;
	}

	if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
		ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
		return -EINVAL;
	}

	hr_reg_write(context, QPC_UDPSPN,
		     is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
						 attr->dest_qp_num) :
			      0);
	hr_reg_clear(qpc_mask, QPC_UDPSPN);

	hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
	hr_reg_clear(qpc_mask, QPC_GMV_IDX);

	hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
	hr_reg_clear(qpc_mask, QPC_HOPLIMIT);

	ret = fill_cong_field(ibqp, attr, context, qpc_mask);
	if (ret)
		return ret;

	hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
	hr_reg_clear(qpc_mask, QPC_TC);

	hr_reg_write(context, QPC_FL, grh->flow_label);
	hr_reg_clear(qpc_mask, QPC_FL);
	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));

	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
		ibdev_err(ibdev,
			  "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
			  hr_qp->sl, MAX_SERVICE_LEVEL);
		return -EINVAL;
	}

	hr_reg_write(context, QPC_SL, hr_qp->sl);
	hr_reg_clear(qpc_mask, QPC_SL);

	return 0;
}

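/* Table of QP state transitions accepted by the hardware; any transition
 * not marked true here is rejected by modify_qp.
 */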
static bool check_qp_state(enum ib_qp_state cur_state,
			   enum ib_qp_state new_state)
{
	static const bool sm[][IB_QPS_ERR + 1] = {
		[IB_QPS_RESET] = { [IB_QPS_RESET] = true,
				   [IB_QPS_INIT] = true },
		[IB_QPS_INIT] = { [IB_QPS_RESET] = true,
				  [IB_QPS_INIT] = true,
				  [IB_QPS_RTR] = true,
				  [IB_QPS_ERR] = true },
		[IB_QPS_RTR] = { [IB_QPS_RESET] = true,
				 [IB_QPS_RTS] = true,
				 [IB_QPS_ERR] = true },
		[IB_QPS_RTS] = { [IB_QPS_RESET] = true,
				 [IB_QPS_RTS] = true,
				 [IB_QPS_ERR] = true },
		[IB_QPS_SQD] = {},
		[IB_QPS_SQE] = {},
		[IB_QPS_ERR] = { [IB_QPS_RESET] = true,
				 [IB_QPS_ERR] = true }
	};

	return sm[cur_state][new_state];
}

static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
				      enum ib_qp_state cur_state,
				      enum ib_qp_state new_state,
				      struct hns_roce_v2_qp_context *context,
				      struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	int ret = 0;

	if (!check_qp_state(cur_state, new_state)) {
		ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
		return -EINVAL;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
					qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
				       qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
					    qpc_mask);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
					   qpc_mask);
	}

	return ret;
}

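/* Validate the local ACK timeout against the range supported by the
 * hardware revision; HIP08 additionally applies a fixed offset to the
 * value before it is written to the QPC.
 */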
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 20.\n");
			return false;
		}
		*timeout += QP_ACK_TIMEOUT_OFFSET;
	} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
		if (*timeout > QP_ACK_TIMEOUT_MAX) {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 31.\n");
			return false;
		}
	}

	return true;
}

static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
				      struct hns_roce_v2_qp_context *context,
				      struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret = 0;
	u8 timeout;

	if (attr_mask & IB_QP_AV) {
		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		timeout = attr->timeout;
		if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
			hr_reg_write(context, QPC_AT, timeout);
			hr_reg_clear(qpc_mask, QPC_AT);
		}
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
		hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);

		hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
		hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
		hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);

		hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
		hr_reg_clear(qpc_mask, QPC_RNR_CNT);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
		hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);

		hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
		hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);

		hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
		hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);

		hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
			     attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
		hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);

		hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
		hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);

		hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
		hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
	}

	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
	    attr->max_dest_rd_atomic) {
		hr_reg_write(context, QPC_RR_MAX,
			     fls(attr->max_dest_rd_atomic - 1));
		hr_reg_clear(qpc_mask, QPC_RR_MAX);
	}

	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
		hr_reg_clear(qpc_mask, QPC_SR_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		hr_reg_write(context, QPC_MIN_RNR_TIME,
			     hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
			     HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
		hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
	}

	if (attr_mask & IB_QP_RQ_PSN) {
		hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
		hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);

		hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
		hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = cpu_to_le32(attr->qkey);
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	return ret;
}

static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
					  const struct ib_qp_attr *attr,
					  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}
}

static void clear_qp(struct hns_roce_qp *hr_qp)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->send_cq)
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
				     hr_qp->qpn, NULL);

	if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
				     hr_qp->qpn, ibqp->srq ?
				     to_hr_srq(ibqp->srq) : NULL);

	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
		*hr_qp->rdb.db_record = 0;

	hr_qp->rq.head = 0;
	hr_qp->rq.tail = 0;
	hr_qp->sq.head = 0;
	hr_qp->sq.tail = 0;
	hr_qp->next_sge = 0;
}

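/* Record the current SQ/RQ producer indices in the context so that the
 * hardware flushes exactly the WQEs posted so far when the QP enters the
 * error state.
 */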
static void v2_set_flushed_fields(struct ib_qp *ibqp,
				  struct hns_roce_v2_qp_context *context,
				  struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	unsigned long sq_flag = 0;
	unsigned long rq_flag = 0;

	if (ibqp->qp_type == IB_QPT_XRC_TGT)
		return;

	spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
	hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
	hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
	hr_qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);

	if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI)
		return;

	spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
	hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
	hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
	spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
}

static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context ctx[2];
	struct hns_roce_v2_qp_context *context = ctx;
	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	/*
	 * In v2 engine, software passes context and context mask to hardware
	 * when modifying qp. If software needs to modify some fields in
	 * context, it should set all bits of the relevant fields in context
	 * mask to 1 at the same time, else set them to 0.
	 */
	memset(context, 0, hr_dev->caps.qpc_sz);
	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);

	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
					 new_state, context, qpc_mask);
	if (ret)
		goto out;

	/* When QP state is err, SQ and RQ WQE should be flushed */
	if (new_state == IB_QPS_ERR)
		v2_set_flushed_fields(ibqp, context, qpc_mask);

	/* Configure the optional fields */
	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
					 qpc_mask);
	if (ret)
		goto out;

	hr_reg_write_bool(context, QPC_INV_CREDIT,
			  to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
			  ibqp->srq);
	hr_reg_clear(qpc_mask, QPC_INV_CREDIT);

	/* Every state transition must update the QP state field */
	hr_reg_write(context, QPC_QP_ST, new_state);
	hr_reg_clear(qpc_mask, QPC_QP_ST);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);

	if (new_state == IB_QPS_RESET && !ibqp->uobject)
		clear_qp(hr_qp);

out:
	return ret;
}

static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
	static const enum ib_qp_state map[] = {
		[HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
		[HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
		[HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
		[HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
		[HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
		[HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
		[HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
		[HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
	};

	return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}

static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_v2_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
				hr_qp->qpn);
	if (ret)
		goto out;

	memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context context = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int tmp_qp_state;
	int state;
	int ret;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
	if (ret) {
		ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
		ret = -EINVAL;
		goto out;
	}

	state = hr_reg_read(&context, QPC_QP_ST);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		ibdev_err(ibdev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);

	qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
	qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
	qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
	qp_attr->qp_access_flags =
		((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
		((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
		((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
	    hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       hr_reg_read(&context, QPC_SL));
		grh->flow_label = hr_reg_read(&context, QPC_FL);
		grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
		grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
		grh->traffic_class = hr_reg_read(&context, QPC_TC);

		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
	qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);

	qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
	qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
	qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
	qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	qp_attr->cap.max_inline_data = hr_qp->max_inline_data;

	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;

	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;

out:
	mutex_unlock(&hr_qp->mutex);
	return ret;
}

static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
{
	return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
		 hr_qp->ibqp.qp_type == IB_QPT_UD ||
		 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
		hr_qp->state != IB_QPS_RESET);
}

static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
					 struct hns_roce_qp *hr_qp,
					 struct ib_udata *udata)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cq *send_cq, *recv_cq;
	unsigned long flags;
	int ret = 0;

	if (modify_qp_is_ok(hr_qp)) {
		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET);
		if (ret)
			ibdev_err(ibdev,
				  "failed to modify QP to RST, ret = %d.\n",
				  ret);
	}

	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(send_cq, recv_cq);

	if (!udata) {
		if (recv_cq)
			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
					       (hr_qp->ibqp.srq ?
						to_hr_srq(hr_qp->ibqp.srq) :
						NULL));

		if (send_cq && send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
	}

	hns_roce_qp_remove(hr_dev, hr_qp);

	hns_roce_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);

	return ret;
}

static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret;

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
			  hr_qp->qpn, ret);

	hns_roce_qp_destroy(hr_dev, hr_qp, udata);

	return 0;
}

static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* set scc ctx clear done flag */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
		goto out;
	}

	/* clear scc context */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
		goto out;
	}

	/* query whether the scc context clear is done */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
				  ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		msleep(20);
	}

	ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}

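/* Base addresses stored in the SRQ context are right-shifted: the low
 * three bits are guaranteed to be zero by the tables' 8-byte alignment.
 */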
#define DMA_IDX_SHIFT 3
#define DMA_WQE_SHIFT 3

static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
					      struct hns_roce_srq_context *ctx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = srq->ibsrq.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	u64 mtts_idx[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));

	hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
	hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
		     upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));

	hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));

	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts_idx[0]));
	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));

	hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts_idx[1]));
	hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));

	return 0;
}

static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
{
	struct ib_device *ibdev = srq->ibsrq.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_srq_context *ctx = mb_buf;
	u64 mtts_wqe[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle_wqe = 0;
	int ret;

	memset(ctx, 0, sizeof(*ctx));

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	hr_reg_write(ctx, SRQC_SRQ_ST, 1);
	hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
			  srq->ibsrq.srq_type == IB_SRQT_XRC);
	hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
	hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
	hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
	hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
	hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
	hr_reg_write(ctx, SRQC_RQWS,
		     srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));

	hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
				      srq->wqe_cnt));

	hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
	hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
		     upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));

	hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));

	return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}

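/* Context modification commands use a mailbox buffer holding two copies of
 * the context: the new field values, followed by a mask initialized to all
 * ones in which the bits of every field to be updated are cleared.
 */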
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
				  struct ib_srq_attr *srq_attr,
				  enum ib_srq_attr_mask srq_attr_mask,
				  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_srq_context *srqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Resizing SRQs is not supported yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (srq_attr_mask & IB_SRQ_LIMIT) {
		if (srq_attr->srq_limit > srq->wqe_cnt)
			return -EINVAL;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		srq_context = mailbox->buf;
		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

		memset(srqc_mask, 0xff, sizeof(*srqc_mask));

		hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
		hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
					HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to handle cmd of modifying SRQ, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	return 0;
}

static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
				HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd of querying SRQ, ret = %d.\n",
			  ret);
		goto out;
	}

	attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
	attr->max_wr = srq->wqe_cnt;
	attr->max_sge = srq->max_gs - srq->rsv_sge;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

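/* Update the CQ moderation parameters: cq_count is the CQE count that
 * triggers an event, cq_period the coalescing timeout. On HIP08 the period
 * is multiplied by HNS_ROCE_CLOCK_ADJUST, presumably because the period
 * register on that revision ticks at a different rate.
 */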
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
	hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
				 cq_period);
			cq_period = HNS_ROCE_MAX_CQ_PERIOD;
		}
		cq_period *= HNS_ROCE_CLOCK_ADJUST;
	}
	hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
	hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
				HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when modifying CQ, ret = %d.\n",
			  ret);

	return ret;
}

static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
		container_of(work, struct hns_roce_work, work);
	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "Path migration succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		ibdev_warn(ibdev, "Path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		ibdev_warn(ibdev, "Send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
			  irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_warn(ibdev, "SRQ limit reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_warn(ibdev, "SRQ last wqe reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catas error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		ibdev_warn(ibdev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		ibdev_warn(ibdev, "Function level reset.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		ibdev_err(ibdev, "XRC domain violation error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
		ibdev_err(ibdev, "Invalid xrceth error.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}

static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
				      struct hns_roce_eq *eq, u32 queue_num)
{
	struct hns_roce_work *irq_work;

	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
	if (!irq_work)
		return;

	INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
	irq_work->hr_dev = hr_dev;
	irq_work->event_type = eq->event_type;
	irq_work->sub_type = eq->sub_type;
	irq_work->queue_num = queue_num;
	queue_work(hr_dev->irq_workq, &irq_work->work);
}

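/* Ring the EQ doorbell: report the new consumer index to hardware and
 * re-arm the EQ according to its arm state so that further events raise
 * an interrupt.
 */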
static void update_eq_db(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	struct hns_roce_v2_db eq_db = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_AEQ :
			     HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);

		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_CEQ :
			     HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);

	hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}

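/* EQE ownership scheme: hardware toggles the owner bit each time it wraps
 * the ring, so an entry is valid when its owner bit XORed with the software
 * wrap parity (cons_index & entries) is set; otherwise the ring is empty.
 */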
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	int aeqe_found = 0;
	int event_type;
	u32 queue_num;
	int sub_type;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership of it
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		queue_num = roce_get_field(aeqe->event.queue_event.num,
					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			hns_roce_qp_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);

		aeqe = next_aeqe_sw_v2(eq);
	}

	update_eq_db(eq);
	return aeqe_found;
}

static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	int ceqe_found = 0;
	u32 cqn;

	while (ceqe) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership of it
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
				     HNS_ROCE_V2_CEQE_COMP_CQN_S);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = 1;

		ceqe = next_ceqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return ceqe_found;
}

static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asynchronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

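/* Abnormal interrupts signal fatal conditions. An AEQ overflow cannot be
 * recovered within the driver, so after acknowledging the status bit a
 * function-level reset is requested through the hnae3 framework.
 */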
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		struct pci_dev *pdev = hr_dev->pci_dev;
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
		const struct hnae3_ae_ops *ops = ae_dev->ops;

		dev_err(dev, "AEQ overflow!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
		dev_err(dev, "RAS interrupt!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else {
		dev_err(dev, "no abnormal irq found.\n");
	}

	return IRQ_RETVAL(int_work);
}

static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, u32 enable_flag)
{
	int i;

	for (i = 0; i < eq_num; i++)
		roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
			   i * EQ_REG_OFFSET, enable_flag);

	roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
	roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}

static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
	struct device *dev = hr_dev->dev;
	int ret;
	u8 cmd;

	if (eqn < hr_dev->caps.num_comp_vectors)
		cmd = HNS_ROCE_CMD_DESTROY_CEQC;
	else
		cmd = HNS_ROCE_CMD_DESTROY_AEQC;

	ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}

static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);
}

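/* The EQC address fields are split at hardware-defined bit offsets, so the
 * base-table address and the current/next EQE addresses are programmed in
 * low/middle/high parts with the shifts below.
 */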
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	init_eq_config(hr_dev, eq);

	/* if the EQ is not multi-hop, the eqe buffer only uses one trunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
	hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
	hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
	hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
				 eq->eq_period);
			eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
		}
		eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
	}

	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
	hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
	hr_reg_write(eqc, EQC_SHIFT, eq->shift);
	hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
	hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
	hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);

	return 0;
}

static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
				  0);
	if (err)
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);

	return err;
}

static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq, u8 eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

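/* irq_names is laid out as abnormal, then AEQ, then CEQ names, while
 * eq_table->eq holds CEQs first; the index arithmetic below matches each
 * vector with its name and handler.
 */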
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "failed to request irq.\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}

static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	int other_num;
	int comp_num;
	int aeq_num;
	int irq_num;
	int eq_num;
	u8 eq_cmd;
	int ret;
	int i;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = hr_dev->caps.ceqe_size;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = hr_dev->caps.aeqe_size;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "failed to create eq.\n");
			goto err_create_eq_fail;
		}
	}

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "failed to create irq workqueue.\n");
		ret = -ENOMEM;
		goto err_create_eq_fail;
	}

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
				     other_num);
	if (ret) {
		dev_err(dev, "failed to request irq.\n");
		goto err_request_irq_fail;
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	destroy_workqueue(hr_dev->irq_workq);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}

static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);
}

static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.dereg_mr = hns_roce_v2_dereg_mr,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->dfx = &hns_roce_dfx_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_cfg;
	}

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		ret = free_mr_init(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev, "failed to init free mr!\n");
			goto error_failed_roce_init;
		}
	}

	handle->priv = hr_dev;

	return 0;

error_failed_roce_init:
	hns_roce_exit(hr_dev);

error_failed_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_exit(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "please retry later.\n");

	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

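/* hnae3 reset notifications arrive in stages: DOWN stops doorbells and
 * marks the device inactive, UNINIT tears the instance down, and INIT
 * rebuilds it once the hardware reset has completed.
 */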
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* when the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialized here; if that fails, clear
		 * handle->priv to inform the NIC driver.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");