#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

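/*
 * Dispatch an asynchronous event reported by the hardware to the QP it
 * belongs to.  The QP is looked up under the table lock and a reference is
 * held across the callback so the QP cannot be freed while it runs.
 */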
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

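/*
 * Validate the requested receive queue size against the device limits and
 * derive the actual WQE count, maximum SGEs and WQE stride for the RQ.
 * A QP attached to an SRQ gets an empty receive queue.
 */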
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of the requested RQ capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* A QP attached to an SRQ does not use its own RQ */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "max_recv_wr must be 0 when an SRQ is attached\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space must set max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 page_size;
	u32 max_cnt;

	/* Sanity check the SQ parameters supplied by user space */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

	/* Size the buffer; SQ, extended SGE area and RQ are page aligned */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						page_size);
		}
	}

	return 0;
}

static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR, sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Work out how many SGEs each SQ WQE can carry */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* GSI QPs place every SGE in the extended SGE space */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	/* Size the buffer; SQ, extended SGE area and RQ are page aligned */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report back the WR and sge numbers actually provided */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* Inline sends are not supported for kernel QPs */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
		return 0;

	return 1;
}

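/*
 * Common QP creation path shared by kernel and user-space QPs: size the
 * work queues, allocate or pin the WQE buffer, set up the MTT, doorbells
 * and QPN, and register the QP in the device's QP table.
 */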
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* Allocate the receive inline WQE descriptors */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* Allocate one contiguous sg_list buffer for all RQ WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Point each WQE at its slice of the shared buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
					init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						hr_qp->umem->page_shift,
						&hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.db_addr, &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_mtt;
			}
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register addresses */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate the QP buffer */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;

		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* On HW v1 the GSI QP does not use the QPC/IRRL/TRRL tables */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

		/* Tell user space that the RQ record doorbell is supported */
		resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_qp;

		hr_qp->rdb_en = 1;
	}
	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (ib_pd->uobject) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

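/*
 * ib_device verb for QP creation: RC QPs go straight through the common
 * path, GSI (QP1) creation is kernel-only and derives its QP number from
 * the physical port; other QP types are rejected.
 */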
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* The GSI QP can only be created by the kernel */
		if (pd->uobject) {
			dev_err(dev, "GSI QP creation from user space is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* GSI QP number: per physical port when max_sq_sg <= 2, otherwise QP1 */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "unsupported QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		     attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		     attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu (%d) invalid while modify qp\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

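/*
 * Check whether posting @nreq more work requests would overflow the work
 * queue.  The cheap unlocked check is retried under the CQ lock so that a
 * concurrent completion that advances the tail is taken into account.
 */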
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

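/*
 * Initialise the QP table: the radix tree used for QPN lookup and the QPN
 * bitmap, with the first SQP_NUM numbers reserved for the special QPs.
 */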
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Reserve the QPNs below SQP_NUM for the special QPs */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}