/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"
#include "lib/eq.h"

static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
			       struct mlx5_core_dct *dct);
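
/*
 * Look up a tracked resource by its RSN and take a reference on it under
 * the table lock. The reference is dropped with mlx5_core_put_rsc(),
 * which completes ->free once the last user is gone.
 */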
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}
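
/*
 * Per-resource-type masks of the async event types firmware is expected
 * to deliver; is_event_type_allowed() flags anything outside these masks
 * as misrouted.
 */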
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}
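
/*
 * Notifier for async resource events. The EQE carries the resource
 * number in its low 24 bits; the resource type is folded in above
 * MLX5_USER_INDEX_LEN so that QPs, RQs, SQs and DCTs share one radix
 * tree. QP/RQ/SQ events are forwarded to the owner's ->event() callback,
 * while a DCT_DRAINED event completes the DCT's 'drained' waiter.
 */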
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dev *dev;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
		break;
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	table = container_of(nb, struct mlx5_qp_table, nb);
	priv = container_of(table, struct mlx5_priv, qp_table);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);

	common = mlx5_get_rsc(table, rsn);
	if (!common) {
		mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
		return NOTIFY_OK;
	}

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}
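
/*
 * Tracked-resource bookkeeping: create_resource_common() publishes a
 * resource in the radix tree under its RSN and arms the refcount and
 * 'free' completion; destroy_resource_common() unpublishes it, drops
 * the initial reference and waits until every reference taken via
 * mlx5_get_rsc() has been put.
 */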
static int create_resource_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp,
				  int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}

static void destroy_resource_common(struct mlx5_core_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
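
/*
 * A DCT must be drained before DESTROY_DCT may be issued, so drain it
 * and wait for the DCT_DRAINED event. A drain failure is tolerated only
 * when the device is already in internal-error state, in which case we
 * go straight to destroy. 'need_cleanup' is false on the create error
 * path, where the DCT was never published in the resource table.
 */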
static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
				  struct mlx5_core_dct *dct, bool need_cleanup)
{
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n",
			       qp->qpn, err);
		return err;
	}
	wait_for_completion(&dct->drained);
destroy:
	if (need_cleanup)
		destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			    (void *)&out, sizeof(out));
	return err;
}
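
/*
 * Create a DCT and publish it in the resource table; if tracking fails,
 * the firmware object is torn down again without table cleanup.
 */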
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen,
			 u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
		return err;
	}

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct, false);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
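
/*
 * Create a QP: execute CREATE_QP, publish the QP in the resource table
 * so async events can be routed to it, and add it to debugfs (a debugfs
 * failure is logged but not fatal). If tracking fails, the QP is
 * destroyed with an explicit DESTROY_QP mailbox.
 */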
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}

int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	return _mlx5_core_destroy_dct(dev, dct, true);
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
	int err;

	mlx5_debug_qp_remove(dev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
			     u32 timeout_usec)
{
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	/* the firmware field is in units of 100 usec */
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
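
/*
 * Modify-QP commands use differently sized mailboxes per transition, so
 * their buffers are allocated dynamically. 'mbox_info' bundles the
 * input/output buffers with their lengths; mbox_alloc()/mbox_free()
 * manage both together (kfree(NULL) is a no-op, so a partial allocation
 * failure is handled uniformly).
 */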
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid)
{
	mbox->out = NULL;
	mbox->in = NULL;

/* allocate in/out mailboxes sized for the given command type */
#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* fill the fields common to every modify-QP command */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
	do { \
		MLX5_SET(typ##_in, in, opcode, _opcode); \
		MLX5_SET(typ##_in, in, qpn, _qpn); \
		MLX5_SET(typ##_in, in, uid, _uid); \
	} while (0)

/* additionally copy in the QP context and optional-parameter mask */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
	do { \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
		       MLX5_ST_SZ_BYTES(qpc)); \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}
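
/*
 * mlx5_core_qp_modify() drives a QP through the transport state machine
 * by issuing the transition command selected in modify_qp_mbox_alloc().
 * An illustrative (hypothetical) caller holding a populated QPC could
 * run the usual bring-up sequence like so:
 *
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, qp);
 *	if (!err)
 *		err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, qp);
 *	if (!err)
 *		err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc, qp);
 */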
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox, qp->uid);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
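
/*
 * QP-table lifecycle: init sets up the lock and radix tree, creates the
 * debugfs root and registers the notifier that routes async resource
 * events; cleanup unregisters the notifier and removes debugfs.
 */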
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev, &table->nb);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	mlx5_notifier_unregister(dev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
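
/*
 * Tracked RQs and SQs: raw transport objects created through the
 * transobj API and then published in the QP table (reusing struct
 * mlx5_core_qp with the RQN/SQN stored in 'qpn') so their async events
 * are dispatched like QP events.
 */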
static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;
	u32 sqn;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->uid = MLX5_GET(create_sq_in, in, uid);
	sq->qpn = sqn;
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
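
/*
 * Q counters: allocate or free a hardware queue-counter set and query
 * its statistics, optionally clearing them via the 'reset' flag.
 */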
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
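
/*
 * Hold/put a temporary reference on a tracked resource so it cannot be
 * freed while in use. Each successful hold must be paired with a put,
 * e.g. (illustrative):
 *
 *	res = mlx5_core_res_hold(dev, qpn, MLX5_RES_QP);
 *	if (res) {
 *		... use res ...
 *		mlx5_core_res_put(res);
 *	}
 */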
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	return mlx5_get_rsc(table, rsn);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_hold);

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_put);