#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

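/* Look up a QP/RQ/SQ by its resource serial number (RSN) and take a reference
 * on it.  The caller must drop the reference with mlx5_core_put_rsc() when it
 * is done with the resource; returns NULL if the RSN is not in the table.
 */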
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}

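/* Async event types the firmware is expected to report for each resource
 * type.  Events outside these masks are warned about and dropped by
 * mlx5_rsc_event().
 */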
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}

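/* Dispatch an async event to the QP, RQ or SQ identified by @rsn.  The
 * resource type is carried in the bits above MLX5_USER_INDEX_LEN of the RSN.
 */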
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;

	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}

out:
	mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
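/* Decode an ODP page fault EQE into a struct mlx5_pagefault and pass it to
 * the page fault handler registered on the faulting QP.
 */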
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
	struct mlx5_core_qp *qp;
	struct mlx5_pagefault pfault;

	if (!common) {
		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
			       qpn);
		return;
	}
	qp = container_of(common, struct mlx5_core_qp, common);

	pfault.event_subtype = eqe->sub_type;
	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
	pfault.bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

	mlx5_core_dbg(dev,
		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
		      eqe->sub_type, pfault.flags);

	switch (eqe->sub_type) {
	case MLX5_PFAULT_SUBTYPE_RDMA:
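		/* Fault on an incoming RDMA operation, described by its
		 * r_key, virtual address and operation length.
		 */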
		pfault.rdma.r_key =
			be32_to_cpu(pf_eqe->rdma.r_key);
		pfault.rdma.packet_size =
			be16_to_cpu(pf_eqe->rdma.packet_length);
		pfault.rdma.rdma_op_len =
			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
		pfault.rdma.rdma_va =
			be64_to_cpu(pf_eqe->rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
			      qpn, pfault.rdma.r_key);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
			      pfault.rdma.rdma_op_len);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
			      pfault.rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	case MLX5_PFAULT_SUBTYPE_WQE:
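		/* Fault while processing a WQE, identified by its index
		 * within the queue.
		 */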
		pfault.wqe.wqe_index =
			be16_to_cpu(pf_eqe->wqe.wqe_index);
		pfault.wqe.packet_size =
			be16_to_cpu(pf_eqe->wqe.packet_length);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
			      qpn, pfault.wqe.wqe_index);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	default:
		mlx5_core_warn(dev,
			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
			       eqe->sub_type, qpn);
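		/* An unsupported sub-type is still handed to the page fault
		 * handler below so the fault can be resumed.
		 */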
	}

	if (qp->pfault_handler) {
		qp->pfault_handler(qp, &pfault);
	} else {
		mlx5_core_err(dev,
			      "ODP event for QP %08x, without a fault handler in QP\n",
			      qpn);
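		/* Without a handler the fault is never resumed and the QP
		 * cannot make forward progress.
		 */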
	}

	mlx5_core_put_rsc(common);
}
#endif

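/* Register a QP, RQ or SQ in the shared resource table so async events and
 * page faults can be routed back to it.  The radix tree key is the queue
 * number with the resource type encoded above MLX5_USER_INDEX_LEN.
 */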
static int create_qprqsq_common(struct mlx5_core_dev *dev,
				struct mlx5_core_qp *qp,
				int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
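/* Remove the resource from the table and wait until every reference taken
 * through mlx5_get_rsc() has been dropped.
 */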
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
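/* Create a QP with the CREATE_QP command and register it in the resource
 * table and debugfs.  If registration fails, the newly created QP is
 * destroyed before returning the error.
 */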
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
	int err;

	mlx5_debug_qp_remove(dev, qp);

	destroy_qprqsq_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
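/* Scratch command mailboxes for the QP modify path; their sizes depend on the
 * specific state-transition command being issued.
 */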
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

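/* Allocate and fill the command mailbox matching a QP state-transition
 * opcode.  Transitions to RESET and ERROR carry no QP context; the remaining
 * transitions also copy the QP context and the optional parameter mask.
 */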
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ) \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
	MLX5_SET(typ##_in, in, opcode, _opcode); \
	MLX5_SET(typ##_in, in, qpn, _qpn)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}
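/* Execute a QP state-transition command; the mailbox layout is selected by
 * modify_qp_mbox_alloc() above based on the opcode.
 */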
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
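/* Resume a QP stalled on an ODP page fault.  The MLX5_PAGE_FAULT_RESUME_*
 * bits in @flags and the @error indication are translated into the
 * corresponding PAGE_FAULT_RESUME command fields.
 */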
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 flags, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, qpn, qpn);

	if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
		MLX5_SET(page_fault_resume_in, in, req_res, 1);
	if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
		MLX5_SET(page_fault_resume_in, in, read_write, 1);
	if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
		MLX5_SET(page_fault_resume_in, in, rdma, 1);
	if (error)
		MLX5_SET(page_fault_resume_in, in, error, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
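/* "Tracked" RQs and SQs are created through the transport object commands and
 * then registered in the QP resource table so that they, too, receive async
 * events via mlx5_rsc_event().
 */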
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->qpn = rqn;
	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5_core_destroy_rq(dev, rq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_qprqsq_common(dev, rq);
	mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;
	u32 sqn;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->qpn = sqn;
	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	mlx5_core_destroy_sq(dev, sq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_qprqsq_common(dev, sq);
	mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
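/* Q counters are hardware counter sets identified by counter_set_id.  They
 * are allocated and freed here and read with QUERY_Q_COUNTER, optionally
 * clearing the counters on read.
 */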
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);

int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
				  u32 *out_of_buffer)
{
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
	if (!err)
		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
					  out_of_buffer);

	kvfree(out);
	return err;
}