// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED = 0x9,
	MLX5_EQ_STATE_FIRED = 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

/* The polling budget must be smaller than MLX5_NUM_SPARE_EQE, the slack
 * reserved when the EQ is sized, so that the consumer index is always
 * updated before every entry in the EQ has been polled.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
	struct list_head comp_eqs_list;
	struct mlx5_eq_async pages_eq;
	struct mlx5_eq_async cmd_eq;
	struct mlx5_eq_async async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb cq_err_nb;

	struct mutex lock; /* serializes async EQ create/destroy */
	int num_comp_eqs;
	struct mlx5_irq_table *irq_table;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so
 * triggering an interrupt to handle completions / async events may not work.
 * Instead, use polling / busy-waiting.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags = 0;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

/* Poll the cmd EQ directly, to recover command completions that may have
 * been missed (e.g. on command timeout).
 */
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->irq_index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_buf_free(dev, &eq->buf);

	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);

	/* Async EQs must share irq index 0 */
	if (param->irq_index != 0) {
		err = -EINVAL;
		goto unlock;
	}

	err = create_map_eq(dev, eq, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_ASYNC_EQE,
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
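
/* A minimal usage sketch of the generic EQ API above (illustrative only, not
 * compiled into this file): how a mlx5_core consumer might create, arm and
 * tear down a generic EQ. The event mask, the 64-entry depth and the notifier
 * callback names are assumptions, not code from any in-tree consumer.
 *
 *	static int my_eq_cb(struct notifier_block *nb, unsigned long type,
 *			    void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		// consume eqe here, then let the IRQ layer continue the chain
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_eq_cb };
 *	struct mlx5_eq_param param = {
 *		.irq_index = 0,
 *		.nent = 64,
 *		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *	};
 *	struct mlx5_eq *eq = mlx5_eq_create_generic(dev, &param);
 *
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	err = mlx5_eq_enable(dev, eq, &my_nb);
 *	...
 *	mlx5_eq_disable(dev, eq, &my_nb);
 *	mlx5_eq_destroy_generic(dev, eq);
 */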

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (eq->nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;

	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
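
/* A minimal polling sketch (illustrative, not compiled here): a consumer that
 * owns a generic EQ drains it with mlx5_eq_get_eqe() and acknowledges the
 * consumed entries with mlx5_eq_update_ci(). The budget of 32 and the handler
 * my_handle_eqe() are assumptions for the example.
 *
 *	struct mlx5_eqe *eqe;
 *	int cc = 0;
 *
 *	while (cc < 32 && (eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		cc++;
 *		my_handle_eqe(eqe);	// hypothetical consumer handler
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);	// advance ci and re-arm the EQ
 */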

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ncomp_eqs;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_eqs = table->num_comp_eqs;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_eqs; i++) {
		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq_index = vecidx,
			.nent = nent,
		};
		err = create_map_eq(dev, &eq->core, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err) {
			destroy_unmap_eq(dev, &eq->core);
			kfree(eq);
			goto clean;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);

		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
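
/* An illustrative caller sketch (assumed, not part of this file): ULPs such
 * as mlx5_ib resolve a completion vector to an EQN before creating a CQ bound
 * to that EQ. my_create_cq() is a hypothetical placeholder for the caller's
 * CQ-creation path.
 *
 *	unsigned int irqn;
 *	int eqn;
 *	int err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *
 *	if (err)
 *		return err;
 *	err = my_create_cq(dev, eqn);	// e.g. sets c_eqn in the CQ context
 */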

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
					  vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock);
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int err;

	eq_table->num_comp_eqs =
		min_t(int,
		      mlx5_irq_get_num_comp(eq_table->irq_table),
		      num_eqs - MLX5_MAX_ASYNC_EQS);

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;
err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);

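/* An illustrative registration sketch: MLX5_NB_INIT and the PORT_CHANGE event
 * type are the real ones used elsewhere in mlx5 (see cq_err_nb above); the
 * handler body and its surrounding driver context are hypothetical.
 *
 *	static int my_port_nb_cb(struct notifier_block *nb, unsigned long type,
 *				 void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		// react to the port state change carried in eqe->data.port
 *		return NOTIFY_OK;
 *	}
 *
 *	struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, my_port_nb_cb, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &port_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &port_nb);
 */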
996