/* Copyright (c) Mellanox Technologies. All rights reserved. */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED = 0x9,
	MLX5_EQ_STATE_FIRED = 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

/* The polling budget must not exceed MLX5_NUM_SPARE_EQE, which is also used
 * when sizing the EQ; this guarantees the consumer index is updated before
 * every entry in the EQ has been polled (see the static_assert below).
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq_async	pages_eq;
	struct mlx5_eq_async	cmd_eq;
	struct mlx5_eq_async	async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_eqs;
	struct mlx5_irq_table	*irq_table;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		return 0;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Poll the EQ with its interrupt disabled and return the number of EQEs
 * that were consumed. Some architectures don't latch interrupts while they
 * are disabled, so polling here is the only way to guarantee that pending
 * completions are processed when the interrupt cannot fire.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
	eq_update_ci(eq, 1);

out:
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->irq_index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_buf_free(dev, &eq->buf);

	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	/* Async EQs must share irq index 0 */
	if (param->irq_index != 0) {
		err = -EINVAL;
		goto unlock;
	}

	err = create_map_eq(dev, eq, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_ASYNC_EQE,
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);
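
/* Usage sketch (illustrative only, not part of the driver): a mlx5_core
 * consumer creates a generic EQ, attaches its IRQ notifier via
 * mlx5_eq_enable(), and tears everything down in reverse order. The handler
 * "my_eq_int", notifier block "my_nb", and the event mask chosen here are
 * hypothetical; error handling is abbreviated.
 *
 *	struct mlx5_eq_param param = {
 *		.irq_index = 0,
 *		.nent = 64,
 *		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *	};
 *	struct mlx5_eq *eq;
 *
 *	my_nb.notifier_call = my_eq_int;
 *	eq = mlx5_eq_create_generic(dev, &param);
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	err = mlx5_eq_enable(dev, eq, &my_nb);
 *	...
 *	mlx5_eq_disable(dev, eq, &my_nb);
 *	mlx5_eq_destroy_generic(dev, eq);
 */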

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (eq->nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
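
/* Polling sketch (illustrative only): the owner of a generic EQ drains new
 * EQEs with mlx5_eq_get_eqe(), counting how many it consumed, then publishes
 * the new consumer index and re-arms the EQ with mlx5_eq_update_ci().
 * "handle_my_eqe" is a hypothetical per-EQE handler.
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		handle_my_eqe(eqe);
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);
 */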

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ncomp_eqs;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_eqs = table->num_comp_eqs;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_eqs; i++) {
		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq_index = vecidx,
			.nent = nent,
		};
		err = create_map_eq(dev, &eq->core, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err) {
			destroy_unmap_eq(dev, &eq->core);
			kfree(eq);
			goto clean;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);

		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
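
/* Usage sketch (illustrative only): a consumer creating a CQ typically maps
 * its chosen completion vector to an EQN first and programs that EQN into
 * the CQ context. "cqc" is a hypothetical CQ context buffer.
 *
 *	int eqn;
 *	unsigned int irqn;
 *	int err;
 *
 *	err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *	if (err)
 *		return err;
 *	MLX5_SET(cqc, cqc, c_eqn, eqn);
 */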

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
					  vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int err;

	eq_table->num_comp_eqs =
		min_t(int,
		      mlx5_irq_get_num_comp(eq_table->irq_table),
		      num_eqs - MLX5_MAX_ASYNC_EQS);

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;
err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
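
/* Usage sketch (illustrative only): subscribe to a single async event type
 * with MLX5_NB_INIT(), or to every event with the NOTIFY_ANY type, and
 * unregister on teardown. "port_event_handler" is a hypothetical callback
 * returning NOTIFY_OK. This mirrors how create_async_eqs() above registers
 * its CQ_ERROR notifier.
 *
 *	static struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, port_event_handler, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &port_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &port_nb);
 */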

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);