linux/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies */

#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_EQE_SIZE       (sizeof(struct mlx5_eqe))

struct mlx5_eq_tasklet {
        struct list_head      list;
        struct list_head      process_list;
        struct tasklet_struct task;
        spinlock_t            lock; /* protects the completion tasklet list */
};

struct mlx5_cq_table {
        spinlock_t              lock;   /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_eq {
        struct mlx5_core_dev    *dev;
        struct mlx5_cq_table    cq_table;
        __be32 __iomem          *doorbell;
        u32                     cons_index;
        struct mlx5_frag_buf    buf;
        unsigned int            vecidx;
        unsigned int            irqn;
        u8                      eqn;
        int                     nent;
        struct mlx5_rsc_debug   *dbg;
};
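
/*
 * Illustrative sketch (not in the original header): how a completion EQE's
 * CQN is resolved to its mlx5_core_cq through the per-EQ radix tree above.
 * The name example_eq_cq_get() is hypothetical; the real lookup lives in
 * eq.c. The spinlock only guards the tree itself -- mlx5_cq_hold() (from
 * linux/mlx5/cq.h) takes a reference so the CQ stays valid after unlock.
 */
static inline struct mlx5_core_cq *example_eq_cq_get(struct mlx5_eq *eq,
                                                     u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        if (cq)
                mlx5_cq_hold(cq); /* caller pairs this with mlx5_cq_put() */
        spin_unlock(&table->lock);

        return cq;
}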

struct mlx5_eq_async {
        struct mlx5_eq          core;
        struct notifier_block   irq_nb;
        spinlock_t              lock; /* To avoid irq EQ handle races with resiliency flows */
};

struct mlx5_eq_comp {
        struct mlx5_eq          core;
        struct notifier_block   irq_nb;
        struct mlx5_eq_tasklet  tasklet_ctx;
        struct list_head        list;
};

static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        /* HW toggles the EQE owner bit on each lap around the ring. The
         * entry is valid only when the owner bit matches the parity of the
         * current software lap; (cons_index & nent) extracts that parity,
         * since nent is a power of two.
         */
        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        /* Word 0 of the doorbell arms the EQ as well as updating the
         * consumer index; word 2 updates the consumer index only.
         */
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}
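
/*
 * Illustrative sketch (not in the original header): the consumer-side loop
 * the helpers above implement. example_eq_poll() is a hypothetical name;
 * the real handlers live in eq.c. Because hardware flips the owner bit on
 * every lap of the ring, next_eqe_sw() detects stale entries without
 * software having to clear them, and dma_rmb() keeps the payload reads
 * from being reordered before the ownership check.
 */
static inline int example_eq_poll(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int polled = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /* Read the EQE payload only after the owner check */
                dma_rmb();

                /* ... dispatch on eqe->type here ... */

                ++eq->cons_index;
                ++polled;
        }

        /* Publish the new consumer index and re-arm the EQ */
        eq_update_ci(eq, 1);

        return polled;
}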

int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
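
/*
 * Illustrative sketch (not in the original header): a plausible shape for
 * mlx5_eq_add_cq() given the structures above -- registering the CQ under
 * its CQN so completion events can be demultiplexed to it. The actual
 * implementation is in eq.c and may differ.
 */
static inline int example_eq_add_cq(struct mlx5_eq *eq,
                                    struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}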

u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

#endif