#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H

#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>

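/*
 * Core driver state for one completion queue (CQ). A short orientation,
 * summarizing how the driver uses the main fields (less common fields
 * are left undocumented here):
 *
 *  - cqn:        CQ number assigned by firmware at creation time.
 *  - cqe_sz:     CQE size in bytes (64 or 128, see cqe_sz_to_mlx_sz()).
 *  - set_ci_db:  doorbell record through which the consumer index is
 *                published to hardware (see mlx5_cq_set_ci()).
 *  - arm_db:     doorbell record used when arming the CQ for the next
 *                completion event (see mlx5_cq_arm()).
 *  - comp/event: callbacks invoked on completion and on asynchronous
 *                (e.g. error) events, respectively.
 *  - cons_index: consumer index; only its low 24 bits are visible to
 *                hardware.
 *  - arm_sn:     arm sequence number, encoded into every arm request.
 */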
struct mlx5_core_cq {
	u32 cqn;
	int cqe_sz;
	__be32 *set_ci_db;
	__be32 *arm_db;
	atomic_t refcount;
	struct completion free;
	unsigned int vector;
	unsigned int irqn;
	void (*comp)(struct mlx5_core_cq *);
	void (*event)(struct mlx5_core_cq *, enum mlx5_event);
	struct mlx5_uar *uar;
	u32 cons_index;
	unsigned int arm_sn;
	struct mlx5_rsc_debug *dbg;
	int pid;
	struct {
		struct list_head list;
		void (*comp)(struct mlx5_core_cq *);
		void *priv;
	} tasklet_ctx;
	int reset_notify_added;
	struct list_head reset_notify;
};

enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR	= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR	= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR	= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR		= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR		= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR		= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR	= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR	= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR	= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR		= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR	= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR	= 0x22,
};

enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_SIG_ERR	= 12,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};
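
/*
 * Example: decoding the op_own byte of a CQE. An illustrative sketch,
 * not part of the driver API: the op_own field itself lives in struct
 * mlx5_cqe64 (linux/mlx5/device.h). The opcode sits in the high nibble
 * and the ownership bit is bit 0.
 */
static inline u8 mlx5_cqe_opcode_example(u8 op_own)
{
	return op_own >> 4;	/* one of the MLX5_CQE_* opcodes above */
}

static inline u8 mlx5_cqe_owner_example(u8 op_own)
{
	return op_own & MLX5_CQE_OWNER_MASK;
}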

enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
};

enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};

struct mlx5_cq_modify_params {
	int type;
	union {
		struct {
			u32 page_offset;
			u8  log_cq_size;
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};

enum {
	CQE_SIZE_64	= 0,
	CQE_SIZE_128	= 1,
};

/* Translate a CQE size in bytes to the device encoding. */
static inline int cqe_sz_to_mlx_sz(u8 size)
{
	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
}

static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
	/* Hardware sees only the low 24 bits of the consumer index. */
	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}
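
/*
 * Example: typical consumer-index handling in a poll loop. A sketch
 * only ("npolled" stands for however many CQEs the caller has just
 * processed); it uses nothing beyond what this header declares.
 */
static inline void mlx5_cq_flush_ci_example(struct mlx5_core_cq *cq,
					    int npolled)
{
	/* Step past the CQEs consumed by this poll ... */
	cq->cons_index += npolled;
	/* ... and publish the new index through the doorbell record. */
	mlx5_cq_set_ci(cq);
}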

enum {
	/* Request an event for the next solicited completion only ... */
	MLX5_CQ_DB_REQ_NOT_SOL	= 1 << 24,
	/* ... or for any next completion. */
	MLX5_CQ_DB_REQ_NOT	= 0 << 24
};

static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cons_index & 0xffffff;

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
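
/*
 * Example: re-arming a CQ after a poll has drained it. A sketch; where
 * the driver actually advances arm_sn is a policy decision made
 * elsewhere, it is bumped here only to show how the sequence number
 * feeds the doorbell encoding.
 */
static inline void mlx5_cq_rearm_example(struct mlx5_core_cq *cq,
					 void __iomem *uar_page,
					 spinlock_t *doorbell_lock)
{
	cq->arm_sn++;
	/* Ask for an event on the next completion, solicited or not. */
	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, uar_page, doorbell_lock,
		    cq->cons_index);
}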

int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
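
/*
 * Example: minimal create/destroy flow. A sketch, not the driver's real
 * setup path: real callers build "in" from the mlx5_ifc create_cq
 * layout and install the comp/event handlers before any traffic can
 * complete on the CQ.
 */
static inline int mlx5_cq_lifetime_example(struct mlx5_core_dev *dev,
					   struct mlx5_core_cq *cq,
					   u32 *in, int inlen)
{
	int err;

	err = mlx5_core_create_cq(dev, cq, in, inlen);
	if (err)
		return err;

	/* ... post work, poll completions, re-arm as needed ... */

	return mlx5_core_destroy_cq(dev, cq);
}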

#endif /* MLX5_CORE_CQ_H */