1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef MLX5_CORE_CQ_H
34#define MLX5_CORE_CQ_H
35
36#include <rdma/ib_verbs.h>
37#include <linux/mlx5/driver.h>
38
39
/*
 * In-kernel software state for one completion queue (CQ).
 * Created/destroyed via mlx5_core_create_cq()/mlx5_core_destroy_cq() below.
 */
struct mlx5_core_cq {
	u32 cqn;		/* CQ number identifying this CQ to the HCA */
	int cqe_sz;		/* CQE size in bytes (see cqe_sz_to_mlx_sz()) */
	__be32 *set_ci_db;	/* doorbell record: consumer index (mlx5_cq_set_ci) */
	__be32 *arm_db;		/* doorbell record: arm state (mlx5_cq_arm) */
	atomic_t refcount;	/* users of this CQ; completion signalled at zero */
	struct completion free;	/* completed when last reference is dropped */
	unsigned vector;	/* completion vector requested for this CQ */
	unsigned int irqn;	/* IRQ number delivering completions */
	void (*comp) (struct mlx5_core_cq *);			/* completion callback */
	void (*event) (struct mlx5_core_cq *, enum mlx5_event);	/* async event callback */
	struct mlx5_uar *uar;	/* UAR page used to ring the CQ doorbell */
	u32 cons_index;		/* software consumer index (low 24 bits used) */
	unsigned arm_sn;	/* arm sequence number (low 2 bits used by mlx5_cq_arm) */
	struct mlx5_rsc_debug *dbg;	/* debugfs entry (mlx5_debug_cq_add/remove) */
	int pid;		/* pid of the creating process, for debugging */
};
57
58
/* Error syndromes reported in error CQEs (MLX5_CQE_REQ_ERR/MLX5_CQE_RESP_ERR). */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
74
/* CQE ownership bit mask and CQE opcode values. */
enum {
	MLX5_CQE_OWNER_MASK	= 1,	/* mask for the CQE ownership bit */
	MLX5_CQE_REQ		= 0,	/* requester (send) completion */
	MLX5_CQE_RESP_WR_IMM	= 1,	/* responder: RDMA write with immediate */
	MLX5_CQE_RESP_SEND	= 2,	/* responder: send */
	MLX5_CQE_RESP_SEND_IMM	= 3,	/* responder: send with immediate */
	MLX5_CQE_RESP_SEND_INV	= 4,	/* responder: send with invalidate */
	MLX5_CQE_RESIZE_CQ	= 5,	/* CQ resize notification */
	MLX5_CQE_SIG_ERR	= 12,	/* signature error */
	MLX5_CQE_REQ_ERR	= 13,	/* requester error completion */
	MLX5_CQE_RESP_ERR	= 14,	/* responder error completion */
	MLX5_CQE_INVALID	= 15,	/* invalid/unowned CQE */
};
88
/* Field-select flags for CQ modification. */
enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,	/* change moderation period */
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,	/* change moderation count */
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,	/* change overrun-ignore behavior */
};
94
/* Opcode modifier and field-select masks for the MODIFY_CQ command. */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,		/* opmod: resize the CQ */
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,	/* select log_cq_size field */
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,	/* select page offset field */
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,	/* select page size field */
};
101
/*
 * Parameters for modifying a CQ; the union arm used is selected by @type.
 * NOTE(review): the moder/mapping arms are currently empty placeholders.
 */
struct mlx5_cq_modify_params {
	int type;	/* selects which member of @params is valid */
	union {
		struct {
			u32 page_offset;	/* offset of CQ buffer within first page */
			u8 log_cq_size;		/* log2 of the new CQ size, in entries */
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
117
/* Hardware encodings of the CQE size (see cqe_sz_to_mlx_sz()). */
enum {
	CQE_SIZE_64	= 0,	/* 64-byte CQEs */
	CQE_SIZE_128	= 1,	/* 128-byte CQEs */
};
122
123static inline int cqe_sz_to_mlx_sz(u8 size)
124{
125 return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
126}
127
128static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
129{
130 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
131}
132
/* Arm commands encoded into bits 24+ of the CQ arm doorbell (see mlx5_cq_arm()). */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,	/* notify on solicited events only */
	MLX5_CQ_DB_REQ_NOT		= 0 << 24	/* notify on next completion */
};
137
/*
 * Arm a CQ for event generation.
 *
 * @cq:		the CQ to arm
 * @cmd:	MLX5_CQ_DB_REQ_NOT or MLX5_CQ_DB_REQ_NOT_SOL
 * @uar_page:	mapped UAR page used for the MMIO doorbell write
 * @doorbell_lock: lock serializing the 64-bit doorbell write where needed
 * @cons_index:	current consumer index (low 24 bits used)
 *
 * Updates the arm doorbell record, then rings the doorbell register on
 * the UAR page.  The statement order here is deliberate — do not reorder.
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	/* Only the low 2 bits of the arm sequence number and the low 24
	 * bits of the consumer index are encoded into the doorbell.
	 */
	sn = cq->arm_sn & 3;
	ci = cons_index & 0xffffff;

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/* Make sure the doorbell record update above is committed to
	 * memory before ringing the doorbell register below; otherwise
	 * the HCA could observe a stale arm state.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
162
/* Initialize / tear down the per-device CQ table. */
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
/* Create a CQ from the given firmware mailbox input; fills in *cq. */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen);
/* Destroy a previously created CQ. */
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
/* Query CQ attributes into the firmware mailbox output. */
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       struct mlx5_query_cq_mbox_out *out);
/* Modify CQ attributes via a raw firmware mailbox command. */
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_modify_cq_mbox_in *in, int in_sz);
/* Convenience wrapper: set interrupt moderation (period/count) on a CQ. */
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
/* Add/remove the CQ's debugfs entry. */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
177
178#endif
179