#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"

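/* Size getters: fbc.sz_m1 holds the queue size minus one (sizes are powers
 * of two), so the number of entries is sz_m1 + 1.
 */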
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.sz_m1 + 1;
}

u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
}

static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
{
	return mlx5_wq_cyc_get_byte_size(&wq->rq) +
	       mlx5_wq_cyc_get_byte_size(&wq->sq);
}

static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
}

static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
{
	return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
}

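/* Create a cyclic work queue: fill the fragment buffer control from the WQ
 * context (stride and size), then allocate the doorbell record and the
 * fragmented buffer on the requested NUMA nodes.
 */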
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	int err;

	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
		      MLX5_GET(wq, wqc, log_wq_sz),
		      fbc);
	wq->sz = wq->fbc.sz_m1 + 1;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	fbc->frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

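/* Split one fragmented buffer between the RQ and the SQ of a QP.  The RQ
 * takes the first rqb->npages fragments and the SQ starts right after.
 * When the RQ does not end on a page boundary, the SQ begins part way into
 * the RQ's last page (strides_offset is non-zero), so both queues share
 * that fragment and the SQ's fragment pointer is pulled back by one.
 */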
static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
				 struct mlx5_wq_qp *qp)
{
	struct mlx5_frag_buf_ctrl *sq_fbc;
	struct mlx5_frag_buf *rqb, *sqb;

	rqb = &qp->rq.fbc.frag_buf;
	*rqb = *buf;
	rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);

	sq_fbc = &qp->sq.fbc;
	sqb = &sq_fbc->frag_buf;
	*sqb = *buf;
	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
	sqb->frags += rqb->npages;
	if (sq_fbc->strides_offset)
		sqb->frags--;
}

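/* Create the RQ and SQ of a QP in one contiguous buffer: the RQ is laid out
 * first, then the SQ.  If the RQ's byte size is not a multiple of PAGE_SIZE,
 * the SQ starts part way into a page; that offset is expressed in units of
 * MLX5_SEND_WQE_BB strides.
 */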
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u16 sq_strides_offset;
	u32 rq_pg_remainder;
	int err;

	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
		      MLX5_GET(qpc, qpc, log_rq_size),
		      &wq->rq.fbc);

	rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
	sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;

	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
			     MLX5_GET(qpc, qpc, log_sq_size),
			     sq_strides_offset,
			     &wq->sq.fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);

	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

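/* Create a CQ work queue: initialize the fragment buffer control from the
 * CQ context, then allocate the doorbell record and the fragmented buffer
 * on the requested NUMA nodes.
 */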
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
				       &wq_ctrl->buf,
				       param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

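/* Create a linked-list work queue.  After allocation, every WQE's
 * next_wqe_index is pointed at the following entry so the list starts out
 * fully chained; tail_next keeps the address of the last entry's link so
 * new WQEs can be appended at the tail.
 */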
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
		      MLX5_GET(wq, wqc, log_wq_sz),
		      fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	for (i = 0; i < fbc->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

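/* Common teardown for all work queue types: release the fragmented buffer
 * and the doorbell record allocated at create time.
 */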
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}