#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"

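/*
 * The fbc (frag buffer control) fields store sizes as "value minus one"
 * (sz_m1, frag_sz_m1), so the getters below add 1 back to return the number
 * of entries in the whole queue or in a single fragment.
 */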
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->fbc.frag_sz_m1 + 1;
}

u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.sz_m1 + 1;
}

u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
}

static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
{
	return mlx5_wq_cyc_get_byte_size(&wq->rq) +
	       mlx5_wq_cyc_get_byte_size(&wq->sq);
}

static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
}

static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
{
	return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
}

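/*
 * Create a cyclic work queue. Geometry comes from the WQC fields
 * (log_wq_stride, log_wq_sz); the doorbell record and the fragmented
 * buffer are allocated on the NUMA nodes requested in @param.
 */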
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	int err;

	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
		      MLX5_GET(wq, wqc, log_wq_sz),
		      fbc);
	wq->sz = wq->fbc.sz_m1 + 1;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	fbc->frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

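/*
 * A QP's RQ and SQ live in one shared fragmented buffer: the RQ occupies
 * the first pages and the SQ follows. Point each sub-WQ's fbc at its own
 * slice of the buffer; when the SQ starts mid-page (strides_offset != 0)
 * it shares the RQ's last page, so back its frags pointer up by one.
 */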
static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
				 struct mlx5_wq_qp *qp)
{
	struct mlx5_frag_buf_ctrl *sq_fbc;
	struct mlx5_frag_buf *rqb, *sqb;

	rqb = &qp->rq.fbc.frag_buf;
	*rqb = *buf;
	rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);

	sq_fbc = &qp->sq.fbc;
	sqb = &sq_fbc->frag_buf;
	*sqb = *buf;
	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
	sqb->frags += rqb->npages; /* first part is for the RQ */
	if (sq_fbc->strides_offset)
		sqb->frags--; /* SQ begins inside the RQ's last page */
}

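/*
 * Create the RQ and SQ of a QP on top of a single buffer. The RQ stride is
 * 16 << log_rq_stride bytes (hence the "+ 4"), the SQ stride is one
 * MLX5_SEND_WQE_BB, and sq_strides_offset is meant to place the first SQ
 * WQEBB right after the RQ's portion of the page the RQ ends in. RQ and SQ
 * get separate doorbell record entries (MLX5_RCV_DBR / MLX5_SND_DBR).
 */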
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u32 sq_strides_offset;
	int err;

	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
		      MLX5_GET(qpc, qpc, log_rq_size),
		      &wq->rq.fbc);

	sq_strides_offset =
		((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;

	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
			     MLX5_GET(qpc, qpc, log_sq_size),
			     sq_strides_offset,
			     &wq->sq.fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);

	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

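/*
 * Create a CQ work queue. The fragment geometry (CQE size and log_cq_size)
 * is derived from the CQC by mlx5_core_init_cq_frag_buf(); allocation then
 * follows the same doorbell-then-buffer pattern as the other WQ types.
 */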
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
				       &wq_ctrl->buf,
				       param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

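/*
 * Create a linked-list work queue, used where entries may complete out of
 * order. Each WQE's next_wqe_index is chained to the following entry, and
 * tail_next keeps a pointer to the last entry's next_wqe_index so new WQEs
 * can be linked in at the tail.
 */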
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
		      MLX5_GET(wq, wqc, log_wq_sz),
		      fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	for (i = 0; i < fbc->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

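/*
 * Common teardown for all WQ types: release the fragmented buffer and the
 * doorbell record allocated by the create helpers above.
 */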
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}