#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"

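/*
 * Allocate a CQN, program the CQ context (CQC) into hardware through a
 * CREATE_CQC mailbox command, and publish the CQ in the cqn-indexed xarray
 * used by the event handlers.
 */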
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_cq_table *cq_table;
	u64 mtts[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle;
	int ret;

	/* Get the physical addresses of the CQE buffer pages. */
	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
				&dma_handle);
	if (ret < 1) {
		ibdev_err(ibdev, "Failed to find CQ mtr\n");
		return -EINVAL;
	}

	/* Allocate a free CQN for this CQ. */
	cq_table = &hr_dev->cq_table;
	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
		return ret;
	}

	/* Get the CQC entry from the hardware entry memory (HEM) table. */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
			  hr_cq->cqn, ret);
		goto err_out;
	}

	/* Publish the CQ so the event handlers can look it up by CQN. */
	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "Failed to xa_store CQ\n");
		goto err_put;
	}

	/* Allocate mailbox memory for the CREATE_CQC command. */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	/* Send the mailbox to hardware to create the CQ context. */
	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
				HNS_ROCE_CMD_CREATE_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev,
			  "Failed to send create cmd for CQ(0x%lx), err %d\n",
			  hr_cq->cqn, ret);
		goto err_xa;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
	return ret;
}

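/*
 * Tear down the hardware CQ context: issue DESTROY_CQC, unpublish the CQ,
 * wait until all outstanding event references are dropped, then release the
 * HEM entry and the CQN.
 */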
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_DESTROY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait for any in-flight interrupt handler to finish. */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Drop our reference and wait until all event references are gone. */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}

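/* Allocate the CQE buffer and its multi-hop address translation (mtr). */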
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err)
		ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);

	return err;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

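/*
 * Set up the consumer-index doorbell: map a userspace record doorbell for
 * user CQs, or allocate a kernel record doorbell and/or use the hardware
 * doorbell register for kernel CQs.
 */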
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		/* User CQ: map the userspace record doorbell page. */
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, udata, addr,
						   &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		/* Kernel CQ: allocate a record doorbell if supported. */
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
				 DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

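/* Release the record doorbell, if one was set up by alloc_cq_db(). */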
static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

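/*
 * Verbs entry point for CQ creation: validate the requested depth and
 * completion vector, then allocate the CQE buffer, the doorbell and the
 * hardware context.
 */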
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int vector = attr->comp_vector;
	u32 cq_entries = attr->cqe;
	int ret;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "Failed to check CQ count %d, max %d\n",
			  cq_entries, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "Failed to check CQ vector %d, max %d\n",
			  vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	/* Hardware requires a power-of-two CQ depth. */
	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;
	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (ret) {
			ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
		return ret;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
		goto err_cq_db;
	}

	/*
	 * For CQs created by the kernel, initialize the tail pointer to zero
	 * here; CQs created by user space initialize it themselves to avoid
	 * synchronization problems.
	 */
	if (!udata && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_cqc;
	}

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
	return ret;
}

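/* Verbs entry point for CQ destruction; reverses hns_roce_create_cq(). */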
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	if (hr_dev->hw->destroy_cq)
		hr_dev->hw->destroy_cq(ib_cq, udata);

	free_cq_buf(hr_dev, hr_cq);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cqc(hr_dev, hr_cq);
}

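/* Dispatch a completion event from the EQ handler to the CQ's comp_handler. */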
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

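/*
 * Dispatch an asynchronous error event (invalid CQ ID, access error or
 * overflow) to the CQ's event handler as IB_EVENT_CQ_ERR.
 */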
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	atomic_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

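/* Initialize the CQ table: the lookup xarray and the CQN allocator bitmap. */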
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	xa_init(&cq_table->array);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

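/* Clean up the CQN allocator; the (empty) xarray needs no explicit teardown. */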
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}