#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

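/*
 * Dispatch an asynchronous hardware event to its SRQ: look the SRQ up in
 * the xarray under the table lock, hold a reference across the callback so
 * the SRQ cannot be freed mid-dispatch, then drop the reference (completing
 * ->free if this was the last one).
 */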
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

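/*
 * Translate a hns_roce SRQ event code into the matching ib_event and hand
 * it to the consumer's event handler, if one is registered. Unrecognized
 * event types are logged and dropped.
 */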
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

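/* Post a CREATE_SRQ mailbox command carrying the SRQ context. */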
static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

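/*
 * Post a DESTROY_SRQ mailbox command. With a mailbox, the retired context
 * is read back into it; with a NULL mailbox, the modifier bit tells the
 * hardware that no output buffer is supplied.
 */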
static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

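/*
 * Allocate and program an SRQ context (SRQC): resolve the WQE and idx-queue
 * MTT addresses, reserve an SRQ number, get an SRQC HEM slot, publish the
 * SRQ in the xarray, then write the context through a mailbox and issue
 * CREATE_SRQ. On success the SRQ holds one initial reference.
 */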
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
	u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_wqe = 0;
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get the physical address of the SRQ WQE buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	/* Get the physical address of the SRQ idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ number, ret = %d.\n", ret);
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
		goto err_xa;
	}

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

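/*
 * Tear down the SRQ context: issue DESTROY_SRQ, unpublish the SRQ from the
 * xarray, wait for all outstanding references to drop, then release the
 * SRQC HEM slot and the SRQ number.
 */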
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

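/*
 * Size the SRQ WQE (a power-of-two multiple of the SGE size large enough
 * for max_gs SGEs) and create the MTR for the WQE buffer, backed by user
 * memory when udata is set and by kernel memory otherwise.
 */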
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, udata, addr);
	if (err)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);

	return err;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

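/*
 * Create the MTR for the SRQ index queue and, for kernel SRQs, a bitmap
 * tracking which index entries are in use.
 */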
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
					srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
		return err;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			err = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return err;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

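/*
 * Kernel SRQs only: initialize the ring head/tail and allocate the wrid
 * array mapping each WQE index to the caller's wr_id.
 */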
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	/* wrid was allocated with kvmalloc_array(), so free with kvfree() */
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

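/*
 * Verb entry point: validate the attributes, allocate the WQE buffer, index
 * queue and (for kernel SRQs) the wrid array, then the SRQ context, and
 * return the SRQ number to userspace.
 */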
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_srq ucmd = {};
	int ret;
	u32 cqn;

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC)
		return -EOPNOTSUPP;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
		goto err_buf_alloc;
	}

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
				  ret);
			goto err_idx_alloc;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ context, ret = %d.\n", ret);
		goto err_wrid_alloc;
	}

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_srqc_alloc;
	}

	return 0;

err_srqc_alloc:
	free_srqc(hr_dev, srq);
err_wrid_alloc:
	free_srq_wrid(srq);
err_idx_alloc:
	free_srq_idx(hr_dev, srq);
err_buf_alloc:
	free_srq_buf(hr_dev, srq);
	return ret;
}

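/*
 * Verb entry point: free the SRQ context, index queue, wrid array and WQE
 * buffer.
 */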
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
	free_srq_wrid(srq);
	free_srq_buf(hr_dev, srq);
	return 0;
}

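/* Set up the per-device SRQ table: the xarray and the SRQ number bitmap. */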
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}