#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

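/*
 * An MPT key is the hardware index rotated left by 8 bits (and the
 * index is the key rotated right by 8 bits); the two helpers below are
 * exact inverses of each other. A plausible reading is that the
 * rotation keeps consumer-visible keys from trivially exposing
 * consecutive table indices.
 */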
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	/* Hand a software-initialized MPT entry over to hardware */
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	/* Reclaim an MPT entry from hardware; a NULL mailbox means the
	 * caller does not need the entry's contents back.
	 */
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

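/*
 * MTT segments are managed with a classic buddy allocator: bits[o] is a
 * bitmap with one bit per free block of 2^o segments, and num_free[o]
 * counts the set bits. An allocation takes the smallest free block that
 * is large enough and splits it down to the requested order, returning
 * the unused halves to the free lists; hns_roce_buddy_free() performs
 * the inverse merge. This mirrors the mlx4 MTT buddy scheme this code
 * appears to be derived from.
 */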
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	/* Find the smallest free block of at least the requested order */
	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -ENOMEM;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split down to the requested order, freeing the unused halves */
	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

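/*
 * Freeing walks back up the orders: as long as the buddy block
 * (seg ^ 1) at the current order is also free, the two are merged and
 * the pair is re-inserted one order higher.
 */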
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	/* Coalesce with the buddy block while it is also free */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	/* Order i tracks 2^(max_order - i) blocks; fall back to vzalloc
	 * when a low-order bitmap is too large for kcalloc.
	 */
	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	/* Initially the whole space is one free block of maximum order */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

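/*
 * WQE and CQE MTTs live in separate buddy allocators and HEM tables;
 * mtt_type selects which pair to allocate from.
 */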
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	if (mtt_type == MTT_TYPE_WQE) {
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
	} else {
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret)
		return ret;

	/* Make sure HEM pages backing the allocated range are present */
	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(buddy, *seg, order);
		return -ENOMEM;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* An MTT with no pages needs no segment allocation */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	mtt->page_shift = page_shift;

	/* Compute the order: segments double until they cover npages */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT segments */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret)
		return ret;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	if (mtt->mtt_type == MTT_TYPE_WQE) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
	} else {
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

/*
 * Unwind a partially built multi-hop PBL: err_loop_index tells which
 * level failed, and loop_i/loop_j where in that level the failure hit.
 */
static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u64 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "not supported: mhop_num=%d, err_loop_index=%d.",
			 mhop_num, err_loop_index);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}

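/*
 * Multi-hop PBL layout: with pbl_hop_num == 2 a single L0 base-address
 * table (BT) points at L1 BTs whose entries are the page addresses;
 * with pbl_hop_num == 3 an extra L2 level sits between L1 and the
 * pages. Each BT occupies pbl_bt_sz bytes and holds pbl_bt_sz / 8
 * 64-bit entries; only the last leaf BT may be partially sized.
 */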
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	int i = 0, j = 0;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;

	mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	/* hop_num = 1: a single flat PBL holds all page addresses */
	if (mhop_num == 1) {
		if (npages > pbl_bt_sz / 8) {
			dev_err(dev, "npages %d is larger than buf_pg_sz!",
				npages);
			return -EINVAL;
		}
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;

		mr->pbl_size = npages;
		mr->pbl_ba = mr->pbl_dma_addr;
		mr->pbl_hop_num = mhop_num;
		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
		return 0;
	}

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	if (mhop_num == 3) {
		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
					      sizeof(*mr->pbl_l2_dma_addr),
					      GFP_KERNEL);
		if (!mr->pbl_l2_dma_addr)
			goto err_kcalloc_l2_dma;

		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
					sizeof(*mr->pbl_bt_l2),
					GFP_KERNEL);
		if (!mr->pbl_bt_l2)
			goto err_kcalloc_bt_l2;
	}

	/* alloc L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 2) {
		/* alloc L1 BTs; the last one may be short */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				npages_allocated = i * (pbl_bt_sz / 8);
				size = (npages - npages_allocated) * 8;
			}
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num)
				break;
		}
	} else if (mhop_num == 3) {
		/* alloc L1 and L2 BTs; the last L2 BT may be short */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;

				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
					size = pbl_bt_sz;
				} else {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);
					size = (npages - npages_allocated) * 8;
				}
				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
					      dev, size,
					      &(mr->pbl_l2_dma_addr[bt_idx]),
					      GFP_KERNEL);
				if (!mr->pbl_bt_l2[bt_idx]) {
					hns_roce_loop_free(hr_dev, mr, 2, i, j);
					goto err_dma_alloc_l0;
				}

				*(mr->pbl_bt_l1[i] + j) =
					mr->pbl_l2_dma_addr[bt_idx];

				pbl_bt_cnt++;
				if (pbl_bt_cnt >= pbl_last_bt_num) {
					mr_alloc_done = 1;
					break;
				}
			}

			if (mr_alloc_done)
				break;
		}
	}

	mr->l0_chunk_last_num = i + 1;
	if (mhop_num == 3)
		mr->l1_chunk_last_num = j + 1;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}

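/*
 * Reserve an MTPT index for the MR, derive its key, and allocate the
 * PBL that will hold its page addresses (skipped for the full
 * address-space DMA MR, which is recognized by size == ~0ull).
 */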
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* MR num */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		/* DMA MR: no PBL is needed for the full address space */
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
		mr->pbl_bt_l2 = NULL;
		mr->pbl_bt_l1 = NULL;
		mr->pbl_bt_l0 = NULL;
		mr->pbl_l2_dma_addr = NULL;
		mr->pbl_l1_dma_addr = NULL;
		mr->pbl_l0_dma_addr = 0;
	} else {
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto err_free_bitmap;
			}
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto err_free_bitmap;
		}
	}

	return 0;

err_free_bitmap:
	/* Do not leak the MTPT index when PBL allocation fails */
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, index,
			     BITMAP_NO_RR);
	return ret;
}

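/*
 * Free a multi-hop PBL built by hns_roce_mhop_alloc(), walking L0, L1
 * and (for three hops) L2 in the order they were built. The
 * l0_chunk_last_num/l1_chunk_last_num bookkeeping identifies the short
 * trailing BT so the right size is passed back to dma_free_coherent().
 */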
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = mr->pbl_size;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	/* hop_num = 1: just the flat PBL to free */
	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * 8),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				/* The last L1 BT may be shorter than a page */
				npages_allocated = i * (pbl_bt_sz / 8);

				dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l1[i],
					      mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * (pbl_bt_sz / 8) + j;

				if ((i == mr->l0_chunk_last_num - 1) &&
				    j == mr->l1_chunk_last_num - 1) {
					/* The last L2 BT may be shorter */
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);

					dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l2[bt_idx],
					      mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;
	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		if (mr->type == MR_TYPE_MR)
			npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev, (unsigned int)(npages * 8),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	if (mr->enabled)
		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

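/*
 * Enabling an MR walks the SW2HW handshake: make sure the MTPT entry's
 * HEM backing is present, fill a mailbox with the MTPT image, and hand
 * it to hardware with the SW2HW_MPT command.
 */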
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "Write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

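/*
 * Write one run of page addresses into the MTT. A chunk must stay
 * inside a single BT page and start on a segment boundary, which is
 * what the two -EINVAL checks below enforce.
 */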
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 s = start_index * sizeof(u64);
	u32 bt_page_size;
	u32 i;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	/* All MTTs must fit in the same page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		table = &hr_dev->mr_table.mtt_table;
	else
		table = &hr_dev->mr_table.mtt_cqe_table;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr; the low 12 bits are not used without hops */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}

	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}

	return 0;

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

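/*
 * Translate a user memory region's scatterlist into MTT entries,
 * batching page addresses through a temporary buffer of one BT page so
 * each hns_roce_write_mtt() call stays within a single chunk.
 */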
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct scatterlist *sg;
	unsigned int order;
	int i, k, entry;
	int npage = 0;
	int ret = 0;
	int len;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
		hr_dev->caps.cqe_ba_pg_sz;
	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr =
				sg_dma_address(sg) + (k << umem->page_shift);
			if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
				if (page_addr & ((1 << mtt->page_shift) - 1)) {
					dev_err(dev,
						"page_addr 0x%llx is not aligned to page_shift %d!\n",
						page_addr, mtt->page_shift);
					ret = -EINVAL;
					goto out;
				}
				pages[i++] = page_addr;
			}
			npage++;
			if (i == bt_page_size / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}

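/*
 * Scatter a user memory region's pages directly into the MR's PBL.
 * For one hop the flat pbl_buf is filled; for two or three hops the
 * leaf BTs (L1 or L2) are filled pbl_bt_sz / 8 entries at a time.
 */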
static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i = 0, j = 0, k;
	int entry;
	int len;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr = sg_dma_address(sg) +
				    (k << umem->page_shift);

			if (!hr_dev->caps.pbl_hop_num) {
				/* Store the address without its low 12 bits */
				mr->pbl_buf[i++] = page_addr >> 12;
			} else if (hr_dev->caps.pbl_hop_num == 1) {
				mr->pbl_buf[i++] = page_addr;
			} else {
				if (hr_dev->caps.pbl_hop_num == 2)
					mr->pbl_bt_l1[i][j] = page_addr;
				else if (hr_dev->caps.pbl_hop_num == 3)
					mr->pbl_bt_l2[i][j] = page_addr;

				j++;
				if (j >= (pbl_bt_sz / 8)) {
					i++;
					j = 0;
				}
			}
		}
	}

	/* Memory barrier */
	mb();

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				"MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		int pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				"MR len %lld err. MR page num is limited to %d!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	mr->type = MR_TYPE_MR;

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int npages;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	/* The MPT must be taken back from hardware before it is updated */
	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		/* Drop the old translation before building the new one */
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
						  mr->pbl_dma_addr);
		}
		ib_umem_release(mr->umem);

		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
				       mr_access_flags, 0);
		if (IS_ERR(mr->umem)) {
			ret = PTR_ERR(mr->umem);
			mr->umem = NULL;
			goto free_cmd_mbox;
		}
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num) {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto release_umem;
		} else {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto release_umem;
			}
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		if (flags & IB_MR_REREG_TRANS)
			goto release_umem;
		else
			goto free_cmd_mbox;
	}

	if (flags & IB_MR_REREG_TRANS) {
		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
		if (ret) {
			if (mr->size != ~0ULL) {
				npages = ib_umem_page_count(mr->umem);

				if (hr_dev->caps.pbl_hop_num)
					hns_roce_mhop_free(hr_dev, mr);
				else
					dma_free_coherent(dev, npages * 8,
							  mr->pbl_buf,
							  mr->pbl_dma_addr);
			}

			goto release_umem;
		}
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto release_umem;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

release_umem:
	ib_umem_release(mr->umem);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}

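/*
 * Allocate a fast-register MR (FRMR). No pages are attached yet; the
 * PBL is sized for max_num_sg entries and is populated later through
 * hns_roce_map_mr_sg().
 */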
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	u32 page_size;
	int ret;

	page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
	length = max_num_sg * page_size;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
				0, max_num_sg, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

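/*
 * ib_sg_to_pages() callback: append one page address to the FRMR's
 * PBL. hns_roce_map_mr_sg() resets npages before each mapping.
 */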
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);

	return 0;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
				struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
	struct hns_roce_mw *mw;
	unsigned long index = 0;
	int ret;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	/* Allocate a key for mw from the mtpt bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret) {
		ret = -ENOMEM;
		goto err_bitmap;
	}

	mw->rkey = hw_index_to_key(index);

	mw->ibmw.rkey = mw->rkey;
	mw->ibmw.type = type;
	mw->pdn = to_hr_pd(ib_pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return &mw->ibmw;

err_mw:
	hns_roce_mw_free(hr_dev, mw);

err_bitmap:
	kfree(mw);

	return ERR_PTR(ret);
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	kfree(mw);

	return 0;
}