#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

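/*
 * The MR/MW key programmed into hardware is the MTPT index rotated left by
 * 8 bits; key_to_hw_index() is the inverse rotation.
 */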
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	/* A NULL mailbox means the caller does not need the MPT read back */
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

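/*
 * Buddy allocator for MTT segments: bits[o] is a bitmap of free blocks of
 * 2^o segments, so a free bit seg at order o covers segments
 * [seg << o, (seg + 1) << o).
 */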
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	/* Find the smallest order that still has a free block */
	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order, freeing the buddies */
	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	/* Coalesce with the buddy block for as long as it is also free */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			/* Fall back to vmalloc for large bitmaps */
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	/* Initially the whole range is a single free block of maximum order */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

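/*
 * Reserve 2^order MTT segments of the given type from the matching buddy
 * allocator and pin the corresponding HEM table range.
 */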
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	switch (mtt_type) {
	case MTT_TYPE_WQE:
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
		break;
	case MTT_TYPE_CQE:
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
		break;
	case MTT_TYPE_SRQWQE:
		buddy = &mr_table->mtt_srqwqe_buddy;
		table = &mr_table->mtt_srqwqe_table;
		break;
	case MTT_TYPE_IDX:
		buddy = &mr_table->mtt_idx_buddy;
		table = &mr_table->mtt_idx_table;
		break;
	default:
		dev_err(hr_dev->dev, "Unsupported MTT table type: %d\n",
			mtt_type);
		return -EINVAL;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* A zero-page MTT needs no segment; mark it unallocated */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note the MTT page size */
	mtt->page_shift = page_shift;

	/* Compute the order: segments needed, rounded up to a power of two */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate the MTT segment range */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_CQE:
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_SRQWQE:
		hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_IDX:
		hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, mtt cleanup failed\n",
			mtt->mtt_type);
		break;
	}
}

static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u64 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "unsupported: mhop_num=%d, err_loop_index=%d.",
			 mhop_num, err_loop_index);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}

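/*
 * Build the page buffer list (PBL) for an MR. With pbl_hop_num > 1 the PBL
 * is a tree of base-address tables: an L0 BT points at L1 BTs and, with
 * three hops, L1 BTs point at L2 BTs; the last-level tables hold the page
 * addresses themselves.
 */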
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	int i = 0, j = 0;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;

	mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / BA_BYTE_LEN - 1) /
			  (pbl_bt_sz / BA_BYTE_LEN);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	/* hop_num = 1: one flat PBL holds all page addresses */
	if (mhop_num == 1) {
		if (npages > pbl_bt_sz / BA_BYTE_LEN) {
			dev_err(dev, "npages %d exceeds single-hop PBL capacity!",
				npages);
			return -EINVAL;
		}
		mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;

		mr->pbl_size = npages;
		mr->pbl_ba = mr->pbl_dma_addr;
		mr->pbl_hop_num = mhop_num;
		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
		return 0;
	}

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / BA_BYTE_LEN,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / BA_BYTE_LEN, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	if (mhop_num == 3) {
		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
					      sizeof(*mr->pbl_l2_dma_addr),
					      GFP_KERNEL);
		if (!mr->pbl_l2_dma_addr)
			goto err_kcalloc_l2_dma;

		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
					sizeof(*mr->pbl_bt_l2),
					GFP_KERNEL);
		if (!mr->pbl_bt_l2)
			goto err_kcalloc_bt_l2;
	}

	/* Allocate the L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 2) {
		/* Allocate L1 BTs and link them into L0 */
		for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) {
			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				/* The last BT only needs the leftover pages */
				npages_allocated = i * (pbl_bt_sz / BA_BYTE_LEN);
				size = (npages - npages_allocated) * BA_BYTE_LEN;
			}
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num)
				break;
		}
	} else if (mhop_num == 3) {
		/* Allocate full L1 BTs, then L2 BTs linked under each L1 */
		for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) {
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;

				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
					size = pbl_bt_sz;
				} else {
					npages_allocated = bt_idx *
						(pbl_bt_sz / BA_BYTE_LEN);
					size = (npages - npages_allocated) *
						BA_BYTE_LEN;
				}
				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
					      dev, size,
					      &(mr->pbl_l2_dma_addr[bt_idx]),
					      GFP_KERNEL);
				if (!mr->pbl_bt_l2[bt_idx]) {
					hns_roce_loop_free(hr_dev, mr, 2, i, j);
					goto err_dma_alloc_l0;
				}

				*(mr->pbl_bt_l1[i] + j) =
					mr->pbl_l2_dma_addr[bt_idx];

				pbl_bt_cnt++;
				if (pbl_bt_cnt >= pbl_last_bt_num) {
					mr_alloc_done = 1;
					break;
				}
			}

			if (mr_alloc_done)
				break;
		}
	}

	mr->l0_chunk_last_num = i + 1;
	if (mhop_num == 3)
		mr->l1_chunk_last_num = j + 1;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}

static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret;

	/* Allocate a key for the MR from the MTPT bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = 0;
	mr->key = hw_index_to_key(index);

	if (size == ~0ull) {
		/* A DMA MR covers the whole address space; no PBL is needed */
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;

		mr->pbl_bt_l2 = NULL;
		mr->pbl_bt_l1 = NULL;
		mr->pbl_bt_l0 = NULL;
		mr->pbl_l2_dma_addr = NULL;
		mr->pbl_l1_dma_addr = NULL;
		mr->pbl_l0_dma_addr = 0;
	} else {
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev,
							 npages * BA_BYTE_LEN,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf)
				return -ENOMEM;
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		}
	}

	return ret;
}

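/* Free the multi-hop PBL tree built by hns_roce_mhop_alloc() */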
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = mr->pbl_size;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				/* The last L1 BT was sized for the leftover pages */
				npages_allocated =
					i * (pbl_bt_sz / BA_BYTE_LEN);

				dma_free_coherent(dev,
					(npages - npages_allocated) * BA_BYTE_LEN,
					mr->pbl_bt_l1[i],
					mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;

				if ((i == mr->l0_chunk_last_num - 1)
				    && j == mr->l1_chunk_last_num - 1) {
					npages_allocated = bt_idx *
						(pbl_bt_sz / BA_BYTE_LEN);

					dma_free_coherent(dev,
						(npages - npages_allocated) *
						BA_BYTE_LEN,
						mr->pbl_bt_l2[bt_idx],
						mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;
	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		if (mr->type == MR_TYPE_MR)
			npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev,
					  (unsigned int)(npages * BA_BYTE_LEN),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	if (mr->enabled)
		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

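/*
 * Write the MPT entry for an MR and hand it to hardware via the
 * SW2HW_MPT mailbox command.
 */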
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "Write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

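/*
 * Write npages MTT entries starting at start_index. The chunk must not
 * cross a BT page boundary and must start on a segment boundary.
 */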
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 bt_page_size;
	u32 i;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		table = &hr_dev->mr_table.mtt_table;
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		table = &hr_dev->mr_table.mtt_cqe_table;
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		table = &hr_dev->mr_table.mtt_srqwqe_table;
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		table = &hr_dev->mr_table.mtt_idx_table;
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	/* All MTTs in this chunk must fit in the same BT page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg +
				   start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addresses; without mtt_hop_num the low 12 bits are dropped */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	/* Split the write into chunks that each stay within one BT page */
	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}

	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}

	if (hr_dev->caps.num_srqwqe_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
					  ilog2(hr_dev->caps.num_srqwqe_segs));
		if (ret)
			goto err_buddy_srqwqe;
	}

	if (hr_dev->caps.num_idx_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
					  ilog2(hr_dev->caps.num_idx_segs));
		if (ret)
			goto err_buddy_idx;
	}

	return 0;

err_buddy_idx:
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);

err_buddy_srqwqe:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (hr_dev->caps.num_idx_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate a memory region key; a size of ~0ULL means all memory */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct sg_dma_page_iter sg_iter;
	unsigned int order;
	int npage = 0;
	int ret = 0;
	int i;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		order = hr_dev->caps.mtt_ba_pg_sz;
		break;
	case MTT_TYPE_CQE:
		order = hr_dev->caps.cqe_ba_pg_sz;
		break;
	case MTT_TYPE_SRQWQE:
		order = hr_dev->caps.srqwqe_ba_pg_sz;
		break;
	case MTT_TYPE_IDX:
		order = hr_dev->caps.idx_ba_pg_sz;
		break;
	default:
		dev_err(dev, "Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	/* Gather MTT-page-aligned addresses and flush them per BT page */
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
			if (page_addr & ((1 << mtt->page_shift) - 1)) {
				dev_err(dev,
					"page_addr 0x%llx is not page_shift %d aligned!\n",
					page_addr, mtt->page_shift);
				ret = -EINVAL;
				goto out;
			}
			pages[i++] = page_addr;
		}
		npage++;
		if (i == bt_page_size / sizeof(u64)) {
			ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
			if (ret)
				goto out;
			n += i;
			i = 0;
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}

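/*
 * Copy the DMA addresses of a user memory region into the MR's PBL,
 * filling the last-level table(s) according to pbl_hop_num.
 */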
static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0, j = 0;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		if (!hr_dev->caps.pbl_hop_num) {
			/* Without PBL hops the PBL stores 4K page frame numbers */
			mr->pbl_buf[i++] = page_addr >> 12;
		} else if (hr_dev->caps.pbl_hop_num == 1) {
			mr->pbl_buf[i++] = page_addr;
		} else {
			if (hr_dev->caps.pbl_hop_num == 2)
				mr->pbl_bt_l1[i][j] = page_addr;
			else if (hr_dev->caps.pbl_hop_num == 3)
				mr->pbl_bt_l2[i][j] = page_addr;

			j++;
			if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
				i++;
				j = 0;
			}
		}
	}

	/* Memory barrier: make the PBL visible before the MPT is enabled */
	mb();

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				"MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		u64 pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
			  BA_BYTE_LEN;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				"MR len %lld err. MR page num is limited to %lld!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	mr->type = MR_TYPE_MR;

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int npages;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	/* Take the MPT back from hardware before modifying it */
	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		/* Release the old translation before building the new one */
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * BA_BYTE_LEN,
						  mr->pbl_buf,
						  mr->pbl_dma_addr);
		}
		ib_umem_release(mr->umem);

		mr->umem =
			ib_umem_get(udata, start, length, mr_access_flags, 0);
		if (IS_ERR(mr->umem)) {
			ret = PTR_ERR(mr->umem);
			mr->umem = NULL;
			goto free_cmd_mbox;
		}
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num) {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto release_umem;
		} else {
			mr->pbl_buf = dma_alloc_coherent(dev,
							 npages * BA_BYTE_LEN,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto release_umem;
			}
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		if (flags & IB_MR_REREG_TRANS)
			goto release_umem;
		else
			goto free_cmd_mbox;
	}

	if (flags & IB_MR_REREG_TRANS) {
		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
		if (ret) {
			if (mr->size != ~0ULL) {
				npages = ib_umem_page_count(mr->umem);

				if (hr_dev->caps.pbl_hop_num)
					hns_roce_mhop_free(hr_dev, mr);
				else
					dma_free_coherent(dev,
							  npages * BA_BYTE_LEN,
							  mr->pbl_buf,
							  mr->pbl_dma_addr);
			}

			goto release_umem;
		}
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto release_umem;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

release_umem:
	ib_umem_release(mr->umem);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		ib_umem_release(mr->umem);
		kfree(mr);
	}

	return ret;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	u32 page_size;
	int ret;

	page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
	length = max_num_sg * page_size;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate a memory region key */
	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
				0, max_num_sg, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);

	return 0;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
				struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
	struct hns_roce_mw *mw;
	unsigned long index = 0;
	int ret;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	/* Allocate a key for the MW from the MTPT bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		goto err_bitmap;

	mw->rkey = hw_index_to_key(index);

	mw->ibmw.rkey = mw->rkey;
	mw->ibmw.type = type;
	mw->pdn = to_hr_pd(ib_pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return &mw->ibmw;

err_mw:
	hns_roce_mw_free(hr_dev, mw);

err_bitmap:
	kfree(mw);

	return ERR_PTR(ret);
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	kfree(mw);

	return 0;
}

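/*
 * MTR helpers: the hem_list manages the multi-hop base-address tables that
 * translate buffer offsets to page addresses.
 */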
void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
		       int buf_pg_shift)
{
	hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift);
	mtr->buf_pg_shift = buf_pg_shift;
}

void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}

static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtr *mtr, dma_addr_t *bufs,
			      struct hns_roce_buf_region *r)
{
	int offset;
	int count;
	int npage;
	u64 *mtts;
	int end;
	int i;

	offset = r->offset;
	end = offset + r->count;
	npage = 0;
	while (offset < end) {
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		/* Save page addresses; hw v1 stores page frame numbers */
		for (i = 0; i < count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtts[i] = cpu_to_le64(bufs[npage] >>
						      PAGE_ADDR_SHIFT);
			else
				mtts[i] = cpu_to_le64(bufs[npage]);

			npage++;
		}
		offset += count;
	}

	return 0;
}

int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			dma_addr_t **bufs, struct hns_roce_buf_region *regions,
			int region_cnt)
{
	struct hns_roce_buf_region *r;
	int ret;
	int i;

	ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions,
					region_cnt);
	if (ret)
		return ret;

	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r);
		if (ret) {
			dev_err(hr_dev->dev,
				"write mtr[%d/%d] err %d, offset=%d.\n",
				i, region_cnt, ret, r->offset);
			goto err_write;
		}
	}

	return 0;

err_write:
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	return ret;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	u64 *mtts = mtt_buf;
	int mtt_count;
	int total = 0;
	u64 *addr;
	int npage;
	int left;

	if (mtts == NULL || mtt_max < 1)
		goto done;

	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset + total,
						  &mtt_count, NULL);
		if (!addr || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		memcpy(&mtts[total], addr, BA_BYTE_LEN * npage);
		left -= npage;
		total += npage;
	}

done:
	if (base_addr)
		*base_addr = mtr->hem_list.root_ba;

	return total;
}
1610