#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

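/* A memory key is the 32-bit MPT index rotated left by 8 bits, so the low
 * byte of the key carries the high byte of the index; key_to_hw_index()
 * performs the inverse rotation.
 */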
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_CREATE_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			u32 pd, u64 iova, u64 size, u32 access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long obj = 0;
	int err;

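	/* Allocate a key for the MR from the MTPT bitmap */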
	err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc bitmap for MR key, ret = %d.\n",
			  err);
		return -ENOMEM;
	}

	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = 0;
	mr->key = hw_index_to_key(obj);

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}

static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			size_t length, struct ib_udata *udata, u64 start,
			int access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = length;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;
	buf_attr.user_access = access;
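	/* an FRMR has no buffer at creation time; its pages are filled in
	 * later by hns_roce_map_mr_sg(), so only the MTT is built here
	 */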
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
	else
		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	int ret;

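	/* Allocate mailbox memory */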
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
					     mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	return hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				    hr_dev->caps.num_mtpts,
				    hr_dev->caps.num_mtpts - 1,
				    hr_dev->caps.reserved_mrws, 0);
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

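	/* Allocate memory region key */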
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_MR;
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
			   access_flags);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
			  u64 start, u64 length,
			  u64 virt_addr, int mr_access_flags,
			  struct hns_roce_cmd_mailbox *mailbox,
			  u32 pdn, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret;

	free_mr_pbl(hr_dev, mr);
	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
	if (ret) {
		ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
		return ret;
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
		free_mr_pbl(hr_dev, mr);
	}

	return ret;
}

struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int ret;

	if (!mr->enabled)
		return ERR_PTR(-EINVAL);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return ERR_CAST(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		ret = rereg_mr_trans(ibmr, flags,
				     start, length,
				     virt_addr, mr_access_flags,
				     mailbox, pdn, udata);
		if (ret)
			goto free_cmd_mbox;
	} else {
		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
						   mr_access_flags, virt_addr,
						   length, mailbox->buf);
		if (ret)
			goto free_cmd_mbox;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

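	/* the MR was updated in place; returning NULL reports success */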
	return NULL;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);
		kfree(mr);
	}

	return ret;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

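	/* Allocate memory region key */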
	length = max_num_sg * (1 << PAGE_SHIFT);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

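	/* prepare HEM entry memory for this MPT index */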
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "failed to write mtpt for MW, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	unsigned long index = 0;
	int ret;

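	/* Allocate a key for the MW from the MTPT bitmap */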
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		return ret;

	mw->rkey = hw_index_to_key(index);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  dma_addr_t *pages, struct hns_roce_buf_region *region)
{
	__le64 *mtts;
	int offset;
	int count;
	int npage;
	u64 addr;
	int end;
	int i;

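	/* if hopnum is 0, the buffer cannot store BAs, so skip writing MTTs */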
	if (!region->hopnum)
		return 0;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end) {
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				addr = to_hr_hw_page_addr(pages[npage]);
			else
				addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return 0;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

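	/* because the mtr has only one root base address, a hopnum of 0
	 * means the root base address equals the first buffer address, so
	 * all the allocated memory must lie in one continuous space that is
	 * accessed in direct mode.
	 */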
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

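/*
 * check that the given pages form one continuous address space: return 0
 * on success, otherwise the index of the first page that does not follow
 * its predecessor.
 */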
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
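	/* release user buffers */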
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

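	/* release kernel buffers */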
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		mtr->kmem = NULL;
	}
}

static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr, bool is_direct,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int best_pg_shift;
	int all_pg_count = 0;
	size_t total_size;
	int ret;

	total_size = mtr_bufs_size(buf_attr);
	if (total_size < 1) {
		ibdev_err(ibdev, "failed to check mtr size.\n");
		return -EINVAL;
	}

	if (udata) {
		unsigned long pgsz_bitmap;
		unsigned long page_size;

		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
		if (buf_attr->fixed_page)
			pgsz_bitmap = 1 << buf_attr->page_shift;
		else
			pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);

		page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
						   user_addr);
		if (!page_size) {
			ret = -EINVAL;
			goto err_alloc_mem;
		}
		best_pg_shift = order_base_2(page_size);
		all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
		ret = 0;
	} else {
		mtr->umem = NULL;
		mtr->kmem =
			hns_roce_buf_alloc(hr_dev, total_size,
					   buf_attr->page_shift,
					   is_direct ? HNS_ROCE_BUF_DIRECT : 0);
		if (IS_ERR(mtr->kmem)) {
			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
				  PTR_ERR(mtr->kmem));
			return PTR_ERR(mtr->kmem);
		}

		best_pg_shift = buf_attr->page_shift;
		all_pg_count = mtr->kmem->npages;
	}

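	/* the buffer page shift must be no less than the hardware page shift */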
	if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
		ret = -EINVAL;
		ibdev_err(ibdev,
			  "failed to check mtr, page shift = %u count = %d.\n",
			  best_pg_shift, all_pg_count);
		goto err_alloc_mem;
	}

	mtr->hem_cfg.buf_pg_shift = best_pg_shift;
	mtr->hem_cfg.buf_pg_count = all_pg_count;

	return 0;
err_alloc_mem:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 dma_addr_t *pages, int count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int npage;
	int err;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
					       mtr->kmem);

	if (mtr->hem_cfg.is_direct && npage > 1) {
		err = mtr_check_direct_pages(pages, npage, page_shift);
		if (err) {
			ibdev_err(ibdev,
				  "failed to check %s direct page at index %d.\n",
				  mtr->umem ? "user" : "kernel", err);
			npage = err;
		}
	}

	return npage;
}

int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	unsigned int i;
	int err;

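	/*
	 * Only use the first page address as the root BA when hopnum is 0,
	 * because in that case the addresses of all pages are consecutive.
	 */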
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
		r = &mtr->hem_cfg.region[i];
		if (r->offset + r->count > page_cnt) {
			err = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u end %u + %u, max %u.\n",
				  i, r->offset, r->count, page_cnt);
			return err;
		}

		err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
		if (err) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, err);
			return err;
		}
	}

	return 0;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int mtt_count, left;
	int start_index;
	int total = 0;
	__le64 *mtts;
	u32 npage;
	u64 addr;

	if (!mtt_buf || mtt_max < 1)
		goto done;

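	/* no mtt memory in direct mode, so just return the buffer address */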
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtt_buf[total] = to_hr_hw_page_addr(addr);
			else
				mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count, NULL);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift)
{
	struct hns_roce_buf_region *r;
	unsigned int page_shift;
	int page_cnt = 0;
	size_t buf_size;
	int region_cnt;

	if (cfg->is_direct) {
		buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
		page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
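		/* in direct mode treat the whole buffer as one big page:
		 * recompute buf_pg_shift so that a single page of that size
		 * covers all of the hardware pages counted above
		 */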
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
		if (attr->region_count > 1) {
			cfg->buf_pg_count = page_cnt;
			page_shift = HNS_HW_PAGE_SHIFT;
		} else {
			cfg->buf_pg_count = 1;
			page_shift = cfg->buf_pg_shift;
			if (buf_size != 1 << page_shift) {
				ibdev_err(&hr_dev->ib_dev,
					  "failed to check direct size %zu shift %d.\n",
					  buf_size, page_shift);
				return -EINVAL;
			}
		}
	} else {
		page_shift = cfg->buf_pg_shift;
	}

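	/* convert buffer size to page index and page count */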
	for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
	     region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	if (region_cnt < 1) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to check mtr region count, pages = %d.\n",
			  cfg->buf_pg_count);
		return -ENOBUFS;
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}

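/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */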
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	dma_addr_t *pages = NULL;
	int all_pg_cnt;
	int get_pg_cnt;
	int ret = 0;

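	/* if MTT is disabled, all pages must lie in one continuous range */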
	cfg->is_direct = !mtr_has_mtt(buf_attr);

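	/* the buffer is managed by the caller, so only init the HEM config */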
	if (buf_attr->mtt_only) {
		cfg->buf_pg_shift = buf_attr->page_shift;
		cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
				    buf_attr->page_shift;
		mtr->umem = NULL;
		mtr->kmem = NULL;
	} else {
		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
				     udata, user_addr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc mtr bufs, ret = %d.\n", ret);
			return ret;
		}
	}

	all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
	if (all_pg_cnt < 1) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
		goto err_alloc_bufs;
	}

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
				  ret);
			goto err_alloc_bufs;
		}
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

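	/* no buffer attached yet; the caller maps pages later through
	 * hns_roce_mtr_map()
	 */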
	if (buf_attr->mtt_only)
		return 0;

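	/* alloc a tmp array to store the buffer's dma addresses */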
	pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
			  all_pg_cnt);
		goto err_alloc_hem_list;
	}

	get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
				   buf_page_shift);
	if (get_pg_cnt != all_pg_cnt) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
			  get_pg_cnt, all_pg_cnt);
		ret = -ENOBUFS;
		goto err_alloc_page_list;
	}

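	/* write the buffer's dma addresses to the BA tables */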
	ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
	if (ret) {
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
		goto err_alloc_page_list;
	}

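	/* drop the tmp array */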
	kvfree(pages);
	return 0;

err_alloc_page_list:
	kvfree(pages);
err_alloc_hem_list:
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
err_alloc_bufs:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
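	/* release multi-hop addressing resource */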
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

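	/* free buffers */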
	mtr_free_bufs(hr_dev, mtr);
}