/**
 * @file
 * Memory management functions for mlx4 driver.
 */
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_rwlock.h>

#include "mlx4_glue.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/* Payload for rte_memseg_contig_walk(), filled in by the walk callback. */
struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/* Context for rte_mempool_mem_iter() callbacks updating MR caches. */
struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx4_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock because rte_realloc() may be called.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx4_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * rte_realloc() preserves existing entries. Downside is that
	 * SOCKET_ID_ANY is used instead of the socket the table was
	 * originally allocated on, and that cache line alignment is lost.
	 * Expansion is rare enough that this trade-off is acceptable.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be slower. */
		WARN("failed to expand MR B-tree (%p) table", (void *)bt);
		ret = -1;
	} else {
		DEBUG("expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * the searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns the index where it stops
 *   searching so that the index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx4_mr_cache *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX4_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX4_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX4_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
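
/*
 * Worked example (illustration only, not part of the driver): with a sorted
 * lookup table such as
 *
 *	idx  [start, end)          lkey
 *	 0   [0, 0)                UINT32_MAX   <- sentinel from btree_init()
 *	 1   [0x1000, 0x3000)      0x11
 *	 2   [0x5000, 0x6000)      0x22
 *
 * mr_btree_lookup(bt, &idx, 0x2800) narrows [base, base + n) down to idx=1
 * and returns 0x11, while 0x4000 also stops at idx=1 but fails the end-bound
 * check and returns UINT32_MAX; *idx then tells mr_btree_insert() after which
 * slot the new entry belongs.
 */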

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry)
{
	struct mlx4_mr_cache *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX4_ASSERT(bt != NULL);
	MLX4_ASSERT(bt->len <= bt->size);
	MLX4_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DEBUG("abort insertion to B-tree(%p): already exists at"
		      " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DEBUG("inserted B-tree(%p)[%u],"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
	      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mlx4_mr_cache),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		ERROR("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mlx4_mr_cache) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
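
/*
 * Usage sketch (illustration only; error handling elided, assuming the
 * MLX4_MR_BTREE_CACHE_N default from mlx4_mr.h):
 *
 *	struct mlx4_mr_btree bt;
 *	struct mlx4_mr_cache entry = {
 *		.start = 0x1000, .end = 0x3000, .lkey = 0x11,
 *	};
 *	uint16_t idx;
 *
 *	mlx4_mr_btree_init(&bt, MLX4_MR_BTREE_CACHE_N, SOCKET_ID_ANY);
 *	mr_btree_insert(&bt, &entry);
 *	if (mr_btree_lookup(&bt, &idx, 0x2000) != UINT32_MAX)
 *		; // hit: 0x2000 falls inside [0x1000, 0x3000)
 *	mlx4_mr_btree_free(&bt);
 */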

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx4_mr_btree_free(struct mlx4_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

#ifdef RTE_LIBRTE_MLX4_DEBUG
/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx4_mr_btree_dump(struct mlx4_mr_btree *bt)
{
	int idx;
	struct mlx4_mr_cache *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mlx4_mr_cache *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
}
#endif

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have a memseg list. */
	if (mr->msl == NULL) {
		struct ibv_mr *ibv_mr = mr->ibv_mr;

		MLX4_ASSERT(mr->ms_bmp_n == 1);
		MLX4_ASSERT(mr->ms_n == 1);
		MLX4_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * verbs MR as there's only one chunk.
		 */
		entry->start = (uintptr_t)ibv_mr->addr;
		entry->end = (uintptr_t)ibv_mr->addr + ibv_mr->length;
		entry->lkey = rte_cpu_to_be_32(ibv_mr->lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
	}
	return idx;
}
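
/*
 * Illustration only: with ms_bmp_n == 6 and bitmap 1 1 0 0 1 1, the first call
 * (base_idx=0) fills *entry with the chunk covering memsegs 0-1 and returns 2;
 * resuming from there skips the cleared bits and the next call returns the
 * chunk covering memsegs 4-5. This is how one fragmented MR is expanded into
 * multiple contiguous cache entries by the callers below.
 */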

/**
 * Insert a MR to the global B-tree cache. It may fail due to low-on-memory.
 * Then, this entry will have to be searched by mr_lookup_dev_list() in
 * mlx4_mr_create() on miss.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	unsigned int n;

	DEBUG("port %u inserting MR(%p) to global cache",
	      dev->data->port_id, (void *)mr);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mlx4_mr_cache entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx4_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
		   uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx4_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure.
 */
static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
	      uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx4_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (likely(!priv->mr.cache.overflow)) {
		lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*priv->mr.cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mr_lookup_dev_list(dev, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX4_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can end up in the memory free callback, which takes it.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx4_mr *mr)
{
	if (mr == NULL)
		return;
	DEBUG("freeing MR(%p):", (void *)mr);
	if (mr->ibv_mr != NULL)
		claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr_next;
	struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MR can't be freed with holding the lock because rte_free() could call
	 * memory free callback function. This will be a deadlock situation.
	 */
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = priv->mr.mr_free_list;
	LIST_INIT(&priv->mr.mr_free_list);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx4_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}
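
/*
 * Usage sketch (illustration only): rte_memseg_contig_walk() drives the
 * callback above once per virtually contiguous run of memsegs until it
 * returns nonzero. 'buf' is a hypothetical address to resolve:
 *
 *	struct mr_find_contig_memsegs_data data = {
 *		.addr = (uintptr_t)buf,
 *	};
 *
 *	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data))
 *		return -1; // no contiguous run contains 'buf'
 *	// data.start/data.end now bound the chunk, data.msl its memseg list
 */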

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create a MR for the address. As the global
 * MR list is on shared memory, the following LKey lookup should succeed unless
 * the request fails.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx4_mr_create_secondary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
			 uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      dev->data->port_id, (void *)addr);
	ret = mlx4_mp_req_mr_create(dev, addr);
	if (ret) {
		DEBUG("port %u failed to request MR creation for address (%p)",
		      dev->data->port_id, (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&priv->mr.rwlock);
	/* Fill in output data. */
	mr_lookup_dev(dev, entry, addr);
	/* Lookup can't fail. */
	MLX4_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	DEBUG("port %u MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      dev->data->port_id, (void *)addr,
	      entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
		       uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx4_mr *mr = NULL;
	size_t len;
	uint32_t ms_n;
	uint32_t bmp_size;
	void *bmp_mem;
	int ms_idx_shift = -1;
	unsigned int n;
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};
	struct mr_find_contig_memsegs_data data_re;

	DEBUG("port %u creating a MR using address (%p)",
	      dev->data->port_id, (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called with holding either
	 * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have
	 * been detached by the memory free event but couldn't be released
	 * inside the callback due to deadlock. As a result, releasing resources
	 * is quite opportunistic.
	 */
	mlx4_mr_garbage_collect(dev);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per a socket and per a hugepage-size even
	 * though the system memory is highly fragmented. As the whole memory
	 * chunk will be pinned by kernel, it can't be reused unless entire
	 * chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!priv->mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		WARN("port %u unable to find virtually contiguous"
		     " chunk for address (%p)."
		     " rte_memseg_contig_walk() failed.",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX4_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX4_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range of memseg list. */
	ms_n = len / msl->page_sz;
	DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      dev->data->port_id, (void *)addr,
	      data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		WARN("port %u unable to allocate memory for a new MR of"
		     " address (%p).",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_fbarray_is_used(&msl->memseg_arr, ms_idx);
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		WARN("port %u unable to initialize bitmap for a new MR of"
		     " address (%p).",
		     dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there's any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		WARN("port %u unable to find virtually contiguous"
		     " chunk for address (%p)."
		     " rte_memseg_contig_walk() failed.",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mr_free(mr);
		goto alloc_resources;
	}
	MLX4_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/*
	 * Check the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&priv->mr.cache, entry);
		DEBUG("port %u found MR for %p on final lookup, abort",
		      dev->data->port_id, (void *)addr);
		rte_rwlock_write_unlock(&priv->mr.rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx4_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mlx4_mr_cache ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX4_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory hotplug lock because it doesn't
	 * allocate from the DPDK heap and thus can't trigger the memory event
	 * callback.
	 */
	mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		WARN("port %u failed to create a verbs MR for address (%p)",
		     dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX4_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
	MLX4_ASSERT(mr->ibv_mr->length == len);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	/* Fill in output data. */
	mr_lookup_dev(dev, entry, addr);
	/* Lookup can't fail. */
	MLX4_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per an error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from primary and secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
	       uintptr_t addr)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx4_mr_create_primary(dev, entry, addr);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx4_mr_create_secondary(dev, entry, addr);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Rebuild the global B-tree cache of device from the original MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;

	DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
	/* Flush cache to rebuild. */
	priv->mr.cache.len = 1;
	priv->mr.cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr)
		if (mr_insert_dev_cache(dev, mr) < 0)
			return;
}

/**
 * Callback for memory free event. Iterate freed memsegs and check whether each
 * belongs to an existing MR. If found, clear the bit from the bitmap of the
 * MR. As a result, if a bitmap becomes empty, the MR is detached and moved to
 * the free list so the garbage collector can release it later. The global
 * cache must be rebuilt if there's any change.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	const struct rte_memseg_list *msl;
	struct mlx4_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DEBUG("port %u free callback: addr=%p, len=%zu",
	      dev->data->port_id, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	MLX4_ASSERT((uintptr_t)addr ==
		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	MLX4_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mlx4_mr_cache entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mr_lookup_dev_list(dev, &entry, start);
		if (mr == NULL)
			continue;
		MLX4_ASSERT(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		MLX4_ASSERT(ms != NULL);
		MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		MLX4_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
		MLX4_ASSERT(pos < mr->ms_bmp_n);
		DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
		      dev->data->port_id, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
			DEBUG("port %u remove MR(%p) from list",
			      dev->data->port_id, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed. The global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mr_rebuild_dev_cache(dev);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * freed memsegs is seen by other core, that means it's freed.
		 */
		++priv->mr.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      priv->mr.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&priv->mr.rwlock);
#ifdef RTE_LIBRTE_MLX4_DEBUG
	if (rebuild)
		mlx4_mr_dump_dev(dev);
#endif
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 * @param arg
 *   Unused opaque argument.
 */
void
mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx4_priv *priv;
	struct mlx4_dev_list *dev_list = &mlx4_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_read_lock(&mlx4_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx4 devices. */
		LIST_FOREACH(priv, dev_list, mem_event_cb)
			mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
		rte_rwlock_read_unlock(&mlx4_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to the local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		   struct mlx4_mr_cache *entry, uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
	uint16_t idx;
	uint32_t lkey;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*priv->mr.cache.table)[idx];
		rte_rwlock_read_unlock(&priv->mr.rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx4_mr_create(dev, entry, addr);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As returning LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in the bottom-half
 * B-tree table and, on a miss, search in the global MR cache table, updating
 * the per-queue local caches with the new entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in local lookup table, search in the global cache
		 * and local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N;
	return lkey;
}

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx4_priv *priv = rxq->priv;

	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx4_priv *priv = txq->priv;

	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
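
/*
 * Datapath context (hypothetical sketch, not the verbatim inline helper from
 * mlx4_rxtx.h): the top half is expected to scan the per-queue linear cache
 * first and only fall back to the bottom half above on a miss:
 *
 *	static inline uint32_t
 *	tx_addr2mr(struct txq *txq, uintptr_t addr)
 *	{
 *		struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
 *		unsigned int i;
 *
 *		for (i = 0; i < MLX4_MR_CACHE_N; ++i) {
 *			struct mlx4_mr_cache *e = &mr_ctrl->cache[i];
 *
 *			if (addr >= e->start && addr < e->end)
 *				return e->lkey; // linear-cache hit
 *		}
 *		return mlx4_tx_addr2mr_bh(txq, addr); // slow path above
 *	}
 */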

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx4_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
	}
	return lkey;
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
{
	/* Reset the index of most recently used entry. */
	mr_ctrl->mru = 0;
	/* Reset the linear cache head. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table, keeping the sentinel (first) entry. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DEBUG("mr_ctrl(%p): flushed, cur_gen=%d",
	      (void *)mr_ctrl, mr_ctrl->cur_gen);
}
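
/*
 * Illustration only: datapath consumers compare their private generation
 * number against the device-wide one to detect that the global cache was
 * invalidated by mlx4_mr_mem_event_free_cb(), roughly (the real check lives
 * with the inline datapath helpers):
 *
 *	if (unlikely(mr_ctrl->cur_gen != *mr_ctrl->dev_gen_ptr))
 *		mlx4_mr_flush_local_cache(mr_ctrl);
 */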

/**
 * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp().
 *
 * The externally allocated chunk is registered and a MR is created for it.
 * The MR is added to the global list and then to the local caches through
 * the bottom-half lookup.
 *
 * @param mp
 *   Pointer to the mempool being walked.
 * @param opaque
 *   Pointer to the walk context (struct mr_update_mp_data).
 * @param memhdr
 *   Pointer to the current memory chunk.
 * @param mem_idx
 *   Index of the chunk within the mempool.
 */
static void
mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx4_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mlx4_mr_cache entry;
	uint32_t lkey;

	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_lookup_dev(dev, &entry, addr);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	if (lkey != UINT32_MAX)
		return;
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, mp->socket_id);
	if (mr == NULL) {
		WARN("port %u unable to allocate memory for a new MR of"
		     " mempool (%s).",
		     dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	DEBUG("port %u register MR for chunk #%d of mempool (%s)",
	      dev->data->port_id, mem_idx, mp->name);
	mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		WARN("port %u failed to create a verbs MR for address (%p)",
		     dev->data->port_id, (void *)addr);
		rte_free(mr);
		data->ret = -1;
		return;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Insert to the local cache table. */
	mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr);
}

/**
 * Register MR for entire memory chunks in a Mempool having externally
 * allocated memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static uint32_t
mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MR entirely for a Mempool having externally allocated memory, and
 * look up the address to return its LKey. This is called by the Tx datapath
 * when the address of the mbuf can't be found in the memseg lists.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
{
	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx4_priv *priv = txq->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		WARN("port %u using address (%p) from unregistered mempool"
		     " having externally allocated memory"
		     " in secondary process, please create mempool"
		     " prior to rte_eth_dev_start()",
		     PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx4_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
static void
mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl,
				  (uintptr_t)memhdr->addr);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}
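
/*
 * Usage sketch (hypothetical call site, mirroring what Rx queue setup does):
 * pre-registering every chunk of a queue's mempool avoids bottom-half misses
 * on the hot path:
 *
 *	if (mlx4_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
 *		WARN("mempool (%s) registration failed", mp->name);
 *	// not fatal: the datapath will retry per-address on demand
 */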

#ifdef RTE_LIBRTE_MLX4_DEBUG
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_mr_dump_dev(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&priv->mr.rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      dev->data->port_id, mr_n++,
		      rte_cpu_to_be_32(mr->ibv_mr->lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx4_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("port %u dumping global cache", dev->data->port_id);
	mlx4_mr_btree_dump(&priv->mr.cache);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
}
#endif

/**
 * Release all the created MRs and resources. Remove device from memory
 * callback list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_mr_release(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr_next;

	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock);
	LIST_REMOVE(priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
#ifdef RTE_LIBRTE_MLX4_DEBUG
	mlx4_mr_dump_dev(dev);
#endif
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&priv->mr.mr_list);
	while (mr_next != NULL) {
		struct mlx4_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
	}
	LIST_INIT(&priv->mr.mr_list);
	/* Free global cache. */
	mlx4_mr_btree_free(&priv->mr.cache);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Free all remaining MRs. */
	mlx4_mr_garbage_collect(dev);
}