/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Marvell International Ltd.
 */

#include <string.h>
#include <unistd.h>

#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mempool.h>
#include <rte_pci.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>

#include <roc_api.h>
#include <cnxk_dmadev.h>

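/* Report device capabilities: a single virtual channel and the transfer
 * directions and copy ops supported by the DPI hardware queue.
 */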
static int
cnxk_dmadev_info_get(const struct rte_dma_dev *dev,
		     struct rte_dma_info *dev_info, uint32_t size)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(size);

	dev_info->max_vchans = 1;
	dev_info->nb_vchans = 1;
	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
		RTE_DMA_CAPA_MEM_TO_DEV | RTE_DMA_CAPA_DEV_TO_MEM |
		RTE_DMA_CAPA_DEV_TO_DEV | RTE_DMA_CAPA_OPS_COPY |
		RTE_DMA_CAPA_OPS_COPY_SG;
	dev_info->max_desc = DPI_MAX_DESC;
	dev_info->min_desc = 1;
	dev_info->max_sges = DPI_MAX_POINTER;

	return 0;
}

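/* Device-level configuration: the generic conf is unused here; the ROC
 * layer programs the underlying DPI queue.
 */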
static int
cnxk_dmadev_configure(struct rte_dma_dev *dev,
		      const struct rte_dma_conf *conf, uint32_t conf_sz)
{
	struct cnxk_dpi_vf_s *dpivf = NULL;
	int rc = 0;

	RTE_SET_USED(conf);
	RTE_SET_USED(conf_sz);

	dpivf = dev->fp_obj->dev_private;
	rc = roc_dpi_configure(&dpivf->rdpi);
	if (rc < 0)
		plt_err("DMA configure failed err = %d", rc);

	return rc;
}

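/* Build the cn9k DPI instruction header for the requested transfer
 * direction and pre-allocate a completion marker per descriptor.
 */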
static int
cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
			const struct rte_dma_vchan_conf *conf,
			uint32_t conf_sz)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
	struct cnxk_dpi_compl_s *comp_data;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	int i;

	RTE_SET_USED(vchan);
	RTE_SET_USED(conf_sz);

	header->cn9k.pt = DPI_HDR_PT_ZBW_CA;

	switch (conf->direction) {
	case RTE_DMA_DIR_DEV_TO_MEM:
		header->cn9k.xtype = DPI_XTYPE_INBOUND;
		header->cn9k.lport = conf->src_port.pcie.coreid;
		header->cn9k.fport = 0;
		header->cn9k.pvfe = 1;
		break;
	case RTE_DMA_DIR_MEM_TO_DEV:
		header->cn9k.xtype = DPI_XTYPE_OUTBOUND;
		header->cn9k.lport = 0;
		header->cn9k.fport = conf->dst_port.pcie.coreid;
		header->cn9k.pvfe = 1;
		break;
	case RTE_DMA_DIR_MEM_TO_MEM:
		header->cn9k.xtype = DPI_XTYPE_INTERNAL_ONLY;
		header->cn9k.lport = 0;
		header->cn9k.fport = 0;
		header->cn9k.pvfe = 0;
		break;
	case RTE_DMA_DIR_DEV_TO_DEV:
		header->cn9k.xtype = DPI_XTYPE_EXTERNAL_ONLY;
		header->cn9k.lport = conf->src_port.pcie.coreid;
		header->cn9k.fport = conf->dst_port.pcie.coreid;
		break;
	}

	for (i = 0; i < conf->nb_desc; i++) {
		comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0);
		if (comp_data == NULL) {
			plt_err("Failed to allocate for comp_data");
			return -ENOMEM;
		}
		comp_data->cdata = DPI_REQ_CDATA;
		dpivf->conf.c_desc.compl_ptr[i] = comp_data;
	}
	dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC;
	dpivf->conf.c_desc.head = 0;
	dpivf->conf.c_desc.tail = 0;

	return 0;
}

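/* cn10k variant: same flow as cnxk_dmadev_vchan_setup() but fills the
 * cn10k layout of the instruction header.
 */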
static int
cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
			 const struct rte_dma_vchan_conf *conf,
			 uint32_t conf_sz)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
	struct cnxk_dpi_compl_s *comp_data;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	int i;

	RTE_SET_USED(vchan);
	RTE_SET_USED(conf_sz);

	header->cn10k.pt = DPI_HDR_PT_ZBW_CA;

	switch (conf->direction) {
	case RTE_DMA_DIR_DEV_TO_MEM:
		header->cn10k.xtype = DPI_XTYPE_INBOUND;
		header->cn10k.lport = conf->src_port.pcie.coreid;
		header->cn10k.fport = 0;
		header->cn10k.pvfe = 1;
		break;
	case RTE_DMA_DIR_MEM_TO_DEV:
		header->cn10k.xtype = DPI_XTYPE_OUTBOUND;
		header->cn10k.lport = 0;
		header->cn10k.fport = conf->dst_port.pcie.coreid;
		header->cn10k.pvfe = 1;
		break;
	case RTE_DMA_DIR_MEM_TO_MEM:
		header->cn10k.xtype = DPI_XTYPE_INTERNAL_ONLY;
		header->cn10k.lport = 0;
		header->cn10k.fport = 0;
		header->cn10k.pvfe = 0;
		break;
	case RTE_DMA_DIR_DEV_TO_DEV:
		header->cn10k.xtype = DPI_XTYPE_EXTERNAL_ONLY;
		header->cn10k.lport = conf->src_port.pcie.coreid;
		header->cn10k.fport = conf->dst_port.pcie.coreid;
		break;
	}

	for (i = 0; i < conf->nb_desc; i++) {
		comp_data = rte_zmalloc(NULL, sizeof(*comp_data), 0);
		if (comp_data == NULL) {
			plt_err("Failed to allocate for comp_data");
			return -ENOMEM;
		}
		comp_data->cdata = DPI_REQ_CDATA;
		dpivf->conf.c_desc.compl_ptr[i] = comp_data;
	}
	dpivf->conf.c_desc.max_cnt = DPI_MAX_DESC;
	dpivf->conf.c_desc.head = 0;
	dpivf->conf.c_desc.tail = 0;

	return 0;
}

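/* Start/stop/close simply toggle the underlying ROC DPI queue; the
 * descriptor index and word count are reset on start.
 */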
static int
cnxk_dmadev_start(struct rte_dma_dev *dev)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;

	dpivf->desc_idx = 0;
	dpivf->num_words = 0;
	roc_dpi_enable(&dpivf->rdpi);

	return 0;
}

static int
cnxk_dmadev_stop(struct rte_dma_dev *dev)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;

	roc_dpi_disable(&dpivf->rdpi);

	return 0;
}

static int
cnxk_dmadev_close(struct rte_dma_dev *dev)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;

	roc_dpi_disable(&dpivf->rdpi);
	roc_dpi_dev_fini(&dpivf->rdpi);

	return 0;
}

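/* Write cmd_count 64-bit command words into the DPI command queue.
 * Commands live in chunks allocated from an NPA aura; when the current
 * chunk cannot hold the whole command, the tail spills into a freshly
 * allocated chunk that is linked in through a next-chunk pointer word.
 */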
static inline int
__dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count)
{
	uint64_t *ptr = dpi->chunk_base;

	if ((cmd_count < DPI_MIN_CMD_SIZE) || (cmd_count > DPI_MAX_CMD_SIZE) ||
	    cmds == NULL)
		return -EINVAL;

	/*
	 * Normally there is plenty of room in the current buffer for the
	 * command.
	 */
	if (dpi->chunk_head + cmd_count < dpi->pool_size_m1) {
		ptr += dpi->chunk_head;
		dpi->chunk_head += cmd_count;
		while (cmd_count--)
			*ptr++ = *cmds++;
	} else {
		int count;
		uint64_t *new_buff = dpi->chunk_next;

		dpi->chunk_next =
			(void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0);
		if (!dpi->chunk_next) {
			plt_err("Failed to alloc next buffer from NPA");
			return -ENOMEM;
		}

		/*
		 * Figure out how many command words fit in this buffer;
		 * one location is reserved for the next-buffer pointer.
		 */
		count = dpi->pool_size_m1 - dpi->chunk_head;
		ptr += dpi->chunk_head;
		cmd_count -= count;
		while (count--)
			*ptr++ = *cmds++;

		/*
		 * The next-chunk pointer is two 64-bit words; the second
		 * word is unused.
		 */
		*ptr++ = (uint64_t)new_buff;
		*ptr = 0;

		/* Copy the remaining commands to the new buffer. */
		dpi->chunk_base = new_buff;
		dpi->chunk_head = cmd_count;
		ptr = new_buff;
		while (cmd_count--)
			*ptr++ = *cmds++;

		/* The new buffer may already be full; chain one more chunk. */
		if (dpi->chunk_head >= dpi->pool_size_m1) {
			new_buff = dpi->chunk_next;
			dpi->chunk_next =
				(void *)roc_npa_aura_op_alloc(dpi->aura_handle,
							      0);
			if (!dpi->chunk_next) {
				plt_err("Failed to alloc next buffer from NPA");
				return -ENOMEM;
			}

			*ptr = (uint64_t)new_buff;
			dpi->chunk_base = new_buff;
			dpi->chunk_head = 0;
		}
	}

	return 0;
}

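/* Enqueue a single cn9k copy: reserve the next completion marker, emit
 * the four header words plus one pointer pair, and ring the doorbell if
 * RTE_DMA_OP_FLAG_SUBMIT is set.
 */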
static int
cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src,
		 rte_iova_t dst, uint32_t length, uint64_t flags)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	struct cnxk_dpi_compl_s *comp_ptr;
	rte_iova_t fptr, lptr;
	int num_words = 0;
	int rc;

	RTE_SET_USED(vchan);

	comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail];
	comp_ptr->cdata = DPI_REQ_CDATA;
	header->cn9k.ptr = (uint64_t)comp_ptr;
	STRM_INC(dpivf->conf.c_desc);

	header->cn9k.nfst = 1;
	header->cn9k.nlst = 1;

	/*
	 * For inbound transfers the source pointer goes in the last-pointer
	 * list; for all other directions it goes in the first-pointer list.
	 */
	if (header->cn9k.xtype == DPI_XTYPE_INBOUND) {
		fptr = dst;
		lptr = src;
	} else {
		fptr = src;
		lptr = dst;
	}

	dpivf->cmd[0] = header->u[0];
	dpivf->cmd[1] = header->u[1];
	dpivf->cmd[2] = header->u[2];
	/* word3 is always 0 */
	num_words += 4;
	dpivf->cmd[num_words++] = length;
	dpivf->cmd[num_words++] = fptr;
	dpivf->cmd[num_words++] = length;
	dpivf->cmd[num_words++] = lptr;

	rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words);
	if (!rc) {
		if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
			rte_wmb();
			plt_write64(num_words,
				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
			dpivf->stats.submitted++;
		}
		dpivf->num_words += num_words;
	}

	return (rc < 0) ? rc : dpivf->desc_idx++;
}

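/* Scatter-gather variant of the cn9k copy; each side contributes up to
 * 15 length/address pairs (nfst/nlst are 4-bit fields).
 */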
static int
cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan,
		    const struct rte_dma_sge *src,
		    const struct rte_dma_sge *dst,
		    uint16_t nb_src, uint16_t nb_dst, uint64_t flags)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	const struct rte_dma_sge *fptr, *lptr;
	struct cnxk_dpi_compl_s *comp_ptr;
	int num_words = 0;
	int i, rc;

	RTE_SET_USED(vchan);

	comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail];
	comp_ptr->cdata = DPI_REQ_CDATA;
	header->cn9k.ptr = (uint64_t)comp_ptr;
	STRM_INC(dpivf->conf.c_desc);

	/*
	 * For inbound transfers the source pointers are the last pointers;
	 * for all other directions they are the first pointers.
	 */
	if (header->cn9k.xtype == DPI_XTYPE_INBOUND) {
		header->cn9k.nfst = nb_dst & 0xf;
		header->cn9k.nlst = nb_src & 0xf;
		fptr = &dst[0];
		lptr = &src[0];
	} else {
		header->cn9k.nfst = nb_src & 0xf;
		header->cn9k.nlst = nb_dst & 0xf;
		fptr = &src[0];
		lptr = &dst[0];
	}

	dpivf->cmd[0] = header->u[0];
	dpivf->cmd[1] = header->u[1];
	dpivf->cmd[2] = header->u[2];
	/* word3 is always 0 */
	num_words += 4;
	for (i = 0; i < header->cn9k.nfst; i++) {
		dpivf->cmd[num_words++] = (uint64_t)fptr->length;
		dpivf->cmd[num_words++] = fptr->addr;
		fptr++;
	}

	for (i = 0; i < header->cn9k.nlst; i++) {
		dpivf->cmd[num_words++] = (uint64_t)lptr->length;
		dpivf->cmd[num_words++] = lptr->addr;
		lptr++;
	}

	rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words);
	if (!rc) {
		if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
			rte_wmb();
			plt_write64(num_words,
				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
			dpivf->stats.submitted += nb_src;
		}
		dpivf->num_words += num_words;
	}

	return (rc < 0) ? rc : dpivf->desc_idx++;
}

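/* cn10k copy: unlike cn9k, first/last pointers always map to src/dst
 * directly, with no inbound swap.
 */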
static int
cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src,
		  rte_iova_t dst, uint32_t length, uint64_t flags)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	struct cnxk_dpi_compl_s *comp_ptr;
	rte_iova_t fptr, lptr;
	int num_words = 0;
	int rc;

	RTE_SET_USED(vchan);

	comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail];
	comp_ptr->cdata = DPI_REQ_CDATA;
	header->cn10k.ptr = (uint64_t)comp_ptr;
	STRM_INC(dpivf->conf.c_desc);

	header->cn10k.nfst = 1;
	header->cn10k.nlst = 1;

	fptr = src;
	lptr = dst;

	dpivf->cmd[0] = header->u[0];
	dpivf->cmd[1] = header->u[1];
	dpivf->cmd[2] = header->u[2];
	/* word3 is always 0 */
	num_words += 4;
	dpivf->cmd[num_words++] = length;
	dpivf->cmd[num_words++] = fptr;
	dpivf->cmd[num_words++] = length;
	dpivf->cmd[num_words++] = lptr;

	rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words);
	if (!rc) {
		if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
			rte_wmb();
			plt_write64(num_words,
				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
			dpivf->stats.submitted++;
		}
		dpivf->num_words += num_words;
	}

	return (rc < 0) ? rc : dpivf->desc_idx++;
}

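/* cn10k scatter-gather copy, again without the cn9k inbound pointer
 * swap.
 */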
static int
cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan,
		     const struct rte_dma_sge *src,
		     const struct rte_dma_sge *dst, uint16_t nb_src,
		     uint16_t nb_dst, uint64_t flags)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	union dpi_instr_hdr_s *header = &dpivf->conf.hdr;
	const struct rte_dma_sge *fptr, *lptr;
	struct cnxk_dpi_compl_s *comp_ptr;
	int num_words = 0;
	int i, rc;

	RTE_SET_USED(vchan);

	comp_ptr = dpivf->conf.c_desc.compl_ptr[dpivf->conf.c_desc.tail];
	comp_ptr->cdata = DPI_REQ_CDATA;
	header->cn10k.ptr = (uint64_t)comp_ptr;
	STRM_INC(dpivf->conf.c_desc);

	header->cn10k.nfst = nb_src & 0xf;
	header->cn10k.nlst = nb_dst & 0xf;
	fptr = &src[0];
	lptr = &dst[0];

	dpivf->cmd[0] = header->u[0];
	dpivf->cmd[1] = header->u[1];
	dpivf->cmd[2] = header->u[2];
	/* word3 is always 0 */
	num_words += 4;

	for (i = 0; i < header->cn10k.nfst; i++) {
		dpivf->cmd[num_words++] = (uint64_t)fptr->length;
		dpivf->cmd[num_words++] = fptr->addr;
		fptr++;
	}

	for (i = 0; i < header->cn10k.nlst; i++) {
		dpivf->cmd[num_words++] = (uint64_t)lptr->length;
		dpivf->cmd[num_words++] = lptr->addr;
		lptr++;
	}

	rc = __dpi_queue_write(&dpivf->rdpi, dpivf->cmd, num_words);
	if (!rc) {
		if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
			rte_wmb();
			plt_write64(num_words,
				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
			dpivf->stats.submitted += nb_src;
		}
		dpivf->num_words += num_words;
	}

	return (rc < 0) ? rc : dpivf->desc_idx++;
}

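/* Scan completion markers in ring order, stopping at the first request
 * still pending (cdata == DPI_REQ_CDATA) or the first error.
 */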
static uint16_t
cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
		      uint16_t *last_idx, bool *has_error)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	int cnt;

	RTE_SET_USED(vchan);

	if (dpivf->stats.submitted == dpivf->stats.completed)
		return 0;

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		struct cnxk_dpi_compl_s *comp_ptr =
			dpivf->conf.c_desc.compl_ptr[cnt];

		if (comp_ptr->cdata) {
			if (comp_ptr->cdata == DPI_REQ_CDATA)
				break;
			*has_error = 1;
			dpivf->stats.errors++;
			break;
		}
	}

	*last_idx = cnt - 1;
	dpivf->conf.c_desc.tail = cnt;
	dpivf->stats.completed += cnt;

	return cnt;
}

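/* As cnxk_dmadev_completed(), but return a per-request status code for
 * every polled descriptor.
 */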
static uint16_t
cnxk_dmadev_completed_status(void *dev_private, uint16_t vchan,
			     const uint16_t nb_cpls, uint16_t *last_idx,
			     enum rte_dma_status_code *status)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;
	int cnt;

	RTE_SET_USED(vchan);

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		struct cnxk_dpi_compl_s *comp_ptr =
			dpivf->conf.c_desc.compl_ptr[cnt];
		status[cnt] = comp_ptr->cdata;
		if (status[cnt]) {
			if (status[cnt] == DPI_REQ_CDATA)
				break;

			dpivf->stats.errors++;
		}
	}

	*last_idx = cnt - 1;
	dpivf->conf.c_desc.tail = 0;
	dpivf->stats.completed += cnt;

	return cnt;
}

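/* Doorbell write that flushes previously enqueued (non-submitted)
 * descriptors to the hardware.
 */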
static int
cnxk_dmadev_submit(void *dev_private, uint16_t vchan __rte_unused)
{
	struct cnxk_dpi_vf_s *dpivf = dev_private;

	rte_wmb();
	plt_write64(dpivf->num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
	dpivf->stats.submitted++;

	return 0;
}

static int
cnxk_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
	       struct rte_dma_stats *rte_stats, uint32_t size)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
	struct rte_dma_stats *stats = &dpivf->stats;

	RTE_SET_USED(vchan);

	if (size < sizeof(*rte_stats))
		return -EINVAL;
	if (rte_stats == NULL)
		return -EINVAL;

	*rte_stats = *stats;
	return 0;
}

static int
cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
{
	struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;

	dpivf->stats = (struct rte_dma_stats){0};
	return 0;
}

static const struct rte_dma_dev_ops cn10k_dmadev_ops = {
	.dev_close = cnxk_dmadev_close,
	.dev_configure = cnxk_dmadev_configure,
	.dev_info_get = cnxk_dmadev_info_get,
	.dev_start = cnxk_dmadev_start,
	.dev_stop = cnxk_dmadev_stop,
	.stats_get = cnxk_stats_get,
	.stats_reset = cnxk_stats_reset,
	.vchan_setup = cn10k_dmadev_vchan_setup,
};

static const struct rte_dma_dev_ops cnxk_dmadev_ops = {
	.dev_close = cnxk_dmadev_close,
	.dev_configure = cnxk_dmadev_configure,
	.dev_info_get = cnxk_dmadev_info_get,
	.dev_start = cnxk_dmadev_start,
	.dev_stop = cnxk_dmadev_stop,
	.stats_get = cnxk_stats_get,
	.stats_reset = cnxk_stats_reset,
	.vchan_setup = cnxk_dmadev_vchan_setup,
};

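/* PCI probe: allocate the dmadev, wire control and fast-path ops (cn10k
 * parts get their own vchan setup and copy routines), then init the ROC
 * DPI queue.
 */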
static int
cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct cnxk_dpi_vf_s *dpivf = NULL;
	char name[RTE_DEV_NAME_MAX_LEN];
	struct rte_dma_dev *dmadev;
	struct roc_dpi *rdpi = NULL;
	int rc;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node,
				      sizeof(*dpivf));
	if (dmadev == NULL) {
		plt_err("dma device allocation failed for %s", name);
		return -ENOMEM;
	}

	dpivf = dmadev->data->dev_private;

	dmadev->device = &pci_dev->device;
	dmadev->fp_obj->dev_private = dpivf;
	dmadev->dev_ops = &cnxk_dmadev_ops;

	dmadev->fp_obj->copy = cnxk_dmadev_copy;
	dmadev->fp_obj->copy_sg = cnxk_dmadev_copy_sg;
	dmadev->fp_obj->submit = cnxk_dmadev_submit;
	dmadev->fp_obj->completed = cnxk_dmadev_completed;
	dmadev->fp_obj->completed_status = cnxk_dmadev_completed_status;

	if (pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CN10KA ||
	    pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CNF10KA ||
	    pci_dev->id.subsystem_device_id == PCI_SUBSYSTEM_DEVID_CN10KB) {
		dmadev->dev_ops = &cn10k_dmadev_ops;
		dmadev->fp_obj->copy = cn10k_dmadev_copy;
		dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
	}

	rdpi = &dpivf->rdpi;

	rdpi->pci_dev = pci_dev;
	rc = roc_dpi_dev_init(rdpi);
	if (rc < 0)
		goto err_out_free;

	return 0;

err_out_free:
	rte_dma_pmd_release(name);

	return rc;
}

static int
cnxk_dmadev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return rte_dma_pmd_release(name);
}

static const struct rte_pci_id cnxk_dma_pci_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_CNXK_DPI_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cnxk_dmadev = {
	.id_table = cnxk_dma_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cnxk_dmadev_probe,
	.remove = cnxk_dmadev_remove,
};

RTE_PMD_REGISTER_PCI(cnxk_dmadev_pci_driver, cnxk_dmadev);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_dmadev_pci_driver, cnxk_dma_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_dmadev_pci_driver, "vfio-pci");