#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_ether.h>
#include <dpaa_of.h>

#include "pfe_logs.h"
#include "pfe_mod.h"

#define PFE_MAX_MACS 1
#define PFE_VDEV_GEM_ID_ARG "intf"

struct pfe_vdev_init_params {
	int8_t gem_id;
};
static struct pfe *g_pfe;

static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

unsigned int pfe_svr = SVR_LS1012A_REV1;
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];

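/*
 * One-time GEMAC configuration: the PFE MACs are run at a fixed
 * 1 Gbps full duplex with broadcast reception, 1536-byte frames,
 * stacked VLAN, Rx pause frames and Rx checksum offload enabled.
 */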
static int
pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
	struct gemac_cfg cfg;

	cfg.speed = SPEED_1000M;
	cfg.duplex = DUPLEX_FULL;

	gemac_set_config(priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast(priv->EMAC_baseaddr);
	gemac_enable_1536_rx(priv->EMAC_baseaddr);
	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
	gemac_enable_pause_rx(priv->EMAC_baseaddr);
	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
	gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);

	return 0;
}

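/*
 * Read the SoC version (SVR) from sysfs. LS1012A rev1 silicon has a
 * reduced maximum frame size, which pfe_eth_info() reports via max_mtu.
 */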
static void
pfe_soc_version_get(void)
{
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(PFE_SOC_ID_FILE, "r");
	if (!svr_file) {
		PFE_PMD_ERR("Unable to open SoC device");
		return;
	}

	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		pfe_svr = svr_ver;
	else
		PFE_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);
}

static int pfe_eth_start(struct pfe_eth_priv_s *priv)
{
	gpi_enable(priv->GPI_baseaddr);
	gemac_enable(priv->EMAC_baseaddr);

	return 0;
}

static void
pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
		  __rte_unused from_tx, __rte_unused int n_desc)
{
	struct rte_mbuf *mbuf;
	unsigned int flags;

	/* Reclaim and free every completed Tx mbuf on this queue */
	while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
						    tx_q_num, &flags,
						    HIF_TX_DESC_NT))) {
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		rte_pktmbuf_free(mbuf);
	}
}

static void
pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
{
	unsigned int ii;

	for (ii = 0; ii < emac_txq_cnt; ii++)
		pfe_eth_flush_txQ(priv, ii, 0, 0);
}

static int
pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
{
	struct pfe_eth_priv_s *priv = data;

	switch (event) {
	case EVENT_TXDONE_IND:
		pfe_eth_flush_tx(priv);
		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
		break;
	case EVENT_HIGH_RX_WM:
	default:
		break;
	}

	return 0;
}

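/*
 * Interrupt-mode Rx burst: after draining completed Tx and the HIF Rx
 * ring, if nothing was received the HIF Rx interrupt is re-armed and the
 * caller blocks in epoll_wait() for up to 1 ms (ticks), so an idle queue
 * does not busy-poll.
 */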
static uint16_t
pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct epoll_event epoll_ev;
	uint64_t ticks = 1;
	int ret;
	int have_something, work_done;

#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)

	pfe_tx_do_cleanup(priv->pfe);
	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
					rx_pkts, nb_pkts);

	if (!have_something || !work_done) {
		writel(RESET_STATUS, HIF_INT_SRC);
		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
		if (ret < 0 && errno != EINTR)
			PFE_PMD_ERR("epoll_wait fails with %d", errno);
	}

	return work_done;
}

static uint16_t
pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_mempool *pool;

	pfe_tx_do_cleanup(priv->pfe);
	pfe_hif_rx_process(priv->pfe, nb_pkts);
	pool = priv->pfe->hif.shm->pool;

	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
}

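/*
 * Tx burst: each mbuf segment is handed to the HIF as a separate buffer;
 * the first segment carries HIF_FIRST_BUFFER and the last one
 * HIF_LAST_BUFFER | HIF_DATA_VALID so the hardware can reassemble the
 * frame. Single-segment packets take the short path with all flags set
 * on one descriptor.
 */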
static uint16_t
pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hif_client_tx_queue *queue = tx_queue;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_eth_stats *stats = &priv->stats;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->nb_segs > 1) {
			struct rte_mbuf *mbuf;
			int j;

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
				tx_pkts[i]);

			mbuf = tx_pkts[i]->next;
			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
				hif_lib_xmit_pkt(&priv->client,
					queue->queue_id,
					(void *)(size_t)rte_pktmbuf_iova(mbuf),
					mbuf->buf_addr + mbuf->data_off,
					mbuf->data_len,
					0x0, 0x0, mbuf);
				mbuf = mbuf->next;
			}

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(mbuf),
				mbuf->buf_addr + mbuf->data_off,
				mbuf->data_len,
				0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
				mbuf);
		} else {
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->pkt_len, 0x0,
				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
				HIF_DATA_VALID,
				tx_pkts[i]);
		}
		stats->obytes += tx_pkts[i]->pkt_len;
		hif_tx_dma_start();
	}
	stats->opackets += nb_pkts;
	pfe_tx_do_cleanup(priv->pfe);

	return nb_pkts;
}

static uint16_t
pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
		    __rte_unused struct rte_mbuf **tx_pkts,
		    __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
pfe_dummy_recv_pkts(__rte_unused void *rxq,
		    __rte_unused struct rte_mbuf **rx_pkts,
		    __rte_unused uint16_t nb_pkts)
{
	return 0;
}

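/*
 * dev_start callback. On the first start the port registers itself as a
 * HIF client; on a restart any stale packets left in the client Rx queue
 * from the previous run are drained and freed. GPI/GEMAC are then enabled
 * and the real burst functions are wired up. Setting the PFE_INTR_SUPPORT
 * environment variable selects the interrupt-driven Rx burst instead of
 * pure polling.
 */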
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct hif_client_s *client;
	struct hif_shm *hif_shm;
	int rc;

	client = &priv->client;

	if (client->pfe) {
		hif_shm = client->pfe->hif.shm;

		/* Register the client with the HIF core if this port was
		 * never started before.
		 */
		if (!test_bit(PFE_CL_GEM0 + priv->id,
			      &hif_shm->g_client_status[0])) {
			memset(client, 0, sizeof(*client));
			client->id = PFE_CL_GEM0 + priv->id;
			client->tx_qn = emac_txq_cnt;
			client->rx_qn = EMAC_RXQ_CNT;
			client->priv = priv;
			client->pfe = priv->pfe;
			client->port_id = dev->data->port_id;
			client->event_handler = pfe_eth_event_handler;

			client->tx_qsize = EMAC_TXQ_DEPTH;
			client->rx_qsize = EMAC_RXQ_DEPTH;

			rc = hif_lib_client_register(client);
			if (rc) {
				PFE_PMD_ERR("hif_lib_client_register(%d) failed",
					    client->id);
				goto err0;
			}
		} else {
			/* Restart: free any packets still queued for this
			 * client from the previous run.
			 */
			int ret = 0;
			struct rte_mbuf *rx_pkts[32];

			ret = hif_lib_receive_pkt(&client->rx_q[0],
						  hif_shm->pool, rx_pkts, 32);
			while (ret) {
				int i;
				for (i = 0; i < ret; i++)
					rte_pktmbuf_free(rx_pkts[i]);
				ret = hif_lib_receive_pkt(&client->rx_q[0],
							  hif_shm->pool,
							  rx_pkts, 32);
			}
		}
	} else {
		/* First start of this port: register as a HIF client */
		memset(client, 0, sizeof(*client));
		client->id = PFE_CL_GEM0 + priv->id;
		client->tx_qn = emac_txq_cnt;
		client->rx_qn = EMAC_RXQ_CNT;
		client->priv = priv;
		client->pfe = priv->pfe;
		client->port_id = dev->data->port_id;
		client->event_handler = pfe_eth_event_handler;

		client->tx_qsize = EMAC_TXQ_DEPTH;
		client->rx_qsize = EMAC_RXQ_DEPTH;

		rc = hif_lib_client_register(client);
		if (rc) {
			PFE_PMD_ERR("hif_lib_client_register(%d) failed",
				    client->id);
			goto err0;
		}
	}
	rc = pfe_eth_start(priv);
	dev->rx_pkt_burst = &pfe_recv_pkts;
	dev->tx_pkt_burst = &pfe_xmit_pkts;

	if (getenv("PFE_INTR_SUPPORT")) {
		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
		PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
	}

err0:
	return rc;
}

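/*
 * The PFE kernel driver exposes a character device (PFE_CDEV_PATH) that
 * reports per-port link state through ioctls. If it cannot be opened the
 * port still works, but link status updates are unavailable.
 */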
static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
	int pfe_cdev_fd;

	if (priv == NULL)
		return -1;

	pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
	if (pfe_cdev_fd < 0) {
		PFE_PMD_WARN("Unable to open PFE device file (%s).",
			     PFE_CDEV_PATH);
		PFE_PMD_WARN("Link status update will not be available.");
		priv->link_fd = PFE_CDEV_INVALID_FD;
		return -1;
	}

	priv->link_fd = pfe_cdev_fd;

	return 0;
}

static void
pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
{
	if (priv == NULL)
		return;

	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		close(priv->link_fd);
		priv->link_fd = PFE_CDEV_INVALID_FD;
	}
}

static int
pfe_eth_stop(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	dev->data->dev_started = 0;

	gemac_disable(priv->EMAC_baseaddr);
	gpi_disable(priv->GPI_baseaddr);

	/* Park the burst functions so a stopped port drops I/O cleanly */
	dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
	dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;

	return 0;
}

static int
pfe_eth_close(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!dev)
		return -1;

	if (!g_pfe)
		return -1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = pfe_eth_stop(dev);

	pfe_eth_close_cdev(dev->data->dev_private);

	munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
	g_pfe->nb_devs--;

	/* Tear down the shared HIF state when the last port goes away */
	if (g_pfe->nb_devs == 0) {
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}

	return ret;
}

static int
pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
pfe_eth_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	dev_info->max_mac_addrs = PFE_MAX_MACS;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	dev_info->tx_offload_capa = dev_tx_offloads_sup;
	if (pfe_svr == SVR_LS1012A_REV1) {
		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
		dev_info->max_mtu = MAX_MTU_ON_REV1;
	} else {
		dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
		dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
	}

	return 0;
}

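/*
 * Rx queue setup. The HIF rings and buffer pool are shared by all ports,
 * so they are initialized once, from the mempool of the first Rx queue
 * that is configured (pfe->hif.setuped guards against re-init).
 */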
static int
pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		   __rte_unused uint16_t nb_rx_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	int rc = 0;
	struct pfe *pfe;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe = priv->pfe;

	if (queue_idx >= EMAC_RXQ_CNT) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, EMAC_RXQ_CNT);
		return -1;
	}

	if (!pfe->hif.setuped) {
		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
		if (rc) {
			PFE_PMD_ERR("Could not allocate buffer descriptors");
			return -1;
		}

		pfe->hif.shm->pool = mb_pool;
		if (pfe_hif_init_buffers(&pfe->hif)) {
			PFE_PMD_ERR("Could not initialize buffer descriptors");
			return -1;
		}
		hif_init();
		hif_rx_enable();
		hif_tx_enable();
		pfe->hif.setuped = 1;
	}
	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
	priv->client.rx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static void
pfe_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
pfe_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t queue_idx,
		   __rte_unused uint16_t nb_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	if (queue_idx >= emac_txq_cnt) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, emac_txq_cnt);
		return -1;
	}
	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
	priv->client.tx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static const uint32_t *
pfe_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	if (dev->rx_pkt_burst == pfe_recv_pkts ||
	    dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
		return ptypes;
	return NULL;
}

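/*
 * struct rte_eth_link fits in 64 bits, so a 64-bit compare-and-set is
 * enough to read and write the device link status atomically.
 */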
static inline int
pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

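/*
 * Poll link state through the PFE cdev ioctl. As written, the function
 * returns -1 when the status did not change and 0 when the stored link
 * was updated.
 */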
static int
pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	int ret, ioctl_cmd = 0;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_link link, old;
	unsigned int lstatus = 1;

	memset(&old, 0, sizeof(old));
	memset(&link, 0, sizeof(struct rte_eth_link));

	pfe_eth_atomic_read_link_status(dev, &old);

	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		if (priv->id == 0)
			ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
		if (priv->id == 1)
			ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;

		ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
		if (ret != 0) {
			PFE_PMD_ERR("Unable to fetch link status (ioctl)");
			/* fall back to reporting the link as up */
			link.link_status = 1;
		}
		PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.",
			      lstatus, priv->id);
	}

	if (old.link_status == lstatus) {
		PFE_PMD_DEBUG("No change in link status; not updating.");
		return -1;
	}

	link.link_status = lstatus;
	link.link_speed = ETH_SPEED_NUM_1G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	pfe_eth_atomic_write_link_status(dev, &link);

	PFE_PMD_INFO("Port (%d) link is %s", dev->data->port_id,
		     link.link_status ? "up" : "down");

	return 0;
}

static int
pfe_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 1;
	dev->data->promiscuous = 1;
	gemac_enable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 0;
	dev->data->promiscuous = 0;
	gemac_disable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

651
652static int
653pfe_allmulticast_enable(struct rte_eth_dev *dev)
654{
655 struct pfe_eth_priv_s *priv = dev->data->dev_private;
656 struct pfe_mac_addr hash_addr;
657
658
659 hash_addr.bottom = 0xFFFFFFFF;
660 hash_addr.top = 0xFFFFFFFF;
661 gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
662 dev->data->all_multicast = 1;
663
664 return 0;
665}
666
static int
pfe_link_down(struct rte_eth_dev *dev)
{
	return pfe_eth_stop(dev);
}

static int
pfe_link_up(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe_eth_start(priv);

	return 0;
}

static int
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}

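/*
 * The GEMAC stores a MAC address as two registers: the low four bytes in
 * "bottom" and the remaining two bytes in "top". This helper packs the
 * byte array accordingly (little-endian within each register).
 */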
static int
pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
			   struct pfe_mac_addr *enet_addr)
{
	if (!enet_byte_addr || !enet_addr)
		return -1;

	enet_addr->bottom = enet_byte_addr[0] |
			(enet_byte_addr[1] << 8) |
			(enet_byte_addr[2] << 16) |
			(enet_byte_addr[3] << 24);
	enet_addr->top = enet_byte_addr[4] |
			(enet_byte_addr[5] << 8);

	return 0;
}

static int
pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
		     struct rte_ether_addr *addr)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr spec_addr;
	int ret;

	ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
	if (ret)
		return ret;

	gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, 1);
	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

static int
pfe_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &priv->stats;

	if (stats == NULL)
		return -1;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = pfe_eth_open,
	.dev_stop = pfe_eth_stop,
	.dev_close = pfe_eth_close,
	.dev_configure = pfe_eth_configure,
	.dev_infos_get = pfe_eth_info,
	.rx_queue_setup = pfe_rx_queue_setup,
	.rx_queue_release = pfe_rx_queue_release,
	.tx_queue_setup = pfe_tx_queue_setup,
	.tx_queue_release = pfe_tx_queue_release,
	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
	.link_update = pfe_eth_link_update,
	.promiscuous_enable = pfe_promiscuous_enable,
	.promiscuous_disable = pfe_promiscuous_disable,
	.allmulticast_enable = pfe_allmulticast_enable,
	.dev_set_link_down = pfe_link_down,
	.dev_set_link_up = pfe_link_up,
	.mtu_set = pfe_mtu_set,
	.mac_addr_set = pfe_dev_set_mac_addr,
	.stats_get = pfe_stats_get,
};

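/*
 * Per-port initialization: allocate the ethdev, attach the platform data
 * for this gemac, map its EMAC/GPI register bases and the pair of HIF TMU
 * queues it owns, program the MAC address and register the ops table.
 */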
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct ls1012a_eth_platform_data *einfo;
	struct ls1012a_pfe_platform_data *pfe_info;
	struct rte_ether_addr addr;
	int err;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
	if (eth_dev == NULL)
		return -ENOMEM;

	pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
	if (!pfe_info) {
		PFE_PMD_ERR("pfe missing additional platform data");
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
	if (!einfo) {
		PFE_PMD_ERR("pfe missing additional gemacs platform data");
		err = -ENODEV;
		goto err0;
	}

	priv = eth_dev->data->dev_private;
	priv->ndev = eth_dev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

	pfe->eth.eth_priv[id] = priv;

	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	priv->PHY_baseaddr = cbus_emac_base[id];
	priv->GPI_baseaddr = cbus_gpi_base[id];

	/* Each gemac owns a consecutive pair of HIF TMU queues */
#define HIF_GEMAC_TMUQ_BASE 6
	priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmu_q = priv->low_tmu_q + 1;

	rte_spinlock_init(&priv->lock);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			    ETHER_ADDR_LEN * PFE_MAX_MACS);
		err = -ENOMEM;
		goto err0;
	}

	memcpy(addr.addr_bytes, priv->einfo->mac_addr, ETH_ALEN);

	pfe_dev_set_mac_addr(eth_dev, &addr);
	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);

	eth_dev->data->mtu = 1500;
	eth_dev->dev_ops = &ops;
	err = pfe_eth_stop(eth_dev);
	if (err != 0)
		goto err0;
	pfe_gemac_init(priv);

	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Open the PFE cdev for link status; errors are ignored, link
	 * status updates will simply be unavailable in that case.
	 */
	pfe_eth_open_cdev(priv);
	rte_eth_dev_probing_finish(eth_dev);

	return 0;

err0:
	rte_eth_dev_release_port(eth_dev);
	return err;
}

static int
pfe_get_gemac_if_properties(struct pfe *pfe,
			    __rte_unused const struct device_node *parent,
			    unsigned int port, unsigned int if_cnt,
			    struct ls1012a_pfe_platform_data *pdata)
{
	const struct device_node *gem = NULL;
	size_t size;
	unsigned int ii = 0, phy_id = 0;
	const u32 *addr;
	const void *mac_addr;

	/* Find the child node whose "reg" property matches this port */
	for (ii = 0; ii < if_cnt; ii++) {
		gem = of_get_next_child(parent, gem);
		if (!gem)
			goto err;
		addr = of_get_property(gem, "reg", &size);
		if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
			break;
	}

	if (ii >= if_cnt) {
		PFE_PMD_ERR("Failed to find gemac node for port %d", port);
		goto err;
	}

	pdata->ls1012a_eth_pdata[port].gem_id = port;

	mac_addr = of_get_mac_address(gem);
	if (mac_addr) {
		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
		       ETH_ALEN);
	}

	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
	if (!addr) {
		PFE_PMD_ERR("Invalid fsl,mdio-mux-val property");
	} else {
		phy_id = rte_be_to_cpu_32((unsigned int)*addr);
		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
	}

	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
			pdata->ls1012a_eth_pdata[port].mdio_muxval;

	return 0;

err:
	return -1;
}

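/* devargs handler for the "intf" key; the LS1012A PFE exposes two gemacs,
 * so only port IDs 0 and 1 are accepted.
 */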
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	long i;
	char *end;

	errno = 0;
	i = strtol(value, &end, 10);
	if (*end != 0 || errno != 0 || i < 0 || i > 1) {
		PFE_PMD_ERR("Supported Port IDs are 0 and 1");
		return -EINVAL;
	}

	/* extra_args points at pfe_vdev_init_params.gem_id (int8_t); a
	 * 32-bit store here would write past it.
	 */
	*(int8_t *)extra_args = (int8_t)i;

	return 0;
}

static int
pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
			   struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	static const char * const pfe_vdev_valid_params[] = {
		PFE_VDEV_GEM_ID_ARG,
		NULL
	};

	const char *input_args = rte_vdev_device_args(dev);

	if (!input_args)
		return -1;

	kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
	if (kvlist == NULL)
		return -1;

	ret = rte_kvargs_process(kvlist,
				 PFE_VDEV_GEM_ID_ARG,
				 &parse_integer_arg,
				 &params->gem_id);
	rte_kvargs_free(kvlist);

	return ret;
}

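/*
 * vdev probe. The first probed device does the heavy lifting: parse the
 * "fsl,pfe" device-tree node, mmap the CBUS register space via /dev/mem,
 * and bring up the HIF library and rings. Subsequent probes only create
 * an additional ethdev port on the already-initialized PFE (g_pfe).
 */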
static int
pmd_pfe_probe(struct rte_vdev_device *vdev)
{
	const u32 *prop;
	const struct device_node *np;
	const char *name;
	const uint32_t *addr;
	uint64_t cbus_addr, ddr_size, cbus_size;
	int rc = -1, fd = -1, gem_id;
	unsigned int ii, interface_count = 0;
	size_t size = 0;
	struct pfe_vdev_init_params init_params = {
		.gem_id = -1
	};

	name = rte_vdev_device_name(vdev);
	rc = pfe_parse_vdev_init_params(&init_params, vdev);
	if (rc < 0)
		return -EINVAL;

	PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d",
		    name, init_params.gem_id);

	if (g_pfe) {
		if (g_pfe->nb_devs >= g_pfe->max_intf) {
			PFE_PMD_ERR("PFE %d dev already created Max is %d",
				    g_pfe->nb_devs, g_pfe->max_intf);
			return -EINVAL;
		}
		goto eth_init;
	}

	g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
	if (g_pfe == NULL)
		return -EINVAL;

	/* Load the device-tree driver */
	rc = of_init();
	if (rc) {
		PFE_PMD_ERR("of_init failed with ret: %d", rc);
		goto err;
	}

	np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
	if (!np) {
		PFE_PMD_ERR("Invalid device node");
		rc = -EINVAL;
		goto err;
	}

	addr = of_get_address(np, 0, &cbus_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return cbus address");
		goto err;
	}
	cbus_addr = of_translate_address(np, addr);
	if (!cbus_addr) {
		PFE_PMD_ERR("of_translate_address failed");
		goto err;
	}

	addr = of_get_address(np, 1, &ddr_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return ddr address");
		goto err;
	}

	g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
	if (!g_pfe->ddr_phys_baseaddr) {
		PFE_PMD_ERR("of_translate_address failed");
		goto err;
	}

	g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
	g_pfe->ddr_size = ddr_size;
	g_pfe->cbus_size = cbus_size;

	fd = open("/dev/mem", O_RDWR);
	if (fd < 0) {
		PFE_PMD_ERR("Can not open /dev/mem");
		rc = -EINVAL;
		goto err;
	}
	g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, cbus_addr);
	close(fd);
	if (g_pfe->cbus_baseaddr == MAP_FAILED) {
		PFE_PMD_ERR("Can not map cbus base");
		rc = -EINVAL;
		goto err;
	}

	/* Read interface count */
	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
	if (!prop) {
		PFE_PMD_ERR("Failed to read number of interfaces");
		rc = -ENXIO;
		goto err_prop;
	}

	interface_count = rte_be_to_cpu_32((unsigned int)*prop);
	if (!interface_count) {
		PFE_PMD_ERR("No ethernet interface count : %d",
			    interface_count);
		rc = -ENXIO;
		goto err_prop;
	}
	PFE_PMD_INFO("num interfaces = %d", interface_count);

	g_pfe->max_intf = interface_count;
	g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;

	for (ii = 0; ii < interface_count; ii++) {
		pfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,
					    &g_pfe->platform_data);
	}

	pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
		     g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);

	PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
	PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));

	PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
	PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));

	PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));

	PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
	PFE_PMD_INFO("HIF NOCPY version: %x", readl(HIF_NOCPY_VERSION));

	cbus_emac_base[0] = EMAC1_BASE_ADDR;
	cbus_emac_base[1] = EMAC2_BASE_ADDR;

	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
	cbus_gpi_base[1] = EGPI2_BASE_ADDR;

	rc = pfe_hif_lib_init(g_pfe);
	if (rc < 0)
		goto err_hif_lib;

	rc = pfe_hif_init(g_pfe);
	if (rc < 0)
		goto err_hif;
	pfe_soc_version_get();

eth_init:
	if (init_params.gem_id < 0)
		gem_id = g_pfe->nb_devs;
	else
		gem_id = init_params.gem_id;

	PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given = %d)",
		    name, gem_id, init_params.gem_id);

	rc = pfe_eth_init(vdev, g_pfe, gem_id);
	if (rc < 0)
		goto err_eth;
	else
		g_pfe->nb_devs++;

	return 0;

err_eth:
	pfe_hif_exit(g_pfe);

err_hif:
	pfe_hif_lib_exit(g_pfe);

err_hif_lib:
err_prop:
	munmap(g_pfe->cbus_baseaddr, cbus_size);
err:
	rte_free(g_pfe);
	g_pfe = NULL;
	return rc;
}

static int
pmd_pfe_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;
	int ret = 0;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PFE_PMD_INFO("Closing PFE ethdev device %s", name);

	if (!g_pfe)
		return 0;

	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev) {
		pfe_eth_close(eth_dev);
		ret = rte_eth_dev_release_port(eth_dev);
	}

	return ret;
}

1166
1167static
1168struct rte_vdev_driver pmd_pfe_drv = {
1169 .probe = pmd_pfe_probe,
1170 .remove = pmd_pfe_remove,
1171};
1172
1173RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
1174RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
1175RTE_LOG_REGISTER_DEFAULT(pfe_logtype_pmd, NOTICE);
1176