1
2
3
4
5
6
7#include <nfb/nfb.h>
8#include <nfb/ndp.h>
9#include <netcope/rxmac.h>
10#include <netcope/txmac.h>
11
12#include <ethdev_pci.h>
13#include <rte_kvargs.h>
14
15#include "nfb_stats.h"
16#include "nfb_rx.h"
17#include "nfb_tx.h"
18#include "nfb_rxmode.h"
19#include "nfb.h"
20
21
22
23
/*
 * Template station address: the first three bytes are kept as a fixed
 * prefix (presumably the vendor OUI — TODO confirm) when a random MAC
 * is generated during device init; the low three bytes are discarded.
 */
static const struct rte_ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
27
28
29
30
31
32
33
34
35
36
37
38static void
39nfb_nc_rxmac_init(struct nfb_device *nfb,
40 struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
41 uint16_t *max_rxmac)
42{
43 *max_rxmac = 0;
44 while ((rxmac[*max_rxmac] = nc_rxmac_open_index(nfb, *max_rxmac)))
45 ++(*max_rxmac);
46}
47
48
49
50
51
52
53
54
55
56
57
58static void
59nfb_nc_txmac_init(struct nfb_device *nfb,
60 struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
61 uint16_t *max_txmac)
62{
63 *max_txmac = 0;
64 while ((txmac[*max_txmac] = nc_txmac_open_index(nfb, *max_txmac)))
65 ++(*max_txmac);
66}
67
68
69
70
71
72
73
74
75
76static void
77nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
78 uint16_t max_rxmac)
79{
80 for (; max_rxmac > 0; --max_rxmac) {
81 nc_rxmac_close(rxmac[max_rxmac]);
82 rxmac[max_rxmac] = NULL;
83 }
84}
85
86
87
88
89
90
91
92
93
94static void
95nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
96 uint16_t max_txmac)
97{
98 for (; max_txmac > 0; --max_txmac) {
99 nc_txmac_close(txmac[max_txmac]);
100 txmac[max_txmac] = NULL;
101 }
102}
103
104
105
106
107
108
109
110
111
112
113
114
115static int
116nfb_eth_dev_start(struct rte_eth_dev *dev)
117{
118 int ret;
119 uint16_t i;
120 uint16_t nb_rx = dev->data->nb_rx_queues;
121 uint16_t nb_tx = dev->data->nb_tx_queues;
122
123 for (i = 0; i < nb_rx; i++) {
124 ret = nfb_eth_rx_queue_start(dev, i);
125 if (ret != 0)
126 goto err_rx;
127 }
128
129 for (i = 0; i < nb_tx; i++) {
130 ret = nfb_eth_tx_queue_start(dev, i);
131 if (ret != 0)
132 goto err_tx;
133 }
134
135 return 0;
136
137err_tx:
138 for (i = 0; i < nb_tx; i++)
139 nfb_eth_tx_queue_stop(dev, i);
140err_rx:
141 for (i = 0; i < nb_rx; i++)
142 nfb_eth_rx_queue_stop(dev, i);
143 return ret;
144}
145
146
147
148
149
150
151
152
153
154static int
155nfb_eth_dev_stop(struct rte_eth_dev *dev)
156{
157 uint16_t i;
158 uint16_t nb_rx = dev->data->nb_rx_queues;
159 uint16_t nb_tx = dev->data->nb_tx_queues;
160
161 dev->data->dev_started = 0;
162
163 for (i = 0; i < nb_tx; i++)
164 nfb_eth_tx_queue_stop(dev, i);
165
166 for (i = 0; i < nb_rx; i++)
167 nfb_eth_rx_queue_stop(dev, i);
168
169 return 0;
170}
171
172
173
174
175
176
177
178
179
180
/*
 * DPDK dev_configure callback. The driver needs no per-configure work,
 * so this is a no-op that reports success.
 */
static int
nfb_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
186
187
188
189
190
191
192
193
194
195static int
196nfb_eth_dev_info(struct rte_eth_dev *dev,
197 struct rte_eth_dev_info *dev_info)
198{
199 dev_info->max_mac_addrs = 1;
200 dev_info->max_rx_pktlen = (uint32_t)-1;
201 dev_info->max_rx_queues = dev->data->nb_rx_queues;
202 dev_info->max_tx_queues = dev->data->nb_tx_queues;
203 dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
204
205 return 0;
206}
207
208
209
210
211
212
213
214
215
216static int
217nfb_eth_dev_close(struct rte_eth_dev *dev)
218{
219 struct pmd_internals *internals = dev->data->dev_private;
220 uint16_t i;
221 uint16_t nb_rx = dev->data->nb_rx_queues;
222 uint16_t nb_tx = dev->data->nb_tx_queues;
223 int ret;
224
225 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
226 return 0;
227
228 ret = nfb_eth_dev_stop(dev);
229
230 nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac);
231 nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);
232
233 for (i = 0; i < nb_rx; i++) {
234 nfb_eth_rx_queue_release(dev, i);
235 dev->data->rx_queues[i] = NULL;
236 }
237 dev->data->nb_rx_queues = 0;
238 for (i = 0; i < nb_tx; i++) {
239 nfb_eth_tx_queue_release(dev, i);
240 dev->data->tx_queues[i] = NULL;
241 }
242 dev->data->nb_tx_queues = 0;
243
244 return ret;
245}
246
247
248
249
250
251
252
253
254
255
256
257
258static int
259nfb_eth_link_update(struct rte_eth_dev *dev,
260 int wait_to_complete __rte_unused)
261{
262 uint16_t i;
263 struct nc_rxmac_status status;
264 struct rte_eth_link link;
265 memset(&link, 0, sizeof(link));
266
267 struct pmd_internals *internals = dev->data->dev_private;
268
269 status.speed = MAC_SPEED_UNKNOWN;
270
271 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
272 link.link_status = RTE_ETH_LINK_DOWN;
273 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
274 link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
275
276 if (internals->rxmac[0] != NULL) {
277 nc_rxmac_read_status(internals->rxmac[0], &status);
278
279 switch (status.speed) {
280 case MAC_SPEED_10G:
281 link.link_speed = RTE_ETH_SPEED_NUM_10G;
282 break;
283 case MAC_SPEED_40G:
284 link.link_speed = RTE_ETH_SPEED_NUM_40G;
285 break;
286 case MAC_SPEED_100G:
287 link.link_speed = RTE_ETH_SPEED_NUM_100G;
288 break;
289 default:
290 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
291 break;
292 }
293 }
294
295 for (i = 0; i < internals->max_rxmac; ++i) {
296 nc_rxmac_read_status(internals->rxmac[i], &status);
297
298 if (status.enabled && status.link_up) {
299 link.link_status = RTE_ETH_LINK_UP;
300 break;
301 }
302 }
303
304 rte_eth_linkstatus_set(dev, &link);
305
306 return 0;
307}
308
309
310
311
312
313
314
315
316
317
318static int
319nfb_eth_dev_set_link_up(struct rte_eth_dev *dev)
320{
321 struct pmd_internals *internals = (struct pmd_internals *)
322 dev->data->dev_private;
323
324 uint16_t i;
325 for (i = 0; i < internals->max_rxmac; ++i)
326 nc_rxmac_enable(internals->rxmac[i]);
327
328 for (i = 0; i < internals->max_txmac; ++i)
329 nc_txmac_enable(internals->txmac[i]);
330
331 return 0;
332}
333
334
335
336
337
338
339
340
341
342
343static int
344nfb_eth_dev_set_link_down(struct rte_eth_dev *dev)
345{
346 struct pmd_internals *internals = (struct pmd_internals *)
347 dev->data->dev_private;
348
349 uint16_t i;
350 for (i = 0; i < internals->max_rxmac; ++i)
351 nc_rxmac_disable(internals->rxmac[i]);
352
353 for (i = 0; i < internals->max_txmac; ++i)
354 nc_txmac_disable(internals->txmac[i]);
355
356 return 0;
357}
358
359
360
361
362
363
364
365
366
367
368
369
370static int
371nfb_eth_mac_addr_set(struct rte_eth_dev *dev,
372 struct rte_ether_addr *mac_addr)
373{
374 unsigned int i;
375 uint64_t mac = 0;
376 struct rte_eth_dev_data *data = dev->data;
377 struct pmd_internals *internals = (struct pmd_internals *)
378 data->dev_private;
379
380 if (!rte_is_valid_assigned_ether_addr(mac_addr))
381 return -EINVAL;
382
383 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
384 mac <<= 8;
385 mac |= mac_addr->addr_bytes[i] & 0xFF;
386 }
387
388 for (i = 0; i < internals->max_rxmac; ++i)
389 nc_rxmac_set_mac(internals->rxmac[i], 0, mac, 1);
390
391 rte_ether_addr_copy(mac_addr, data->mac_addrs);
392 return 0;
393}
394
/* DPDK ethdev callback dispatch table for the NFB PMD. */
static const struct eth_dev_ops ops = {
	.dev_start = nfb_eth_dev_start,
	.dev_stop = nfb_eth_dev_stop,
	.dev_set_link_up = nfb_eth_dev_set_link_up,
	.dev_set_link_down = nfb_eth_dev_set_link_down,
	.dev_close = nfb_eth_dev_close,
	.dev_configure = nfb_eth_dev_configure,
	.dev_infos_get = nfb_eth_dev_info,
	.promiscuous_enable = nfb_eth_promiscuous_enable,
	.promiscuous_disable = nfb_eth_promiscuous_disable,
	.allmulticast_enable = nfb_eth_allmulticast_enable,
	.allmulticast_disable = nfb_eth_allmulticast_disable,
	.rx_queue_start = nfb_eth_rx_queue_start,
	.rx_queue_stop = nfb_eth_rx_queue_stop,
	.tx_queue_start = nfb_eth_tx_queue_start,
	.tx_queue_stop = nfb_eth_tx_queue_stop,
	.rx_queue_setup = nfb_eth_rx_queue_setup,
	.tx_queue_setup = nfb_eth_tx_queue_setup,
	.rx_queue_release = nfb_eth_rx_queue_release,
	.tx_queue_release = nfb_eth_tx_queue_release,
	.link_update = nfb_eth_link_update,
	.stats_get = nfb_eth_stats_get,
	.stats_reset = nfb_eth_stats_reset,
	.mac_addr_set = nfb_eth_mac_addr_set,
};
420
421
422
423
424
425
426
427
428
429
/*
 * Initialize an ethdev instance backed by an NFB PCI device.
 *
 * Derives the /dev/nfb character-device path from the PCI address,
 * validates devargs, opens the device, discovers NDP queue counts and
 * MAC components, installs burst functions and the ops table, and
 * programs an initial station address.
 *
 * Returns 0 on success, -EINVAL on devargs/open/allocation failure.
 */
static int
nfb_eth_dev_init(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_pci_addr *pci_addr = &pci_dev->addr;
	struct rte_ether_addr eth_addr_init;
	struct rte_kvargs *kvlist;

	RTE_LOG(INFO, PMD, "Initializing NFB device (" PCI_PRI_FMT ")\n",
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	/* Map the PCI slot onto the libnfb by-pci-slot device node. */
	snprintf(internals->nfb_dev, PATH_MAX,
		"/dev/nfb/by-pci-slot/" PCI_PRI_FMT,
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	/*
	 * Devargs are only validated against VALID_KEYS here; the parsed
	 * list is not consumed further, so it is freed immediately.
	 */
	if (dev->device->devargs != NULL &&
			dev->device->devargs->args != NULL &&
			strlen(dev->device->devargs->args) > 0) {
		kvlist = rte_kvargs_parse(dev->device->devargs->args,
					VALID_KEYS);
		if (kvlist == NULL) {
			RTE_LOG(ERR, PMD, "Failed to parse device arguments %s",
				dev->device->devargs->args);
			/* kvlist is NULL here; rte_kvargs_free(NULL) is a no-op. */
			rte_kvargs_free(kvlist);
			return -EINVAL;
		}
		rte_kvargs_free(kvlist);
	}

	/* Open the NFB device; all further setup depends on this handle. */
	internals->nfb = nfb_open(internals->nfb_dev);
	if (internals->nfb == NULL) {
		RTE_LOG(ERR, PMD, "nfb_open(): failed to open %s",
			internals->nfb_dev);
		return -EINVAL;
	}

	/* Queue counts come from the firmware's available NDP queues. */
	data->nb_rx_queues = ndp_get_rx_queue_available_count(internals->nfb);
	data->nb_tx_queues = ndp_get_tx_queue_available_count(internals->nfb);

	RTE_LOG(INFO, PMD, "Available NDP queues RX: %u TX: %u\n",
		data->nb_rx_queues, data->nb_tx_queues);

	/* Discover and open all RX/TX MAC components. */
	nfb_nc_rxmac_init(internals->nfb,
		internals->rxmac,
		&internals->max_rxmac);
	nfb_nc_txmac_init(internals->nfb,
		internals->txmac,
		&internals->max_txmac);

	/* Install NDP burst functions and the ethdev ops table. */
	dev->rx_pkt_burst = nfb_eth_ndp_rx;
	dev->tx_pkt_burst = nfb_eth_ndp_tx;

	dev->dev_ops = &ops;

	/* Seed the initial link status from hardware. */
	nfb_eth_link_update(dev, 0);

	/* Single MAC address slot (max_mac_addrs == 1 in dev_info). */
	data->mac_addrs = rte_zmalloc(data->name, sizeof(struct rte_ether_addr),
		RTE_CACHE_LINE_SIZE);
	if (data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
		nfb_close(internals->nfb);
		return -EINVAL;
	}

	/* Random address, but keep the fixed 3-byte prefix from eth_addr. */
	rte_eth_random_addr(eth_addr_init.addr_bytes);
	eth_addr_init.addr_bytes[0] = eth_addr.addr_bytes[0];
	eth_addr_init.addr_bytes[1] = eth_addr.addr_bytes[1];
	eth_addr_init.addr_bytes[2] = eth_addr.addr_bytes[2];

	nfb_eth_mac_addr_set(dev, &eth_addr_init);

	/* Mirror the hardware's current RX filter state into ethdev data. */
	data->promiscuous = nfb_eth_promiscuous_get(dev);
	data->all_multicast = nfb_eth_allmulticast_get(dev);
	internals->rx_filter_original = data->promiscuous;

	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	RTE_LOG(INFO, PMD, "NFB device ("
		PCI_PRI_FMT ") successfully initialized\n",
		pci_addr->domain, pci_addr->bus, pci_addr->devid,
		pci_addr->function);

	return 0;
}
528
529
530
531
532
533
534
535
536
537
538static int
539nfb_eth_dev_uninit(struct rte_eth_dev *dev)
540{
541 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
542 struct rte_pci_addr *pci_addr = &pci_dev->addr;
543
544 nfb_eth_dev_close(dev);
545
546 RTE_LOG(INFO, PMD, "NFB device ("
547 PCI_PRI_FMT ") successfully uninitialized\n",
548 pci_addr->domain, pci_addr->bus, pci_addr->devid,
549 pci_addr->function);
550
551 return 0;
552}
553
/* PCI IDs this PMD binds to; zero vendor_id terminates the table. */
static const struct rte_pci_id nfb_pci_id_table[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_40G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_100G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_200G2QL) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3D) },
	{ .vendor_id = 0, }
};
562
563
564
565
566
567
568
569
570
571
572
573
574
575
/*
 * PCI probe callback: allocate an ethdev with pmd_internals private
 * data and run nfb_eth_dev_init() on it.
 */
static int
nfb_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct pmd_internals), nfb_eth_dev_init);
}
583
584
585
586
587
588
589
590
591
592
593
594
/*
 * PCI remove callback: uninitialize and free the ethdev via
 * nfb_eth_dev_uninit().
 */
static int
nfb_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfb_eth_dev_uninit);
}
600
/* PCI driver descriptor tying the ID table to probe/remove. */
static struct rte_pci_driver nfb_eth_driver = {
	.id_table = nfb_pci_id_table,
	.probe = nfb_eth_pci_probe,
	.remove = nfb_eth_pci_remove,
};

/* Register the PMD, its PCI table, kernel-module dependency and devargs. */
RTE_PMD_REGISTER_PCI(RTE_NFB_DRIVER_NAME, nfb_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_NFB_DRIVER_NAME, nfb_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_NFB_DRIVER_NAME, "* nfb");
RTE_PMD_REGISTER_PARAM_STRING(RTE_NFB_DRIVER_NAME, TIMESTAMP_ARG "=<0|1>");
611