#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_string_fns.h>
#include <rte_dev.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

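/*
 * The set of PCI devices this driver supports
 */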
#define BROADCOM_PCI_VENDOR_ID 0x14E4
#define QLOGIC_PCI_VENDOR_ID 0x1077
static const struct rte_pci_id pci_id_bnx2x_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
	{ .vendor_id = 0, }
};

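/*
 * The set of PCI devices this driver supports (virtual functions)
 */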
static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
	{ .vendor_id = 0, }
};

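/*
 * Maps an xstat name to the offsets of the hi/lo 32-bit halves of the
 * corresponding counter in struct bnx2x_eth_stats; entries with
 * offset_hi == offset_lo refer to plain 32-bit counters.
 */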
struct rte_bnx2x_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset_hi;
	uint32_t offset_lo;
};

static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
	{"rx_buffer_drops",
		offsetof(struct bnx2x_eth_stats, brb_drop_hi),
		offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
	{"rx_buffer_truncates",
		offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
		offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
	{"rx_buffer_truncate_discard",
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
	{"mac_filter_discard",
		offsetof(struct bnx2x_eth_stats, mac_filter_discard),
		offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
	{"no_match_vlan_tag_discard",
		offsetof(struct bnx2x_eth_stats, mf_tag_discard),
		offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
	{"tx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
	{"rx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
	{"tx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
	{"rx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};

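/* Read the current link state from the adapter and publish it to ethdev. */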
static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE(sc);

	memset(&link, 0, sizeof(link));
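	/* ensure the most recent link state written by the slowpath is read */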
	mb();
	link.link_speed = sc->link_vars.line_speed;
	switch (sc->link_vars.duplex) {
	case DUPLEX_FULL:
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		break;
	case DUPLEX_HALF:
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		break;
	}
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      RTE_ETH_LINK_SPEED_FIXED);
	link.link_status = sc->link_vars.link_up;

	return rte_eth_linkstatus_set(dev, &link);
}

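/*
 * Common slowpath work: service the legacy interrupt, run the periodic
 * callout when not in interrupt context, and refresh the link state if the
 * status in shmem disagrees with what ethdev last reported.
 */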
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t link_status;

	bnx2x_intr_legacy(sc);

	if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
	    !intr_cxt)
		bnx2x_periodic_callout(sc);
	link_status = REG_RD(sc, sc->link_params.shmem_base +
			offsetof(struct shmem_region,
				port_mb[sc->link_params.port].link_status));
	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
		bnx2x_link_update(dev);
}

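/* Interrupt service routine registered with the EAL for PF devices. */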
static void
bnx2x_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");

	bnx2x_interrupt_action(dev, 1);
	rte_intr_ack(sc->pci_dev->intr_handle);
}

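/*
 * EAL alarm callback: enable the periodic poll, run one slowpath pass and,
 * on a PF, re-arm the alarm for the next period.
 */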
static void bnx2x_periodic_start(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
	bnx2x_interrupt_action(dev, 0);
	if (IS_PF(sc)) {
		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
					bnx2x_periodic_start, (void *)dev);
		if (ret) {
			PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
					     " timer rc %d", ret);
		}
	}
}

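/* Disable the periodic poll and cancel any pending alarm. */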
void bnx2x_periodic_stop(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;

	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);

	rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);

	PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
}

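/*
 * Devops - helper functions can be called from user application
 */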
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

	PMD_INIT_FUNC_TRACE(sc);

	sc->mtu = dev->data->dev_conf.rxmode.mtu;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than the number of RX queues");
		return -EINVAL;
	}

	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR, sc, "The number of queues is greater than the number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
		    sc->num_queues, sc->mtu);

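	/* allocate ilt */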
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem failed");
		return -ENXIO;
	}

	bnx2x_dev_rxtx_init_dummy(dev);
	return 0;
}

static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE(sc);

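	/* start the periodic callout */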
	if (IS_PF(sc)) {
		if (atomic_load_acq_long(&sc->periodic_flags) ==
		    PERIODIC_STOP) {
			bnx2x_periodic_start(dev);
			PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
		}
	}

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);

		if (rte_intr_enable(sc->pci_dev->intr_handle))
			PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
	}

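	/* VF: restore the multicast list via the VF-to-PF channel */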
	if (IS_VF(sc))
		bnx2x_vfpf_set_mcast(sc, sc->mc_addrs, sc->mc_addrs_num);
	bnx2x_dev_rxtx_init(dev);

	bnx2x_print_device_info(sc);

	return ret;
}

static int
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE(sc);

	bnx2x_dev_rxtx_init_dummy(dev);

	if (IS_PF(sc)) {
		rte_intr_disable(sc->pci_dev->intr_handle);
		rte_intr_callback_unregister(sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);

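		/* stop the periodic callout */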
		bnx2x_periodic_stop(dev);
	}

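	/* VF: clear the multicast filters before unloading */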
	if (IS_VF(sc))
		bnx2x_vfpf_set_mcast(sc, NULL, 0);
	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
	if (ret) {
		PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
		return ret;
	}

	return 0;
}

static int
bnx2x_dev_close(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);

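	/* only close in case of the primary process */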
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (IS_VF(sc))
		bnx2x_vf_close(sc);

	bnx2x_dev_clear_queues(dev);
	memset(&(dev->data->dev_link), 0, sizeof(struct rte_eth_link));

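	/* free ilt */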
	bnx2x_free_ilt_mem(sc);

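	/* mac_addrs points into dev_private, so ethdev must not free it */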
	dev->data->mac_addrs = NULL;

	return 0;
}

static int
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);

	return 0;
}

static int
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	bnx2x_set_rx_mode(sc);

	return 0;
}

static int
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);

	return 0;
}

static int
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	bnx2x_set_rx_mode(sc);

	return 0;
}

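/*
 * Replace the multicast address list on a VF: flush the old list first,
 * then program and cache the new one.
 */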
static int
bnx2x_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int err;

	PMD_INIT_FUNC_TRACE(sc);

	err = bnx2x_vfpf_set_mcast(sc, NULL, 0);
	if (err)
		return err;
	sc->mc_addrs_num = 0;

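	/* program the new list */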
	err = bnx2x_vfpf_set_mcast(sc, mc_addrs, mc_addrs_num);
	if (err)
		return err;

	sc->mc_addrs_num = mc_addrs_num;
	memcpy(sc->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));

	return 0;
}

static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);

	return bnx2x_link_update(dev);
}

static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	ret = bnx2x_link_update(dev);

	bnx2x_check_bull(sc);
	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down. "
				     "VF device is no longer operational");
		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	}

	return ret;
}

static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t brb_truncate_discard;
	uint64_t brb_drops;
	uint64_t brb_truncates;

	PMD_INIT_FUNC_TRACE(sc);

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
			 sc->eth_stats.total_unicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
			 sc->eth_stats.total_multicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
			 sc->eth_stats.total_broadcast_packets_received_lo);

	stats->opackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
			 sc->eth_stats.total_unicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
			 sc->eth_stats.total_multicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
			 sc->eth_stats.total_broadcast_packets_transmitted_lo);

	stats->ibytes =
		HILO_U64(sc->eth_stats.total_bytes_received_hi,
			 sc->eth_stats.total_bytes_received_lo);

	stats->obytes =
		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
			 sc->eth_stats.total_bytes_transmitted_lo);

	stats->ierrors =
		HILO_U64(sc->eth_stats.error_bytes_received_hi,
			 sc->eth_stats.error_bytes_received_lo);

	stats->oerrors = 0;

	stats->rx_nombuf =
		HILO_U64(sc->eth_stats.no_buff_discard_hi,
			 sc->eth_stats.no_buff_discard_lo);

	brb_drops =
		HILO_U64(sc->eth_stats.brb_drop_hi,
			 sc->eth_stats.brb_drop_lo);

	brb_truncates =
		HILO_U64(sc->eth_stats.brb_truncate_hi,
			 sc->eth_stats.brb_truncate_lo);

	brb_truncate_discard = sc->eth_stats.brb_truncate_discard;

	stats->imissed = brb_drops + brb_truncates +
			 brb_truncate_discard + stats->rx_nombuf;

	return 0;
}

static int
bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				bnx2x_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}

static int
bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	unsigned int num = RTE_DIM(bnx2x_xstats_strings);

	if (n < num)
		return num;

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	/* iterate over the stats table, not the caller's array, to avoid
	 * reading past the end of bnx2x_xstats_strings when n is larger
	 */
	for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
		if (bnx2x_xstats_strings[num].offset_hi !=
		    bnx2x_xstats_strings[num].offset_lo)
			xstats[num].value = HILO_U64(
				*(uint32_t *)((char *)&sc->eth_stats +
					      bnx2x_xstats_strings[num].offset_hi),
				*(uint32_t *)((char *)&sc->eth_stats +
					      bnx2x_xstats_strings[num].offset_lo));
		else
			/* single 32-bit counter; a 64-bit read here would
			 * pull in the adjacent field
			 */
			xstats[num].value =
				*(uint32_t *)((char *)&sc->eth_stats +
					      bnx2x_xstats_strings[num].offset_lo);
		xstats[num].id = num;
	}

	return num;
}

static int
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	dev_info->max_rx_queues = sc->max_rx_queues;
	dev_info->max_tx_queues = sc->max_tx_queues;
	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;

	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
	dev_info->rx_desc_lim.nb_mtu_seg_max = 1;
	dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;

	return 0;
}

static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		   uint32_t index, uint32_t pool)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_add) {
		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
		return 0;
	}
	return -ENOTSUP;
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_remove)
		sc->mac_ops.mac_addr_remove(dev, index);
}

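/*
 * dev_ops for physical function
 */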
static const struct eth_dev_ops bnx2x_eth_dev_ops = {
	.dev_configure = bnx2x_dev_configure,
	.dev_start = bnx2x_dev_start,
	.dev_stop = bnx2x_dev_stop,
	.dev_close = bnx2x_dev_close,
	.promiscuous_enable = bnx2x_promisc_enable,
	.promiscuous_disable = bnx2x_promisc_disable,
	.allmulticast_enable = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable = bnx2x_dev_allmulticast_disable,
	.link_update = bnx2x_dev_link_update,
	.stats_get = bnx2x_dev_stats_get,
	.xstats_get = bnx2x_dev_xstats_get,
	.xstats_get_names = bnx2x_get_xstats_names,
	.dev_infos_get = bnx2x_dev_infos_get,
	.rx_queue_setup = bnx2x_dev_rx_queue_setup,
	.rx_queue_release = bnx2x_dev_rx_queue_release,
	.tx_queue_setup = bnx2x_dev_tx_queue_setup,
	.tx_queue_release = bnx2x_dev_tx_queue_release,
	.mac_addr_add = bnx2x_mac_addr_add,
	.mac_addr_remove = bnx2x_mac_addr_remove,
};

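/*
 * dev_ops for virtual function
 */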
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
	.dev_configure = bnx2x_dev_configure,
	.dev_start = bnx2x_dev_start,
	.dev_stop = bnx2x_dev_stop,
	.dev_close = bnx2x_dev_close,
	.promiscuous_enable = bnx2x_promisc_enable,
	.promiscuous_disable = bnx2x_promisc_disable,
	.allmulticast_enable = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable = bnx2x_dev_allmulticast_disable,
	.set_mc_addr_list = bnx2x_dev_set_mc_addr_list,
	.link_update = bnx2xvf_dev_link_update,
	.stats_get = bnx2x_dev_stats_get,
	.xstats_get = bnx2x_dev_xstats_get,
	.xstats_get_names = bnx2x_get_xstats_names,
	.dev_infos_get = bnx2x_dev_infos_get,
	.rx_queue_setup = bnx2x_dev_rx_queue_setup,
	.rx_queue_release = bnx2x_dev_rx_queue_release,
	.tx_queue_setup = bnx2x_dev_tx_queue_setup,
	.tx_queue_release = bnx2x_dev_tx_queue_release,
	.mac_addr_add = bnx2x_mac_addr_add,
	.mac_addr_remove = bnx2x_mac_addr_remove,
};

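/*
 * Initialization common to PF and VF ports: fill in device identity and BAR
 * addresses, load firmware, attach the adapter and, for a VF, set up the
 * VF-to-PF mailbox/bulletin and request resources from the PF.
 */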
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct bnx2x_softc *sc;
	static bool adapter_info = true;

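	/* Extract key data structures */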
	sc = eth_dev->data->dev_private;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pci_addr = pci_dev->addr;

	snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	PMD_INIT_FUNC_TRACE(sc);

	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(ERR, sc, "Skipping device init from secondary process");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	sc->pcie_bus = pci_dev->addr.bus;
	sc->pcie_device = pci_dev->addr.devid;

	sc->devinfo.vendor_id = pci_dev->id.vendor_id;
	sc->devinfo.device_id = pci_dev->id.device_id;
	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

	if (is_vf)
		sc->flags = BNX2X_IS_VF_FLAG;

	sc->pcie_func = pci_dev->addr.function;
	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
	if (is_vf)
		sc->bar[BAR1].base_addr = (void *)
			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
	else
		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

	assert(sc->bar[BAR0].base_addr);
	assert(sc->bar[BAR1].base_addr);

	bnx2x_load_firmware(sc);
	assert(sc->firmware);

	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
		sc->udp_rss = 1;

	sc->rx_budget = BNX2X_RX_BUDGET;
	sc->hc_rx_ticks = BNX2X_RX_TICKS;
	sc->hc_tx_ticks = BNX2X_TX_TICKS;

	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;

	sc->pci_dev = pci_dev;
	ret = bnx2x_attach(sc);
	if (ret) {
		PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
		return ret;
	}

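	/* print adapter info only once, for the first port probed */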
	if (adapter_info) {
		bnx2x_print_adapter_info(sc);
		adapter_info = false;
	}

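	/* schedule periodic poll for slowpath link events */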
	if (IS_PF(sc)) {
		PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events");
		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
					bnx2x_periodic_start, (void *)eth_dev);
		if (ret) {
			PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
					     " timer rc %d", ret);
			return -EINVAL;
		}
	}

	eth_dev->data->mac_addrs =
		(struct rte_ether_addr *)sc->link_params.mac_addr;

	if (IS_VF(sc)) {
		rte_spinlock_init(&sc->vf2pf_lock);

		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
				 sc->vf2pf_mbox_mapping.vaddr;

		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
				     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
					     sc->max_rx_queues);
		if (ret)
			goto out;
	}

	return 0;

out:
	if (IS_PF(sc))
		bnx2x_periodic_stop(eth_dev);

	return ret;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	struct bnx2x_softc *sc = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct bnx2x_softc *sc = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	return bnx2x_common_dev_init(eth_dev, 1);
}

static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnx2x_softc *sc = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);
	bnx2x_dev_close(eth_dev);
	return 0;
}

static struct rte_pci_driver rte_bnx2x_pmd;
static struct rte_pci_driver rte_bnx2xvf_pmd;

static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
			       struct rte_pci_device *pci_dev)
{
	if (pci_drv == &rte_bnx2x_pmd)
		return rte_eth_dev_pci_generic_probe(pci_dev,
				sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
	else if (pci_drv == &rte_bnx2xvf_pmd)
		return rte_eth_dev_pci_generic_probe(pci_dev,
				sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
	else
		return -EINVAL;
}

static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit);
}

static struct rte_pci_driver rte_bnx2x_pmd = {
	.id_table = pci_id_bnx2x_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};

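/*
 * virtual function driver struct
 */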
static struct rte_pci_driver rte_bnx2xvf_pmd = {
	.id_table = pci_id_bnx2xvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_driver, driver, NOTICE);
816