/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"
#include "rte_time.h"

#include "eal_filesystem.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static int axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static int axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_reset(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
		uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats,
		unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values,
		unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
axgbe_timesync_enable(struct rte_eth_dev *dev);
static int
axgbe_timesync_disable(struct rte_eth_dev *dev);
static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp, uint32_t flags);
static int
axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp);
static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
		unsigned int nsec);
static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
		unsigned int addend);
static int
axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);
static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var) \
	{ _string, \
	  offsetof(struct axgbe_mmc_stats, _var), \
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_YC_ROOT_COMPLEX_ID	0x14b5
#define AMD_PCI_SNOWY_ROOT_COMPLEX_ID	0x1450
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.dev_reset            = axgbe_dev_reset,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.reta_update          = axgbe_dev_rss_reta_update,
	.reta_query           = axgbe_dev_rss_reta_query,
	.rss_hash_update      = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get         = axgbe_rxq_info_get,
	.txq_info_get         = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.mtu_set              = axgb_mtu_set,
	.vlan_filter_set      = axgbe_vlan_filter_set,
	.vlan_tpid_set        = axgbe_vlan_tpid_set,
	.vlan_offload_set     = axgbe_vlan_offload_set,
	.timesync_enable      = axgbe_timesync_enable,
	.timesync_disable     = axgbe_timesync_disable,
	.timesync_read_rx_timestamp = axgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = axgbe_timesync_read_tx_timestamp,
	.timesync_adjust_time = axgbe_timesync_adjust_time,
	.timesync_read_time   = axgbe_timesync_read_time,
	.timesync_write_time  = axgbe_timesync_write_time,
	.fw_version_get       = axgbe_dev_fw_version_get,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}
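/*
 * The interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */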
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);

	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}

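	/* Unmask interrupts since they are disabled after generation */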
	rte_intr_ack(pdata->pci_dev->intr_handle);
}

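/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */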
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

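	/* Checksum offload to hardware */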
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

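	/* Multiqueue RSS */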
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

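	/* Enable uio/vfio intr/eventfd mapping */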
	rte_intr_enable(pdata->pci_dev->intr_handle);

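	/* phy start */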
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);

	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

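	/* Scatter Rx handling */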
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

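/* Stop device: disable rx and tx functions to allow for reconfiguring. */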
static int
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(pdata->pci_dev->intr_handle);

	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return 0;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);

	return 0;
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

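	/* Set Default MAC Addr */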
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static int
axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		pdata->rss_table[i] = reta_conf[idx].reta[shift];
	}

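	/* Program the lookup table */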
	ret = axgbe_write_rss_lookup_table(pdata);
	return ret;
}

static int
axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		reta_conf[idx].reta[shift] = pdata->rss_table[i];
	}
	return 0;
}

static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

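	/* Set the RSS options */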
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}

static int
axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
	}
	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
	rss_conf->rss_hf = pdata->rss_hf;
	return 0;
}

static int
axgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret = 0;

	ret = axgbe_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_axgbe_dev_init(dev);

	return ret;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1;
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

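	/* Clear the existing additional unicast MAC addresses */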
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

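/* return 0 means link status changed, -1 means not changed */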
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      RTE_ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

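	/* Only full register dump is supported */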
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}

static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

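	/* Freeze counters */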
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

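	/* Tx counters: each 64-bit counter is read as a LO/HI register pair */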
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

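	/* Rx counters */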
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

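	/* Un-freeze counters */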
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (n < AXGBE_XSTATS_COUNT)
		return AXGBE_XSTATS_COUNT;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

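	/* MMC registers are configured for reset on read */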
	axgbe_read_mmc_stats(pdata);

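	/* Reset the software-maintained copy of the stats */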
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			stats->q_ipackets[i] = rxq->pkts;
			stats->ipackets += rxq->pkts;
			stats->q_ibytes[i] = rxq->bytes;
			stats->ibytes += rxq->bytes;
			stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
			stats->q_errors[i] = rxq->errors
				+ rxq->rx_mbuf_alloc_failed;
			stats->ierrors += rxq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
				    dev->data->port_id);
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			stats->q_opackets[i] = txq->pkts;
			stats->opackets += txq->pkts;
			stats->q_obytes[i] = txq->bytes;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
				    dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			rxq->pkts = 0;
			rxq->bytes = 0;
			rxq->errors = 0;
			rxq->rx_mbuf_alloc_failed = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
				    dev->data->port_id);
		}
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			txq->pkts = 0;
			txq->bytes = 0;
			txq->errors = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
				    dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_ETH_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_ETH_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc.mode = RTE_ETH_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_ETH_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}
	tc_num = pdata->pfc_map[pfc_conf->priority];

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_ETH_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

static void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}

static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int val;

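	/* MTU setting is forbidden if the port is started */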
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}
	val = mtu > RTE_ETHER_MTU ? 1 : 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);

	return 0;
}

static void
axgbe_update_tstamp_time(struct axgbe_port *pdata,
		unsigned int sec, unsigned int nsec, int addsub)
{
	unsigned int count = 100;
	uint32_t sub_val = 0;
	uint32_t sub_val_sec = 0xFFFFFFFF;
	uint32_t sub_val_nsec = 0x3B9ACA00;

	if (addsub) {
		if (sec)
			sub_val = sub_val_sec - (sec - 1);
		else
			sub_val = sec;

		AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val);
		sub_val = sub_val_nsec - nsec;
		AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1);
	} else {
		AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0);
		AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);

	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
}

static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{
	uint32_t remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}

static int
axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta)
{
	uint64_t adjust;
	uint32_t addend, diff;
	unsigned int neg_adjust = 0;

	if (delta < 0) {
		neg_adjust = 1;
		delta = -delta;
	}

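	/* Scale the addend by the requested delta:
	 * diff = tstamp_addend * |delta| / 1e9, then add the difference for
	 * positive deltas and subtract it for negative ones.
	 */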
	adjust = (uint64_t)pdata->tstamp_addend;
	adjust *= delta;
	diff = (uint32_t)div_u64(adjust, 1000000000UL);
	addend = (neg_adjust) ? pdata->tstamp_addend - diff :
				pdata->tstamp_addend + diff;
	pdata->tstamp_addend = addend;
	axgbe_update_tstamp_addend(pdata, addend);
	return 0;
}

static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct timespec timestamp_delta;

	axgbe_adjfreq(pdata, delta);
	pdata->systime_tc.nsec += delta;

	if (delta < 0) {
		delta = -delta;
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
				timestamp_delta.tv_nsec, 1);
	} else {
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
				timestamp_delta.tv_nsec, 0);
	}
	return 0;
}

static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp)
{
	uint64_t nsec;
	struct axgbe_port *pdata = dev->data->dev_private;

	nsec = AXGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += AXGMAC_IOREAD(pdata, MAC_STNR);
	*timestamp = rte_ns_to_timespec(nsec);
	return 0;
}

static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp)
{
	unsigned int count = 100;
	struct axgbe_port *pdata = dev->data->dev_private;

	AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec);
	AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);

	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out update timestamp\n");
	return 0;
}

static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
		uint32_t addend)
{
	unsigned int count = 100;

	AXGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

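	/* Wait for the addend update to complete */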
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n");
}

static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
		unsigned int nsec)
{
	unsigned int count = 100;

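	/* Set the seconds and nanoseconds time values */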
	AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
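	/* Issue the command to initialize the system time */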
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

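	/* Wait for the time initialization to complete */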
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
}

static int
axgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;
	uint64_t dividend;
	struct timespec timestamp;
	uint64_t nsec;

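	/* Set one nanosecond accuracy */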
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

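	/* Set fine timestamp update */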
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

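	/* Overwrite earlier timestamps */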
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

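	/* Enable processing of PTP over Ethernet frames */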
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
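	/* Enable timestamping for all packets */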
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);

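	/* Enable the timestamp feature */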
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

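	/* Exit if timestamp is not enabled */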
	if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
		PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
		return 0;
	}

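	/* Sub-second increment value */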
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC);
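	/* Sub-nanosecond increment value */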
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC);

	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
	dividend = 50000000;
	dividend <<= 32;
	pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);

	axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	axgbe_set_tstamp_time(pdata, 0, 0);

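	/* Initialize the timecounter */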
	memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter));

	pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK;
	pdata->systime_tc.cc_shift = 0;
	pdata->systime_tc.nsec_mask = 0;

	PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");

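	/* Update the counter once with CLOCK_REALTIME */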
	clock_gettime(CLOCK_REALTIME, &timestamp);
	nsec = rte_timespec_to_ns(&timestamp);
	nsec = rte_timecounter_update(&pdata->systime_tc, nsec);
	axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec);
	return 0;
}

static int
axgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;

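	/* Disable timestamping for all packets */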
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0);
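	/* Disable the addend register */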
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0);
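	/* Disable fine timestamp update */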
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0);
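	/* Disable the timestamp feature */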
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0);
	return 0;
}

static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp, uint32_t flags)
{
	uint64_t nsec = 0;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx, pmt;
	struct axgbe_rx_queue *rxq = *dev->data->rx_queues;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx];

	while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		rte_delay_ms(1);
	if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) {
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) &&
		    !AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_CONTEXT_DESC3, TSD)) {
			pmt = AXGMAC_GET_BITS_LE(desc->write.desc3,
						 RX_CONTEXT_DESC3, PMT);
			nsec = rte_le_to_cpu_32(desc->write.desc1);
			nsec *= NSEC_PER_SEC;
			nsec += rte_le_to_cpu_32(desc->write.desc0);
			if (nsec != 0xffffffffffffffffULL) {
				if (pmt == 0x01)
					*timestamp = rte_ns_to_timespec(nsec);
				PMD_DRV_LOG(DEBUG,
					    "flags = 0x%x nsec = %"PRIu64"\n",
					    flags, nsec);
			}
		}
	}

	return 0;
}

static int
axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp)
{
	uint64_t nsec;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int tx_snr, tx_ssr;

	rte_delay_us(5);
	if (pdata->vdata->tx_tstamp_workaround) {
		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
	} else {
		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
	}
	if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
		PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n");
		return 0;
	}
	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;
	PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n",
		    nsec, tx_ssr, tx_snr);
	*timestamp = rte_ns_to_timespec(nsec);
	return 0;
}

static int
axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned long vid_bit, vid_idx;

	vid_bit = VLAN_TABLE_BIT(vid);
	vid_idx = VLAN_TABLE_IDX(vid);

	if (on) {
		PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
			    vid, pdata->eth_dev->device->name);
		pdata->active_vlans[vid_idx] |= vid_bit;
	} else {
		PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
			    vid, pdata->eth_dev->device->name);
		pdata->active_vlans[vid_idx] &= ~vid_bit;
	}
	pdata->hw_if.update_vlan_hash_table(pdata);
	return 0;
}

static int
axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t reg = 0;
	uint32_t qinq = 0;

	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);

	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
		if (qinq) {
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "tag supported 0x8100/0x88A8\n");
			PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");
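			/* Enable the inner VLAN tag */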
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
		} else {
			PMD_DRV_LOG(ERR,
				    "Inner type not supported in single tag\n");
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
		if (qinq) {
			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
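			/* Use the outer VLAN tag */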
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);

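			/* CSVL = 1: insert the S-VLAN type (0x88A8) on Tx */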
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
			PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
		} else {
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "only TPID 0x8100/0x88A8 is supported\n");
		}
		break;
	case RTE_ETH_VLAN_TYPE_MAX:
		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
		break;
	case RTE_ETH_VLAN_TYPE_UNKNOWN:
		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
		break;
	}
	return 0;
}

static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
}

static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag disabled EDVLP:qinq=0x%x\n", qinq);
}

static int
axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct axgbe_port *pdata = dev->data->dev_private;

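	/* Indicate that VLAN Tx CTAGs come from context descriptors */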
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_stripping(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_stripping(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_filtering(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_filtering(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
			axgbe_vlan_extend_enable(pdata);
			/* Set global registers with default ethertype */
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
					    RTE_ETHER_TYPE_VLAN);
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
					    RTE_ETHER_TYPE_VLAN);
		} else {
			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
			axgbe_vlan_extend_disable(pdata);
		}
	}
	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
	mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

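	/* Hardware feature register 0 */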
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

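	/* Hardware feature register 1 */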
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

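	/* Hardware feature register 2 */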
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

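	/* Hardware feature register 3 */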
	hw_feat->tx_q_vlan_tag_ins = AXGMAC_GET_BITS(mac_hfr3,
						     MAC_HWF3R, CBTISEL);
	hw_feat->no_of_vlan_extn = AXGMAC_GET_BITS(mac_hfr3,
						   MAC_HWF3R, NRVF);

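	/* Translate the Hash Table size into actual number */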
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

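	/* Translate the address width setting into actual number */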
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

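	/*
	 * The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */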
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

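	/* Translate the fifo sizes into actual numbers */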
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
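	/* Set all function pointers */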
	axgbe_init_all_fptrs(pdata);

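	/* Populate the hardware features */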
	axgbe_get_all_hw_features(pdata);

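	/* Set default max values if not provided */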
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

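	/*
	 * Calculate the number of Tx and Rx rings to be created
	 *  - Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *    the number of Tx queues to the number of Tx channels
	 *    enabled
	 *  - Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *    number of Rx queues or maximum allowed
	 */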
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}
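/*
 * Return the device id of the PCI root complex at 0000:00:00.0,
 * used to identify the platform this device is attached to.
 */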
static uint16_t
get_pci_rc_devid(void)
{
	char pci_sysfs[PATH_MAX];
	const struct rte_pci_addr pci_rc_addr = {0, 0, 0, 0};
	unsigned long device_id;

	snprintf(pci_sysfs, sizeof(pci_sysfs), "%s/" PCI_PRI_FMT "/device",
		 rte_pci_get_sysfs_path(), pci_rc_addr.domain,
		 pci_rc_addr.bus, pci_rc_addr.devid, pci_rc_addr.function);

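	/* Read the root complex device id from sysfs */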
	if (eal_parse_sysfs_value(pci_sysfs, &device_id) < 0) {
		PMD_INIT_LOG(ERR, "Error in reading PCI sysfs\n");
		return 0;
	}

	return (uint16_t)device_id;
}

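/*
 * Returns 0 on success, a negative value on failure.
 */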
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

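	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */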
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	pdata = eth_dev->data->dev_private;
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

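	/* version specific driver data */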
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

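	/*
	 * Use the PCI root complex device id to pick the XPCS window
	 * registers for this platform.
	 */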
	switch (get_pci_rc_devid()) {
	case AMD_PCI_RV_ROOT_COMPLEX_ID:
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
		break;
	case AMD_PCI_YC_ROOT_COMPLEX_ID:
		pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
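		/* Yellow Carp devices do not need cdr workaround */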
		pdata->vdata->an_cdr_workaround = 0;
		break;
	case AMD_PCI_SNOWY_ROOT_COMPLEX_ID:
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
		break;
	default:
		PMD_DRV_LOG(ERR, "No supported devices found\n");
		return -ENODEV;
	}

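	/* Configure the PCS indirect addressing support */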
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

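	/* Retrieve the MAC address */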
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

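	/* Allocate memory for storing MAC addresses */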
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

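	/* Allocate memory for storing hash filter MAC addresses */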
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

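	/* Fall back to a random MAC address if the hardware one is invalid */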
	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

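	/* Copy the permanent MAC address */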
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

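	/* Clock settings */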
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

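	/* Set the DMA coherency values */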
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

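	/* Set the maximum channels and queues */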
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

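	/* Set the hardware channel and queue counts */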
	axgbe_set_counts(pdata);

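	/* Set the maximum fifo amounts */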
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

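	/* Issue software reset to DMA */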
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

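	/* Set default configuration data */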
	axgbe_default_config(pdata);

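	/* Set default max values if not provided */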
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
axgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	axgbe_dev_clear_queues(eth_dev);

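	/* disable uio intr before callback unregister */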
	rte_intr_disable(pci_dev->intr_handle);
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_driver, driver, NOTICE);