#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_rxtx.h"

/* Reverse the bit order of a 32-bit word */
static uint32_t bitrev32(uint32_t x)
{
        x = (x >> 16) | (x << 16);
        x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
        x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
        x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
        x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
        return x;
}

/* Return the 1-based position of the most significant set bit, or 0 if none */
static int get_lastbit_set(int x)
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xffff0000)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xff000000)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xf0000000)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xc0000000)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000)) {
                x <<= 1;
                r -= 1;
        }
        return r;
}

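/*
 * The maximum frame length is the configured MTU plus the Ethernet header
 * (RTE_ETHER_HDR_LEN, 14 bytes), the frame CRC (RTE_ETHER_CRC_LEN, 4 bytes)
 * and one VLAN tag (RTE_VLAN_HLEN, 4 bytes); a 1500-byte MTU therefore
 * corresponds to a 1522-byte maximum frame.
 */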
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
        return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
               RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN;
}

/* Return 1 when the pending MDIO single-command operation has completed */
static int mdio_complete(struct axgbe_port *pdata)
{
        if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
                return 1;

        return 0;
}

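/*
 * External MII register access goes through the MAC single-command
 * registers: the port/device address and register number are written to
 * MAC_MDIOSCAR, then MAC_MDIOSCCDR is programmed with the data (for writes),
 * the command (1 = write, 3 = read, as used below) and the BUSY bit to start
 * the transaction. Completion is polled with mdio_complete() for up to one
 * second (one rte_get_timer_hz() worth of cycles).
 */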
static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
                                    int reg, u16 val)
{
        unsigned int mdio_sca, mdio_sccd;
        uint64_t timeout;

        mdio_sca = 0;
        AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
        AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
        AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
        AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
        AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        timeout = rte_get_timer_cycles() + rte_get_timer_hz();
        while (time_before(rte_get_timer_cycles(), timeout)) {
                rte_delay_us(100);
                if (mdio_complete(pdata))
                        return 0;
        }

        PMD_DRV_LOG(ERR, "MDIO write operation timed out\n");
        return -ETIMEDOUT;
}

static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
                                   int reg)
{
        unsigned int mdio_sca, mdio_sccd;
        uint64_t timeout;

        mdio_sca = 0;
        AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
        AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
        AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
        AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        timeout = rte_get_timer_cycles() + rte_get_timer_hz();

        while (time_before(rte_get_timer_cycles(), timeout)) {
                rte_delay_us(100);
                if (mdio_complete(pdata))
                        goto success;
        }

        PMD_DRV_LOG(ERR, "MDIO read operation timed out\n");
        return -ETIMEDOUT;

success:
        return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
                                  enum axgbe_mdio_mode mode)
{
        unsigned int reg_val = 0;

        switch (mode) {
        case AXGBE_MDIO_MODE_CL22:
                if (port > AXGMAC_MAX_C22_PORT)
                        return -EINVAL;
                reg_val |= (1 << port);
                break;
        case AXGBE_MDIO_MODE_CL45:
                break;
        default:
                return -EINVAL;
        }
        AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

        return 0;
}

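/*
 * XPCS (MMD) registers are reached through an indirect mmio window: the
 * register offset is converted to a byte address, its upper bits select the
 * window via pdata->xpcs_window_sel_reg, and the data is accessed at
 * pdata->xpcs_window plus the offset within that window. Accesses are
 * serialized with pdata->xpcs_mutex because the window selection is shared
 * by all readers and writers.
 */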
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
                                  int prtad __rte_unused, int mmd_reg)
{
        unsigned int mmd_address, index, offset;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* Convert the 16-bit register offset to a byte address, select the
         * window containing it and read the data from within that window.
         */
        mmd_address <<= 1;
        index = mmd_address & ~pdata->xpcs_window_mask;
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

        pthread_mutex_lock(&pdata->xpcs_mutex);

        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        mmd_data = XPCS16_IOREAD(pdata, offset);

        pthread_mutex_unlock(&pdata->xpcs_mutex);

        return mmd_data;
}

static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
                                    int prtad __rte_unused,
                                    int mmd_reg, int mmd_data)
{
        unsigned int mmd_address, index, offset;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* Convert the 16-bit register offset to a byte address, select the
         * window containing it and write the data within that window.
         */
        mmd_address <<= 1;
        index = mmd_address & ~pdata->xpcs_window_mask;
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

        pthread_mutex_lock(&pdata->xpcs_mutex);

        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        XPCS16_IOWRITE(pdata, offset, mmd_data);

        pthread_mutex_unlock(&pdata->xpcs_mutex);
}

static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
                               int mmd_reg)
{
        switch (pdata->vdata->xpcs_access) {
        case AXGBE_XPCS_ACCESS_V1:
                PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
                return -1;
        case AXGBE_XPCS_ACCESS_V2:
        default:
                return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
        }
}

static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
                                 int mmd_reg, int mmd_data)
{
        switch (pdata->vdata->xpcs_access) {
        case AXGBE_XPCS_ACCESS_V1:
                PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
                return;
        case AXGBE_XPCS_ACCESS_V2:
        default:
                return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
        }
}

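/*
 * The MAC_TCR SS field encodes the MAC speed; the mapping used below is
 * 0x00 for 10 Gbps, 0x02 for 2.5 Gbps and 0x03 for 1 Gbps. The field is
 * only rewritten when the requested speed differs from the current setting.
 */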
static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
        unsigned int ss;

        switch (speed) {
        case SPEED_1000:
                ss = 0x03;
                break;
        case SPEED_2500:
                ss = 0x02;
                break;
        case SPEED_10000:
                ss = 0x00;
                break;
        default:
                return -EINVAL;
        }

        if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
                AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

        return 0;
}

static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = RTE_MIN(pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = AXGMAC_IOREAD(pdata, reg);
                AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                AXGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++) {
                unsigned int ehfc = 0;

                /* Enable hardware flow control only if thresholds are set */
                if (pdata->rx_rfd[i])
                        ehfc = 1;

                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

                PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
                            ehfc ? "enabled" : "disabled", i);
        }

        /* Set MAC flow control */
        max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = RTE_MIN(pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = AXGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

                /* Set pause time */
                AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                AXGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
        AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
        AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
        if (pdata->tx_pause)
                axgbe_enable_tx_flow_control(pdata);
        else
                axgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
        if (pdata->rx_pause)
                axgbe_enable_rx_flow_control(pdata);
        else
                axgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
        axgbe_config_tx_flow_control(pdata);
        axgbe_config_rx_flow_control(pdata);

        AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

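/*
 * Rx flow control thresholds, per queue: RFA is the fill level at which
 * flow control is activated and RFD the level at which it is deactivated.
 * The values programmed below are expressed in AXGMAC_FLOW_CONTROL_UNIT
 * increments (via AXGMAC_FLOW_CONTROL_VALUE()) and are scaled against the
 * per-queue fifo size and the aligned maximum frame size.
 */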
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
                                               unsigned int queue,
                                               unsigned int q_fifo_size)
{
        unsigned int frame_fifo_size;
        unsigned int rfa, rfd;

        frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

        if (q_fifo_size <= 2048) {
                /* Fifo too small to support flow control */
                pdata->rx_rfa[queue] = 0;
                pdata->rx_rfd[queue] = 0;
                return;
        }

        if (q_fifo_size <= 4096) {
                /* Between 2048 and 4096: use minimal fixed thresholds */
                pdata->rx_rfa[queue] = 0;
                pdata->rx_rfd[queue] = 1;
                return;
        }

        if (q_fifo_size <= frame_fifo_size) {
                /* Between 4096 and the maximum frame size */
                pdata->rx_rfa[queue] = 2;
                pdata->rx_rfd[queue] = 5;
                return;
        }

        if (q_fifo_size <= (frame_fifo_size * 3)) {
                /* Between one and three maximum-size frames of fifo space */
                rfa = q_fifo_size - frame_fifo_size;
                rfd = rfa + (frame_fifo_size / 2);
        } else {
                /* More than three maximum-size frames of fifo space */
                rfa = frame_fifo_size * 2;
                rfa += AXGMAC_FLOW_CONTROL_UNIT;
                rfd = rfa + frame_fifo_size;
        }

        pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
        pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
        unsigned int q_fifo_size;
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++) {
                q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

                axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
        }
}

static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++) {
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
                                        pdata->rx_rfa[i]);
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
                                        pdata->rx_rfd[i]);
        }
}

static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
        return 0;
}

static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
{
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
        return 0;
}

static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
{
        /* Enable VLAN filtering */
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero. Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering. This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
        return 0;
}

static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
{
        /* Disable VLAN filtering */
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
        return 0;
}

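/*
 * The 16-bit VLAN hash filter value is built from the CRC-32 (little-endian,
 * reflected polynomial 0xedb88320) of the 12 valid VID bits: the CRC is
 * complemented, bit-reversed, and its top four bits select which of the 16
 * hash-table bits to set (see axgbe_update_vlan_hash_table() below).
 */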
static u32 axgbe_vid_crc32_le(__le16 vid_le)
{
        u32 poly = 0xedb88320;
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_lastbit_set(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= poly;
        }
        return crc;
}

static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
{
        u32 crc = 0;
        u16 vid;
        __le16 vid_le = 0;
        u16 vlan_hash_table = 0;
        unsigned int reg = 0;
        unsigned long vid_idx, vid_valid;

        /* Generate the VLAN Hash Table value from the active VLAN IDs */
        for (vid = 0; vid < VLAN_N_VID; vid++) {
                vid_idx = VLAN_TABLE_IDX(vid);
                vid_valid = pdata->active_vlans[vid_idx];
                vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
                if (vid_valid & 1)
                        PMD_DRV_LOG(DEBUG,
                                    "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
                                    vid, vid_idx, pdata->active_vlans[vid_idx]);
                else
                        continue;

                vid_le = rte_cpu_to_le_16(vid);
                crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
                vlan_hash_table |= (1 << crc);
                PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
                            crc, vlan_hash_table);
        }

        /* Program the VLAN Hash Table register */
        AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
        reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
        PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
        return 0;
}

static int __axgbe_exit(struct axgbe_port *pdata)
{
        unsigned int count = 2000;

        /* Issue a software reset */
        AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
        rte_delay_us(10);

        /* Poll until the software reset (SWR) bit self-clears */
        while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
                rte_delay_us(500);

        if (!count)
                return -EBUSY;

        return 0;
}

static int axgbe_exit(struct axgbe_port *pdata)
{
        int ret;

        /* To guard against possible incorrectly generated interrupts,
         * issue the software reset twice.
         */
        ret = __axgbe_exit(pdata);
        if (ret)
                return ret;

        return __axgbe_exit(pdata);
}

static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
        unsigned int i, count;

        if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
                return 0;

        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

        /* Poll until all Tx queues are flushed */
        for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
                while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
                                                         MTL_Q_TQOMR, FTQ))
                        rte_delay_us(500);

                if (!count)
                        return -EBUSY;
        }

        return 0;
}

static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
        /* Set enhanced addressing mode */
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

        /* Set maximum outstanding read/write requests */
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

        /* Set the system bus mode */
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
        AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
        unsigned int arcache, awcache, arwcache;

        arcache = 0;
        AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
        AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

        awcache = 0;
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
        AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
        AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

        arwcache = 0;
        AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
        AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
        AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
        AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
        AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
        AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
        /* Force DMA to operate on second packet before closing descriptors
         * of first packet
         */
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
                txq = pdata->eth_dev->data->tx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
                                        pdata->tx_osp_mode);
        }

        return 0;
}

static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
                txq = pdata->eth_dev->data->tx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
                                        pdata->pblx8);
        }
        return 0;
}

static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
                txq = pdata->eth_dev->data->tx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
                                        pdata->tx_pbl);
        }

        return 0;
}

static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
        struct axgbe_rx_queue *rxq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
                rxq = pdata->eth_dev->data->rx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
                                        pdata->rx_pbl);
        }

        return 0;
}

static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
{
        struct axgbe_rx_queue *rxq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
                rxq = pdata->eth_dev->data->rx_queues[i];

                rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
                        RTE_PKTMBUF_HEADROOM;
                rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
                        ~(AXGBE_RX_BUF_ALIGN - 1);

                if (rxq->buf_size > pdata->rx_buf_size)
                        pdata->rx_buf_size = rxq->buf_size;

                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
                                        rxq->buf_size);
        }
}

static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
                               unsigned int index, unsigned int val)
{
        unsigned int wait;

        if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                return -EBUSY;

        AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        return 0;

                rte_delay_us(1500);
        }

        return -EBUSY;
}

int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
{
        struct rte_eth_rss_conf *rss_conf;
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key;
        int ret;

        rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

        /* Use the user-supplied key when provided, otherwise the default */
        if (!rss_conf->rss_key)
                key = (unsigned int *)&pdata->rss_key;
        else
                key = (unsigned int *)rss_conf->rss_key;

        while (key_regs--) {
                ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
                                          key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = axgbe_write_rss_reg(pdata,
                                          AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                          pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int axgbe_enable_rss(struct axgbe_port *pdata)
{
        int ret;

        /* Program the hash key */
        ret = axgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = axgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static void axgbe_rss_options(struct axgbe_port *pdata)
{
        struct rte_eth_rss_conf *rss_conf;
        uint64_t rss_hf;

        rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        pdata->rss_hf = rss_conf->rss_hf;
        rss_hf = rss_conf->rss_hf;

        if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
        if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
        if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

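/*
 * RSS configuration: when RSS is enabled, a random hash key is generated,
 * the indirection table entries are spread round-robin across the configured
 * Rx queues, the hash-type options derived from rss_hf are programmed, and
 * RSS is then enabled in MAC_RSSCR; otherwise RSS is explicitly disabled.
 */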
static int axgbe_config_rss(struct axgbe_port *pdata)
{
        uint32_t i;

        if (pdata->rss_enable) {
                /* Initialize the RSS hash key with random values */
                uint32_t *key = (uint32_t *)pdata->rss_key;

                for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
                        *key++ = (uint32_t)rte_rand();
                for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
                        AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
                                        i % pdata->eth_dev->data->nb_rx_queues);
                axgbe_rss_options(pdata);
                if (axgbe_enable_rss(pdata)) {
                        PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
                        return -1;
                }
        } else {
                AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
        }

        return 0;
}

static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
        struct axgbe_tx_queue *txq;
        unsigned int dma_ch_isr, dma_ch_ier;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
                txq = pdata->eth_dev->data->tx_queues[i];

                /* Clear all the interrupts which are set */
                dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
                AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

                /* Clear all interrupt enable bits */
                dma_ch_ier = 0;

                /* Keep the normal interrupt summary (NIE) disabled for
                 * polled operation; enable the abnormal interrupt summary
                 * (AIE) and fatal bus error (FBEE) interrupts.
                 */
                AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
                AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
                AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

                /* Leave the Receive Buffer Unavailable interrupt disabled */
                AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

                AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
        }
}

static void wrapper_tx_desc_init(struct axgbe_port *pdata)
{
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
                txq = pdata->eth_dev->data->tx_queues[i];
                txq->cur = 0;
                txq->dirty = 0;

                /* Update the total number of Tx descriptors */
                AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);

                /* Update the starting address of the descriptor ring */
                AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
                                   high32_value(txq->ring_phys_addr));
                AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
                                   low32_value(txq->ring_phys_addr));
        }
}

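/*
 * Rx descriptor ring setup: every descriptor is populated with a freshly
 * allocated mbuf, ownership is handed to the hardware through the OWN bit,
 * and the channel registers are programmed with the ring length, the ring
 * base address and a tail pointer that points at the last descriptor.
 */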
static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
        struct axgbe_rx_queue *rxq;
        struct rte_mbuf *mbuf;
        volatile union axgbe_rx_desc *desc;
        unsigned int i, j;

        for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
                rxq = pdata->eth_dev->data->rx_queues[i];

                /* Initialize the software ring state */
                rxq->mbuf_alloc = 0;
                rxq->cur = 0;
                rxq->dirty = 0;
                desc = AXGBE_GET_DESC_PT(rxq, 0);

                for (j = 0; j < rxq->nb_desc; j++) {
                        mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                        if (mbuf == NULL) {
                                PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
                                            (unsigned int)rxq->queue_id, j);
                                axgbe_dev_rx_queue_release(pdata->eth_dev, i);
                                return -ENOMEM;
                        }
                        rxq->sw_ring[j] = mbuf;

                        /* Populate the mbuf and hand it to hardware */
                        mbuf->next = NULL;
                        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                        mbuf->nb_segs = 1;
                        mbuf->port = rxq->port_id;
                        desc->read.baddr =
                                rte_cpu_to_le_64(
                                        rte_mbuf_data_iova_default(mbuf));
                        rte_wmb();
                        AXGMAC_SET_BITS_LE(desc->read.desc3,
                                           RX_NORMAL_DESC3, OWN, 1);
                        rte_wmb();
                        rxq->mbuf_alloc++;
                        desc++;
                }

                /* Update the total number of Rx descriptors */
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
                                   rxq->nb_desc - 1);

                /* Update the starting address of the descriptor ring */
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
                                   high32_value(rxq->ring_phys_addr));
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
                                   low32_value(rxq->ring_phys_addr));

                /* Update the Rx descriptor tail pointer */
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
                                   low32_value(rxq->ring_phys_addr +
                                               (rxq->nb_desc - 1) *
                                               sizeof(union axgbe_rx_desc)));
        }
        return 0;
}

static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
{
        unsigned int i;

        /* Set Tx to weighted round robin scheduling algorithm */
        AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

        /* Set Tx traffic classes to use WRR algorithm with equal weights */
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                        MTL_TSA_ETS);
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
        }

        /* Set Rx to strict priority algorithm */
        AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
                                     unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}

static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
                                     unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
        unsigned int fifo_size;
        unsigned int q_fifo_size;
        unsigned int p_fifo, i;

        fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
                            pdata->hw_feat.rx_fifo_size);
        q_fifo_size = fifo_size / pdata->rx_q_count;

        /* Calculate the fifo setting by dividing the queue's fifo size
         * by the fifo allocation increment (with 0 representing the
         * base allocation increment, so decrement the result by 1).
         */
        p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
        if (p_fifo)
                p_fifo--;

        for (i = 0; i < pdata->rx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
        pdata->fifo = p_fifo;

        /* Calculate and configure flow control thresholds */
        axgbe_calculate_flow_control_threshold(pdata);
        axgbe_config_flow_control_threshold(pdata);

        PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
                    pdata->rx_q_count, q_fifo_size);
}

static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
{
        unsigned int fifo_size;
        unsigned int q_fifo_size;
        unsigned int p_fifo, i;

        fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
                            pdata->hw_feat.tx_fifo_size);
        q_fifo_size = fifo_size / pdata->tx_q_count;

        /* Calculate the fifo setting by dividing the queue's fifo size
         * by the fifo allocation increment (with 0 representing the
         * base allocation increment, so decrement the result by 1).
         */
        p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
        if (p_fifo)
                p_fifo--;

        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);

        PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
                    pdata->tx_q_count, q_fifo_size);
}

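/*
 * Queue mapping: the Tx queues are distributed evenly across the hardware
 * traffic classes (any remainder queues go to the lowest-numbered classes).
 * When RSS is in use, each per-queue field of the MTL_RQDCM registers is
 * written with 0x80, which (per the register layout assumed here) selects
 * dynamic DMA-channel mapping so the RSS result chooses the channel;
 * MTL_RQDCM_Q_PER_REG queue fields are packed into each register.
 */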
static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
{
        unsigned int qptc, qptc_extra, queue;
        unsigned int i, j, reg, reg_val;

        /* Map the MTL Tx queues to traffic classes
         * Note: Tx queue count >= traffic class count
         */
        qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
        qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++, queue++) {
                        PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
                        AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                                Q2TCMAP, i);
                }
                if (i < qptc_extra) {
                        PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
                        AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                                Q2TCMAP, i);
                        queue++;
                }
        }

        if (pdata->rss_enable) {
                /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
                reg = MTL_RQDCM0R;
                reg_val = 0;
                for (i = 0; i < pdata->rx_q_count;) {
                        reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

                        if ((i % MTL_RQDCM_Q_PER_REG) &&
                            (i != pdata->rx_q_count))
                                continue;

                        AXGMAC_IOWRITE(pdata, reg, reg_val);

                        reg += MTL_RQDCM_INC;
                        reg_val = 0;
                }
        }
}

static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}

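/*
 * Unicast MAC hash filtering works the same way as the VLAN hash filter:
 * the CRC-32 of the MAC address is complemented, bit-reversed, shifted by
 * hash_table_shift to keep only the top bits, and the result selects a
 * hash-table register (crc >> 5) and a bit within it (crc & 0x1f).
 */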
/* Bitwise little-endian CRC-32 (reflected polynomial 0xedb88320) */
static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}

void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
{
        uint32_t crc, htable_index, htable_bitmask;

        crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
        crc >>= pdata->hash_table_shift;
        htable_index = crc >> 5;
        htable_bitmask = 1 << (crc & 0x1f);

        if (add) {
                pdata->uc_hash_table[htable_index] |= htable_bitmask;
                pdata->uc_hash_mac_addr++;
        } else {
                pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
                pdata->uc_hash_mac_addr--;
        }
        PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
                    add ? "set" : "clear", (crc & 0x1f), htable_index);

        AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
                       pdata->uc_hash_table[htable_index]);
}

void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (addr) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = addr[0];
                mac_addr[1] = addr[1];
                mac_addr[2] = addr[2];
                mac_addr[3] = addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = addr[4];
                mac_addr[1] = addr[5];

                /* Address Enable: use this address for perfect filtering */
                AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
                    addr ? "set" : "clear", index);

        AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
        AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
}

static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] << 8) | (addr[0] << 0);

        AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}

static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
{
        struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

        pdata->hash_table_shift = 0;
        pdata->hash_table_count = 0;
        pdata->uc_hash_mac_addr = 0;
        memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));

        if (hw_feat->hash_table_size) {
                pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
                pdata->hash_table_count = hw_feat->hash_table_size / 32;
        }
}

static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
        axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}

static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
{
        unsigned int val;

        val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;

        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
        axgbe_set_speed(pdata, pdata->phy_speed);
}

static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
{
        if (pdata->rx_csum_enable)
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
        else
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
}

static void axgbe_config_mmc(struct axgbe_port *pdata)
{
        struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

        /* Reset the driver's copy of the MMC statistics */
        memset(stats, 0, sizeof(*stats));

        /* Set counters to reset on read */
        AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

        /* Reset the counters */
        AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

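/*
 * Hardware initialization sequence: flush any stale Tx queue contents, then
 * bring up the blocks in order - DMA (bus, cache, descriptor rings and
 * interrupts), MTL (scheduling, queue mapping, store-and-forward modes,
 * thresholds and fifo sizes) and finally the MAC (address and hash filters,
 * jumbo frames, flow control, speed, checksum offload and MMC counters).
 */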
static int axgbe_init(struct axgbe_port *pdata)
{
        int ret;

        /* Flush Tx queues */
        ret = axgbe_flush_tx_queues(pdata);
        if (ret)
                return ret;

        /* Initialize DMA related features */
        axgbe_config_dma_bus(pdata);
        axgbe_config_dma_cache(pdata);
        axgbe_config_edma_control(pdata);
        axgbe_config_osp_mode(pdata);
        axgbe_config_pblx8(pdata);
        axgbe_config_tx_pbl_val(pdata);
        axgbe_config_rx_pbl_val(pdata);
        axgbe_config_rx_buffer_size(pdata);
        axgbe_config_rss(pdata);
        wrapper_tx_desc_init(pdata);
        ret = wrapper_rx_desc_init(pdata);
        if (ret)
                return ret;
        axgbe_enable_dma_interrupts(pdata);

        /* Initialize MTL related features */
        axgbe_config_mtl_mode(pdata);
        axgbe_config_queue_mapping(pdata);
        axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
        axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
        axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
        axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
        axgbe_config_tx_fifo_size(pdata);
        axgbe_config_rx_fifo_size(pdata);

        axgbe_enable_mtl_interrupts(pdata);

        /* Initialize MAC related features */
        axgbe_config_mac_hash_table(pdata);
        axgbe_config_mac_address(pdata);
        axgbe_config_jumbo_enable(pdata);
        axgbe_config_flow_control(pdata);
        axgbe_config_mac_speed(pdata);
        axgbe_config_checksum_offload(pdata);
        axgbe_config_mmc(pdata);

        return 0;
}

void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
{
        hw_if->exit = axgbe_exit;
        hw_if->config_flow_control = axgbe_config_flow_control;

        hw_if->init = axgbe_init;

        hw_if->read_mmd_regs = axgbe_read_mmd_regs;
        hw_if->write_mmd_regs = axgbe_write_mmd_regs;

        hw_if->set_speed = axgbe_set_speed;

        hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
        hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
        hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;

        hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
        hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;

        hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping;
        hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering;
        hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering;
        hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table;
}