// SPDX-License-Identifier: GPL-2.0
/* Xilinx AXI Ethernet MCDMA (multi-channel DMA) support
 *
 * This file provides the MCDMA-specific parts of the Xilinx AXI Ethernet
 * driver: Tx/Rx buffer descriptor ring setup and teardown, interrupt
 * handling, DMA error recovery, per-queue ethtool statistics, and sysfs
 * attributes for the MCDMA channel observer and Tx weight registers.
 */

#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>

#include "xilinx_axienet.h"

/* Name of an ethtool per-queue statistic string */
struct axienet_stat {
	const char *name;
};

static struct axienet_stat axienet_get_tx_strings_stats[] = {
	{ "txq0_packets" },
	{ "txq0_bytes" },
	{ "txq1_packets" },
	{ "txq1_bytes" },
	{ "txq2_packets" },
	{ "txq2_bytes" },
	{ "txq3_packets" },
	{ "txq3_bytes" },
	{ "txq4_packets" },
	{ "txq4_bytes" },
	{ "txq5_packets" },
	{ "txq5_bytes" },
	{ "txq6_packets" },
	{ "txq6_bytes" },
	{ "txq7_packets" },
	{ "txq7_bytes" },
	{ "txq8_packets" },
	{ "txq8_bytes" },
	{ "txq9_packets" },
	{ "txq9_bytes" },
	{ "txq10_packets" },
	{ "txq10_bytes" },
	{ "txq11_packets" },
	{ "txq11_bytes" },
	{ "txq12_packets" },
	{ "txq12_bytes" },
	{ "txq13_packets" },
	{ "txq13_bytes" },
	{ "txq14_packets" },
	{ "txq14_bytes" },
	{ "txq15_packets" },
	{ "txq15_bytes" },
};

static struct axienet_stat axienet_get_rx_strings_stats[] = {
	{ "rxq0_packets" },
	{ "rxq0_bytes" },
	{ "rxq1_packets" },
	{ "rxq1_bytes" },
	{ "rxq2_packets" },
	{ "rxq2_bytes" },
	{ "rxq3_packets" },
	{ "rxq3_bytes" },
	{ "rxq4_packets" },
	{ "rxq4_bytes" },
	{ "rxq5_packets" },
	{ "rxq5_bytes" },
	{ "rxq6_packets" },
	{ "rxq6_bytes" },
	{ "rxq7_packets" },
	{ "rxq7_bytes" },
	{ "rxq8_packets" },
	{ "rxq8_bytes" },
	{ "rxq9_packets" },
	{ "rxq9_bytes" },
	{ "rxq10_packets" },
	{ "rxq10_bytes" },
	{ "rxq11_packets" },
	{ "rxq11_bytes" },
	{ "rxq12_packets" },
	{ "rxq12_bytes" },
	{ "rxq13_packets" },
	{ "rxq13_bytes" },
	{ "rxq14_packets" },
	{ "rxq14_bytes" },
	{ "rxq15_packets" },
	{ "rxq15_bytes" },
};

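/**
 * axienet_mcdma_tx_bd_free - Release MCDMA Tx buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 * @q:		Pointer to the DMA queue structure
 *
 * Release the coherent memory that backs the Tx descriptor ring and, when
 * the hardware has no DRE, the bounce buffers allocated in
 * axienet_mcdma_tx_q_init().
 */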
void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
					     struct axienet_dma_q *q)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (q->txq_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*q->txq_bd_v) * lp->tx_bd_num,
				  q->txq_bd_v,
				  q->tx_bd_p);
	}
	if (q->tx_bufs) {
		dma_free_coherent(ndev->dev.parent,
				  XAE_MAX_PKT_LEN * lp->tx_bd_num,
				  q->tx_bufs,
				  q->tx_bufs_dma);
	}
}

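/**
 * axienet_mcdma_rx_bd_free - Release MCDMA Rx buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 * @q:		Pointer to the DMA queue structure
 *
 * Unmap and free the socket buffers attached to the Rx descriptors, then
 * release the coherent memory that backs the Rx descriptor ring.
 */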
void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
					     struct axienet_dma_q *q)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* The ring may not have been allocated if queue init failed early,
	 * so check before walking it.
	 */
	if (!q->rxq_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_unmap_single(ndev->dev.parent, q->rxq_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (q->rxq_bd_v[i].sw_id_offset));
	}

	dma_free_coherent(ndev->dev.parent,
			  sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
			  q->rxq_bd_v,
			  q->rx_bd_p);
}

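/**
 * axienet_mcdma_tx_q_init - Set up the MCDMA Tx descriptor ring
 * @ndev:	Pointer to the net_device structure
 * @q:		Pointer to the DMA queue structure
 *
 * Return: 0 on success, -ENOMEM when descriptor or bounce-buffer
 * allocation fails.
 *
 * Allocate the Tx descriptor ring (and, without DRE, the bounce buffers),
 * chain the descriptors into a circular list, program the interrupt
 * coalesce and delay timers, and start the channel. On failure every Tx
 * queue is freed again.
 */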
int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
					   struct axienet_dma_q *q)
{
	u32 cr, chan_en;
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	q->tx_bd_ci = 0;
	q->tx_bd_tail = 0;

	q->txq_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*q->txq_bd_v) * lp->tx_bd_num,
					 &q->tx_bd_p, GFP_KERNEL);
	if (!q->txq_bd_v)
		goto out;

	if (!q->eth_hasdre) {
		q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
						XAE_MAX_PKT_LEN * lp->tx_bd_num,
						&q->tx_bufs_dma,
						GFP_KERNEL);
		if (!q->tx_bufs)
			goto out;

		for (i = 0; i < lp->tx_bd_num; i++)
			q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
	}

	/* Link the descriptors into a circular list */
	for (i = 0; i < lp->tx_bd_num; i++) {
		q->txq_bd_v[i].next = q->tx_bd_p +
				      sizeof(*q->txq_bd_v) *
				      ((i + 1) % lp->tx_bd_num);
	}

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XMCDMA_COALESCE_MASK) |
	      (lp->coalesce_count_tx << XMCDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XMCDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XMCDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);

	/* Write to the RS (Run-stop) bits in the engine and channel control
	 * registers. The Tx channel is now ready to run, but it only starts
	 * transmitting once the tail pointer is written in the xmit path.
	 */
	axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
			  q->tx_bd_p);
	cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
	axienet_dma_out32(q, XMCDMA_CR_OFFSET,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
	chan_en |= (1 << (q->chan_id - 1));
	axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);

	return 0;
out:
	for_each_tx_dma_queue(lp, i)
		axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
	return -ENOMEM;
}

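/**
 * axienet_mcdma_rx_q_init - Set up the MCDMA Rx descriptor ring
 * @ndev:	Pointer to the net_device structure
 * @q:		Pointer to the DMA queue structure
 *
 * Return: 0 on success, -ENOMEM when descriptor or skb allocation fails.
 *
 * Allocate the Rx descriptor ring, attach a DMA-mapped skb to each
 * descriptor, program the interrupt coalesce and delay timers, and start
 * the channel. On failure every Rx queue is freed again.
 */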
int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
					   struct axienet_dma_q *q)
{
	u32 cr, chan_en;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	q->rx_bd_ci = 0;
	q->rx_offset = XMCDMA_CHAN_RX_OFFSET;

	q->rxq_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
					 &q->rx_bd_p, GFP_KERNEL);
	if (!q->rxq_bd_v)
		goto out;

	for (i = 0; i < lp->rx_bd_num; i++) {
		q->rxq_bd_v[i].next = q->rx_bd_p +
				      sizeof(*q->rxq_bd_v) *
				      ((i + 1) % lp->rx_bd_num);

		skb = netdev_alloc_skb(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		/* Ensure that the skb is completely updated
		 * prior to mapping the DMA
		 */
		wmb();

		q->rxq_bd_v[i].sw_id_offset = (phys_addr_t)skb;
		q->rxq_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		q->rxq_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			      q->rx_offset);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XMCDMA_COALESCE_MASK) |
	      (lp->coalesce_count_rx << XMCDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XMCDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XMCDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			  q->rx_offset, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
			  q->rx_offset, q->rx_bd_p);
	cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
	axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			      q->rx_offset);
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
			  q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
			  (lp->rx_bd_num - 1)));
	chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
	chan_en |= (1 << (q->chan_id - 1));
	axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);

	return 0;

out:
	for_each_rx_dma_queue(lp, i)
		axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
	return -ENOMEM;
}

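/* Map an MCDMA channel id to its Tx queue index, or -ENODEV if unknown */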
static inline int get_mcdma_tx_q(struct axienet_local *lp, u32 chan_id)
{
	int i;

	for_each_tx_dma_queue(lp, i) {
		if (chan_id == lp->chan_num[i])
			return lp->qnum[i];
	}

	return -ENODEV;
}

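/* Map an MCDMA channel id to its Rx queue index, or -ENODEV if unknown */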
static inline int get_mcdma_rx_q(struct axienet_local *lp, u32 chan_id)
{
	int i;

	for_each_rx_dma_queue(lp, i) {
		if (chan_id == lp->chan_num[i])
			return lp->qnum[i];
	}

	return -ENODEV;
}

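/* Return the id of the lowest Tx channel flagged in the interrupt serviced
 * register, or -ENODEV if no channel is pending.
 */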
static inline int map_dma_q_txirq(int irq, struct axienet_local *lp)
{
	int i, chan_sermask;
	u16 chan_id = 1;
	struct axienet_dma_q *q = lp->dq[0];

	chan_sermask = axienet_dma_in32(q, XMCDMA_TXINT_SER_OFFSET);

	for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
	     i <<= 1, chan_id++) {
		if (chan_sermask & i)
			return chan_id;
	}

	return -ENODEV;
}

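/**
 * axienet_mcdma_tx_irq - MCDMA Tx interrupt handler
 * @irq:	IRQ number
 * @_ndev:	Pointer to the net_device structure
 *
 * Return: IRQ_HANDLED if an interrupt was serviced, IRQ_NONE otherwise.
 *
 * Acknowledge completion and delay interrupts and reap finished transmit
 * descriptors; on a channel error, log the failing descriptor, mask the
 * channel interrupts and schedule the DMA error tasklet.
 */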
irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int i, j = map_dma_q_txirq(irq, lp);
	struct axienet_dma_q *q;

	if (j < 0)
		return IRQ_NONE;

	i = get_mcdma_tx_q(lp, j);
	q = lp->dq[i];

	status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id));
	if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id), status);
		axienet_start_xmit_done(lp->ndev, q);
		goto out;
	}
	if (!(status & XMCDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XMCDMA_IRQ_ERR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: %pa\n",
			&q->txq_bd_v[q->tx_bd_ci].phys);

		cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XMCDMA_IRQ_ALL_MASK;
		/* Write to the Tx channel control register */
		axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);

		cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				      q->rx_offset);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XMCDMA_IRQ_ALL_MASK;
		/* Write to the Rx channel control register */
		axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				  q->rx_offset, cr);

		tasklet_schedule(&lp->dma_err_tasklet[i]);
		axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
				  q->rx_offset, status);
	}
out:
	return IRQ_HANDLED;
}

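/* Return the id of the lowest Rx channel flagged in the interrupt serviced
 * register, or -ENODEV if no channel is pending.
 */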
static inline int map_dma_q_rxirq(int irq, struct axienet_local *lp)
{
	int i, chan_sermask;
	u16 chan_id = 1;
	struct axienet_dma_q *q = lp->dq[0];

	chan_sermask = axienet_dma_in32(q, XMCDMA_RXINT_SER_OFFSET +
					q->rx_offset);

	for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
	     i <<= 1, chan_id++) {
		if (chan_sermask & i)
			return chan_id;
	}

	return -ENODEV;
}

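/**
 * axienet_mcdma_rx_irq - MCDMA Rx interrupt handler
 * @irq:	IRQ number
 * @_ndev:	Pointer to the net_device structure
 *
 * Return: IRQ_HANDLED if an interrupt was serviced, IRQ_NONE otherwise.
 *
 * On completion or delay interrupts, mask further Rx interrupts for the
 * channel and schedule NAPI; on a channel error, mask the channel
 * interrupts and schedule the DMA error tasklet.
 */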
irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int i, j = map_dma_q_rxirq(irq, lp);
	struct axienet_dma_q *q;

	if (j < 0)
		return IRQ_NONE;

	i = get_mcdma_rx_q(lp, j);
	q = lp->dq[i];

	status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
				  q->rx_offset);
	if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
		/* Mask completion and delay interrupts; NAPI re-enables
		 * them when polling is done.
		 */
		cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				      q->rx_offset);
		cr &= ~(XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				  q->rx_offset, cr);
		napi_schedule(&lp->napi[i]);
	}

	if (!(status & XMCDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	if (status & XMCDMA_IRQ_ERR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: %pa\n",
			&q->rxq_bd_v[q->rx_bd_ci].phys);

		cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XMCDMA_IRQ_ALL_MASK;
		/* Write to the Tx channel control register */
		axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);

		cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				      q->rx_offset);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XMCDMA_IRQ_ALL_MASK;
		/* Write to the Rx channel control register */
		axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
				  q->rx_offset, cr);

		tasklet_schedule(&lp->dma_err_tasklet[i]);
		axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
				  q->rx_offset, status);
	}

	return IRQ_HANDLED;
}

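/**
 * axienet_strings - Fill in the ethtool per-queue statistic string names
 * @ndev:	Pointer to the net_device structure
 * @sset:	String set requested (only ETH_SS_STATS is handled)
 * @data:	Output buffer, ETH_GSTRING_LEN bytes per string
 *
 * Emit "txqN_packets"/"txqN_bytes" pairs for each Tx queue followed by
 * "rxqN_packets"/"rxqN_bytes" pairs for each Rx queue, where N is the
 * queue's hardware channel id minus one.
 */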
void axienet_strings(struct net_device *ndev, u32 sset, u8 *data)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q;
	int i, j, k = 0;

	for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
		if (j >= lp->num_tx_queues)
			break;
		q = lp->dq[j];
		if (i % 2 == 0)
			k = (q->chan_id - 1) * 2;
		/* The stat names are NUL-terminated literals shorter than
		 * ETH_GSTRING_LEN, so bound the copy by the string rather
		 * than reading past the literal.
		 */
		if (sset == ETH_SS_STATS)
			strscpy(data + i * ETH_GSTRING_LEN,
				axienet_get_tx_strings_stats[k].name,
				ETH_GSTRING_LEN);
		++i;
		k++;
		if (i % 2 == 0)
			++j;
	}
	k = 0;
	for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
		    AXIENET_RX_SSTATS_LEN(lp);) {
		if (j >= lp->num_rx_queues)
			break;
		q = lp->dq[j];
		if (i % 2 == 0)
			k = (q->chan_id - 1) * 2;
		if (sset == ETH_SS_STATS)
			strscpy(data + i * ETH_GSTRING_LEN,
				axienet_get_rx_strings_stats[k].name,
				ETH_GSTRING_LEN);
		++i;
		k++;
		if (i % 2 == 0)
			++j;
	}
}

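/**
 * axienet_sset_count - Return the number of ethtool statistics strings
 * @ndev:	Pointer to the net_device structure
 * @sset:	String set requested
 *
 * Return: the number of per-queue Tx and Rx statistics for ETH_SS_STATS,
 * -EOPNOTSUPP for any other string set.
 */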
int axienet_sset_count(struct net_device *ndev, int sset)
{
	struct axienet_local *lp = netdev_priv(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (AXIENET_TX_SSTATS_LEN(lp) + AXIENET_RX_SSTATS_LEN(lp));
	default:
		return -EOPNOTSUPP;
	}
}

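/**
 * axienet_get_stats - Fill in the ethtool per-queue statistics
 * @ndev:	Pointer to the net_device structure
 * @stats:	Pointer to the ethtool_stats structure
 * @data:	Output array, in the same order as the strings emitted by
 *		axienet_strings()
 */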
void axienet_get_stats(struct net_device *ndev,
		       struct ethtool_stats *stats,
		       u64 *data)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q;
	unsigned int i = 0, j;

	for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
		if (j >= lp->num_tx_queues)
			break;

		q = lp->dq[j];
		data[i++] = q->tx_packets;
		data[i++] = q->tx_bytes;
		++j;
	}
	for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
		    AXIENET_RX_SSTATS_LEN(lp);) {
		if (j >= lp->num_rx_queues)
			break;

		q = lp->dq[j];
		data[i++] = q->rx_packets;
		data[i++] = q->rx_bytes;
		++j;
	}
}

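/**
 * axienet_mcdma_err_handler - Tasklet handler for the Axi MCDMA error
 * @data:	Pointer to the DMA queue structure (cast to unsigned long)
 *
 * Resets the device and the MCDMA queue: the MAC is disabled, the
 * descriptor rings are cleaned and re-armed, and the previous coalesce and
 * delay timer settings are reprogrammed before the MAC options are
 * restored.
 */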
void __maybe_unused axienet_mcdma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i, chan_en;
	struct axienet_dma_q *q = (struct axienet_dma_q *)data;
	struct axienet_local *lp = q->lp;
	struct net_device *ndev = lp->ndev;
	struct aximcdma_bd *cur_p;

	lp->axienet_config->setoptions(ndev, lp->options &
				       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
		mutex_lock(&lp->mii_bus->mdio_lock);
		/* Disable the MDIO interface until the Axi Ethernet reset
		 * has completed. The reset resets the whole core, including
		 * MDIO; if MDIO is left enabled while the reset is in
		 * progress, it is broken afterwards.
		 */
		axienet_mdio_disable(lp);
		axienet_mdio_wait_until_ready(lp);
	}

	__axienet_device_reset(q);

	if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
		axienet_mdio_enable(lp);
		axienet_mdio_wait_until_ready(lp);
		mutex_unlock(&lp->mii_bus->mdio_lock);
	}

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &q->txq_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->tx_skb)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
		cur_p->tx_skb = 0;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &q->rxq_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	q->tx_bd_ci = 0;
	q->tx_bd_tail = 0;
	q->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			      q->rx_offset);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XMCDMA_COALESCE_MASK) |
	      (lp->coalesce_count_rx << XMCDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XMCDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XMCDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			  q->rx_offset, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XMCDMA_COALESCE_MASK) |
	      (lp->coalesce_count_tx << XMCDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XMCDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XMCDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
			  q->rx_offset, q->rx_bd_p);
	cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
	axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
			      q->rx_offset);
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
			  q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
			  (lp->rx_bd_num - 1)));
	chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
	chan_en |= (1 << (q->chan_id - 1));
	axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);

	/* Write to the RS (Run-stop) bits in the engine and channel control
	 * registers. The Tx channel is now ready to run, but it only starts
	 * transmitting once the tail pointer is written in the xmit path.
	 */
	axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
			  q->tx_bd_p);
	cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
	axienet_dma_out32(q, XMCDMA_CR_OFFSET,
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
	axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
			  cr | XMCDMA_CR_RUNSTOP_MASK);
	chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
	chan_en |= (1 << (q->chan_id - 1));
	axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);

	if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
		axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
		axienet_status &= ~XAE_RCW1_RX_MASK;
		axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
	}

	if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
		axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
		if (axienet_status & XAE_INT_RXRJECT_MASK)
			axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	}

	if (lp->axienet_config->mactype != XAXIENET_10G_25G)
		axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and transmitter
	 * disabled.
	 */
	lp->axienet_config->setoptions(ndev, lp->options &
				       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	lp->axienet_config->setoptions(ndev, lp->options);
}

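/**
 * axienet_mcdma_tx_probe - Probe the MCDMA Tx queues
 * @pdev:	Pointer to the platform_device structure
 * @np:		Pointer to the MCDMA device node
 * @lp:		Pointer to the axienet local structure
 *
 * Return: 0 always.
 *
 * Look up the "mm2s_ch%d_introut" interrupt for each Tx queue and record
 * whether the hardware provides data realignment (DRE).
 */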
int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
					  struct device_node *np,
					  struct axienet_local *lp)
{
	int i;
	char dma_name[24];

	for_each_tx_dma_queue(lp, i) {
		struct axienet_dma_q *q;

		q = lp->dq[i];

		q->dma_regs = lp->mcdma_regs;
		snprintf(dma_name, sizeof(dma_name), "mm2s_ch%d_introut",
			 q->chan_id);
		q->tx_irq = platform_get_irq_byname(pdev, dma_name);
		q->eth_hasdre = of_property_read_bool(np,
						      "xlnx,include-dre");
		spin_lock_init(&q->tx_lock);
	}
	of_node_put(np);

	return 0;
}

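/**
 * axienet_mcdma_rx_probe - Probe the MCDMA Rx queues
 * @pdev:	Pointer to the platform_device structure
 * @lp:		Pointer to the axienet local structure
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0 always.
 *
 * Look up the "s2mm_ch%d_introut" interrupt for each Rx queue and register
 * the NAPI poll handler.
 */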
int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
					  struct axienet_local *lp,
					  struct net_device *ndev)
{
	int i;
	char dma_name[24];

	for_each_rx_dma_queue(lp, i) {
		struct axienet_dma_q *q;

		q = lp->dq[i];

		q->dma_regs = lp->mcdma_regs;
		snprintf(dma_name, sizeof(dma_name), "s2mm_ch%d_introut",
			 q->chan_id);
		q->rx_irq = platform_get_irq_byname(pdev, dma_name);

		spin_lock_init(&q->rx_lock);

		netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
			       XAXIENET_NAPI_WEIGHT);
	}

	return 0;
}

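/* sysfs attributes exposing the MCDMA channel observer registers of
 * queue 0. The rxch (ingress) variants read from the S2MM register space,
 * the txch (egress) variants from the MM2S register space.
 */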
static ssize_t rxch_obs1_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 1 Contents is 0x%x\n",
		       reg);
}

static ssize_t rxch_obs2_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 2 Contents is 0x%x\n",
		       reg);
}

static ssize_t rxch_obs3_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 3 Contents is 0x%x\n",
		       reg);
}

static ssize_t rxch_obs4_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 4 Contents is 0x%x\n",
		       reg);
}

static ssize_t rxch_obs5_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 5 Contents is 0x%x\n",
		       reg);
}

static ssize_t rxch_obs6_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET + q->rx_offset);

	return sprintf(buf, "Ingress Channel Observer 6 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs1_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET);

	return sprintf(buf, "Egress Channel Observer 1 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs2_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET);

	return sprintf(buf, "Egress Channel Observer 2 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs3_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET);

	return sprintf(buf, "Egress Channel Observer 3 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs4_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET);

	return sprintf(buf, "Egress Channel Observer 4 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs5_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET);

	return sprintf(buf, "Egress Channel Observer 5 Contents is 0x%x\n",
		       reg);
}

static ssize_t txch_obs6_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	u32 reg;

	reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET);

	return sprintf(buf, "Egress Channel Observer 6 Contents is 0x%x\n",
		       reg);
}

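/* chan_weight: read back the last programmed Tx channel weight, or write a
 * hex value encoding the channel id (bits 7:4) and weight (bits 3:0) to
 * update the MCDMA Tx arbitration weight for that channel.
 */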
static ssize_t chan_weight_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);

	return sprintf(buf, "chan_id is %d and weight is %d\n",
		       lp->chan_id, lp->weight);
}

static ssize_t chan_weight_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_dma_q *q = lp->dq[0];
	int ret;
	u16 flags, chan_id;
	u32 val;

	ret = kstrtou16(buf, 16, &flags);
	if (ret)
		return ret;

	lp->chan_id = (flags & 0xF0) >> 4;
	lp->weight = flags & 0x0F;

	/* Channels 0-7 live in TXWEIGHT0, channels 8-15 in TXWEIGHT1 */
	if (lp->chan_id < 8)
		val = axienet_dma_in32(q, XMCDMA_TXWEIGHT0_OFFSET);
	else
		val = axienet_dma_in32(q, XMCDMA_TXWEIGHT1_OFFSET);

	if (lp->chan_id > 7)
		chan_id = lp->chan_id - 8;
	else
		chan_id = lp->chan_id;

	val &= ~XMCDMA_TXWEIGHT_CH_MASK(chan_id);
	val |= lp->weight << XMCDMA_TXWEIGHT_CH_SHIFT(chan_id);

	if (lp->chan_id < 8)
		axienet_dma_out32(q, XMCDMA_TXWEIGHT0_OFFSET, val);
	else
		axienet_dma_out32(q, XMCDMA_TXWEIGHT1_OFFSET, val);

	return count;
}

static DEVICE_ATTR_RW(chan_weight);
static DEVICE_ATTR_RO(rxch_obs1);
static DEVICE_ATTR_RO(rxch_obs2);
static DEVICE_ATTR_RO(rxch_obs3);
static DEVICE_ATTR_RO(rxch_obs4);
static DEVICE_ATTR_RO(rxch_obs5);
static DEVICE_ATTR_RO(rxch_obs6);
static DEVICE_ATTR_RO(txch_obs1);
static DEVICE_ATTR_RO(txch_obs2);
static DEVICE_ATTR_RO(txch_obs3);
static DEVICE_ATTR_RO(txch_obs4);
static DEVICE_ATTR_RO(txch_obs5);
static DEVICE_ATTR_RO(txch_obs6);

static const struct attribute *mcdma_attrs[] = {
	&dev_attr_chan_weight.attr,
	&dev_attr_rxch_obs1.attr,
	&dev_attr_rxch_obs2.attr,
	&dev_attr_rxch_obs3.attr,
	&dev_attr_rxch_obs4.attr,
	&dev_attr_rxch_obs5.attr,
	&dev_attr_rxch_obs6.attr,
	&dev_attr_txch_obs1.attr,
	&dev_attr_txch_obs2.attr,
	&dev_attr_txch_obs3.attr,
	&dev_attr_txch_obs4.attr,
	&dev_attr_txch_obs5.attr,
	&dev_attr_txch_obs6.attr,
	NULL,
};

static const struct attribute_group mcdma_attributes = {
	.attrs = (struct attribute **)mcdma_attrs,
};

int axeinet_mcdma_create_sysfs(struct kobject *kobj)
{
	return sysfs_create_group(kobj, &mcdma_attributes);
}

void axeinet_mcdma_remove_sysfs(struct kobject *kobj)
{
	sysfs_remove_group(kobj, &mcdma_attributes);
}