1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include "qemu/osdep.h"
25#include "hw/irq.h"
26#include "hw/net/imx_fec.h"
27#include "hw/qdev-properties.h"
28#include "migration/vmstate.h"
29#include "sysemu/dma.h"
30#include "qemu/log.h"
31#include "qemu/module.h"
32#include "net/checksum.h"
33#include "net/eth.h"
34#include "trace.h"
35
36
37#include <zlib.h>
38
/* Upper bound on buffer descriptors walked per TX/RX pass, so a guest
 * that programs a circular descriptor chain cannot hang QEMU. */
#define IMX_MAX_DESC 1024
40
41static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
42{
43 static char tmp[20];
44 sprintf(tmp, "index %d", index);
45 return tmp;
46}
47
48static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
49{
50 switch (index) {
51 case ENET_FRBR:
52 return "FRBR";
53 case ENET_FRSR:
54 return "FRSR";
55 case ENET_MIIGSK_CFGR:
56 return "MIIGSK_CFGR";
57 case ENET_MIIGSK_ENR:
58 return "MIIGSK_ENR";
59 default:
60 return imx_default_reg_name(s, index);
61 }
62}
63
64static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
65{
66 switch (index) {
67 case ENET_RSFL:
68 return "RSFL";
69 case ENET_RSEM:
70 return "RSEM";
71 case ENET_RAEM:
72 return "RAEM";
73 case ENET_RAFL:
74 return "RAFL";
75 case ENET_TSEM:
76 return "TSEM";
77 case ENET_TAEM:
78 return "TAEM";
79 case ENET_TAFL:
80 return "TAFL";
81 case ENET_TIPG:
82 return "TIPG";
83 case ENET_FTRL:
84 return "FTRL";
85 case ENET_TACC:
86 return "TACC";
87 case ENET_RACC:
88 return "RACC";
89 case ENET_ATCR:
90 return "ATCR";
91 case ENET_ATVR:
92 return "ATVR";
93 case ENET_ATOFF:
94 return "ATOFF";
95 case ENET_ATPER:
96 return "ATPER";
97 case ENET_ATCOR:
98 return "ATCOR";
99 case ENET_ATINC:
100 return "ATINC";
101 case ENET_ATSTMP:
102 return "ATSTMP";
103 case ENET_TGSR:
104 return "TGSR";
105 case ENET_TCSR0:
106 return "TCSR0";
107 case ENET_TCCR0:
108 return "TCCR0";
109 case ENET_TCSR1:
110 return "TCSR1";
111 case ENET_TCCR1:
112 return "TCCR1";
113 case ENET_TCSR2:
114 return "TCSR2";
115 case ENET_TCCR2:
116 return "TCCR2";
117 case ENET_TCSR3:
118 return "TCSR3";
119 case ENET_TCCR3:
120 return "TCCR3";
121 default:
122 return imx_default_reg_name(s, index);
123 }
124}
125
126static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
127{
128 switch (index) {
129 case ENET_EIR:
130 return "EIR";
131 case ENET_EIMR:
132 return "EIMR";
133 case ENET_RDAR:
134 return "RDAR";
135 case ENET_TDAR:
136 return "TDAR";
137 case ENET_ECR:
138 return "ECR";
139 case ENET_MMFR:
140 return "MMFR";
141 case ENET_MSCR:
142 return "MSCR";
143 case ENET_MIBC:
144 return "MIBC";
145 case ENET_RCR:
146 return "RCR";
147 case ENET_TCR:
148 return "TCR";
149 case ENET_PALR:
150 return "PALR";
151 case ENET_PAUR:
152 return "PAUR";
153 case ENET_OPD:
154 return "OPD";
155 case ENET_IAUR:
156 return "IAUR";
157 case ENET_IALR:
158 return "IALR";
159 case ENET_GAUR:
160 return "GAUR";
161 case ENET_GALR:
162 return "GALR";
163 case ENET_TFWR:
164 return "TFWR";
165 case ENET_RDSR:
166 return "RDSR";
167 case ENET_TDSR:
168 return "TDSR";
169 case ENET_MRBR:
170 return "MRBR";
171 default:
172 if (s->is_fec) {
173 return imx_fec_reg_name(s, index);
174 } else {
175 return imx_enet_reg_name(s, index);
176 }
177 }
178}
179
180
181
182
183
184
185
186static bool imx_eth_is_multi_tx_ring(void *opaque)
187{
188 IMXFECState *s = IMX_FEC(opaque);
189
190 return s->tx_ring_num > 1;
191}
192
/*
 * Migration subsection carrying the extra TX ring head pointers.  It is
 * only sent when more than one TX ring is configured (see .needed), so
 * migration with single-ring configurations stays compatible.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
204
/*
 * Main migration state: the whole register file, the RX/TX ring heads
 * and the emulated PHY registers.  Ring 1/2 heads travel in the
 * optional subsection above.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
225
/* Bit assignments of the PHY interrupt source/mask registers
 * (MII registers 29/30 of the modelled PHY). */
#define PHY_INT_ENERGYON (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT (1 << 5)
#define PHY_INT_DOWN (1 << 4)
#define PHY_INT_AUTONEG_LP (1 << 3)
#define PHY_INT_PARFAULT (1 << 2)
#define PHY_INT_AUTONEG_PAGE (1 << 1)

/* Forward declaration: the PHY helpers below need to re-evaluate the
 * MAC interrupt lines. */
static void imx_eth_update(IMXFECState *s);
235
236
237
238
239
240
241
/*
 * Propagate a PHY interrupt state change.  The model has no dedicated
 * PHY interrupt line; it simply re-evaluates the MAC IRQ outputs.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}
246
/*
 * Mirror the QEMU net backend link state into the PHY registers.
 * 0x0024 covers the MII BMSR "link status" (bit 2) and
 * "auto-negotiation complete" (bit 5) bits.
 */
static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status.  */
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    imx_phy_update_irq(s);
}
262
263static void imx_eth_set_link(NetClientState *nc)
264{
265 imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
266}
267
/*
 * Reset the emulated PHY to its power-on defaults.
 * NOTE(review): the constants look like a typical 10/100 PHY's reset
 * values (BMSR capabilities, autoneg-enabled BMCR, 10/100 advertise
 * mask) — confirm against the datasheet of the PHY being modelled.
 */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
279
/*
 * Handle a guest read of an MII management register.
 *
 * @reg encodes both the PHY address (reg / 32) and the register number
 * (reg % 32); reads addressed to any PHY other than s->phy_num return 0.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
                      TYPE_IMX_FEC, __func__, phy);
        return 0;
    }

    reg %= 32;

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* PHY Identifier 1 */
        val = 0x0007;
        break;
    case 3:     /* PHY Identifier 2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-Negotiation Advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-Negotiation Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-Negotiation Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source: cleared on read, IRQ re-evaluated */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
342
343static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
344{
345 uint32_t phy = reg / 32;
346
347 if (phy != s->phy_num) {
348 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
349 TYPE_IMX_FEC, __func__, phy);
350 return;
351 }
352
353 reg %= 32;
354
355 trace_imx_phy_write(val, phy, reg);
356
357 switch (reg) {
358 case 0:
359 if (val & 0x8000) {
360 imx_phy_reset(s);
361 } else {
362 s->phy_control = val & 0x7980;
363
364 if (val & 0x1000) {
365 s->phy_status |= 0x0020;
366 }
367 }
368 break;
369 case 4:
370 s->phy_advertise = (val & 0x2d7f) | 0x80;
371 break;
372 case 30:
373 s->phy_int_mask = val & 0xff;
374 imx_phy_update_irq(s);
375 break;
376 case 17:
377 case 18:
378 case 27:
379 case 31:
380 qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
381 TYPE_IMX_FEC, __func__, reg);
382 break;
383 default:
384 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
385 TYPE_IMX_FEC, __func__, reg);
386 break;
387 }
388}
389
/* Fetch a FEC buffer descriptor from guest memory at @addr. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
396
/* Write a FEC buffer descriptor back to guest memory at @addr. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
401
/* Fetch an (enhanced) ENET buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
409
/* Write an (enhanced) ENET buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
414
415static void imx_eth_update(IMXFECState *s)
416{
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
443 (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
444 qemu_set_irq(s->irq[1], 1);
445 } else {
446 qemu_set_irq(s->irq[1], 0);
447 }
448
449 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
450 qemu_set_irq(s->irq[0], 1);
451 } else {
452 qemu_set_irq(s->irq[0], 0);
453 }
454}
455
/*
 * Transmit frames from the (single) FEC TX descriptor ring.
 *
 * Buffers are gathered into s->frame until a descriptor with the
 * "last" flag is seen, at which point the frame is handed to the net
 * backend.  The walk is bounded by IMX_MAX_DESC so a mis-programmed
 * circular ring cannot loop forever.
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Descriptor not ready (still owned by software):
             * nothing more to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp over-long frames and flag babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame: send it out. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Hand the descriptor back to software. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor, wrapping on ENET_BD_W. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
505
/*
 * Transmit frames from one of the three ENET TX descriptor rings.
 *
 * @index is the TDAR/TDAR1/TDAR2 register index that was written and
 * selects the ring; any other value is a programming error (abort).
 * Descriptor option bits may request IP/TCP/UDP checksum insertion
 * before the frame is handed to the net backend.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Descriptor not ready: nothing more to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp over-long frames and flag babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;
            /* Checksum offloads requested for this frame. */
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame: send it out. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Mark descriptor updated by "hardware". */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Hand the descriptor back to software. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor, wrapping on ENET_BD_W. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
606
607static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
608{
609 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
610 imx_enet_do_tx(s, index);
611 } else {
612 imx_fec_do_tx(s);
613 }
614}
615
/*
 * Re-evaluate RX availability by peeking at the current RX descriptor:
 * RDAR stays set only while that descriptor is empty (guest-owned).
 * When @flush is set and RX is possible, kick the net layer so any
 * queued packets get redelivered.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
630
/*
 * Device reset: restore the register file to its power-on values
 * (NOTE(review): the constants look like the i.MX reference manual
 * reset defaults — confirm against the specific SoC manual), rebuild
 * the MAC address registers from the NIC config, clear the descriptor
 * pointers and reset the PHY.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
670
671static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
672{
673 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
674 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
675 return 0;
676}
677
678static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
679{
680 switch (index) {
681 case ENET_FRBR:
682 case ENET_FRSR:
683 case ENET_MIIGSK_CFGR:
684 case ENET_MIIGSK_ENR:
685 return s->regs[index];
686 default:
687 return imx_default_read(s, index);
688 }
689}
690
691static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
692{
693 switch (index) {
694 case ENET_RSFL:
695 case ENET_RSEM:
696 case ENET_RAEM:
697 case ENET_RAFL:
698 case ENET_TSEM:
699 case ENET_TAEM:
700 case ENET_TAFL:
701 case ENET_TIPG:
702 case ENET_FTRL:
703 case ENET_TACC:
704 case ENET_RACC:
705 case ENET_ATCR:
706 case ENET_ATVR:
707 case ENET_ATOFF:
708 case ENET_ATPER:
709 case ENET_ATCOR:
710 case ENET_ATINC:
711 case ENET_ATSTMP:
712 case ENET_TGSR:
713 case ENET_TCSR0:
714 case ENET_TCCR0:
715 case ENET_TCSR1:
716 case ENET_TCCR1:
717 case ENET_TCSR2:
718 case ENET_TCCR2:
719 case ENET_TCSR3:
720 case ENET_TCCR3:
721 return s->regs[index];
722 default:
723 return imx_default_read(s, index);
724 }
725}
726
727static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
728{
729 uint32_t value = 0;
730 IMXFECState *s = IMX_FEC(opaque);
731 uint32_t index = offset >> 2;
732
733 switch (index) {
734 case ENET_EIR:
735 case ENET_EIMR:
736 case ENET_RDAR:
737 case ENET_TDAR:
738 case ENET_ECR:
739 case ENET_MMFR:
740 case ENET_MSCR:
741 case ENET_MIBC:
742 case ENET_RCR:
743 case ENET_TCR:
744 case ENET_PALR:
745 case ENET_PAUR:
746 case ENET_OPD:
747 case ENET_IAUR:
748 case ENET_IALR:
749 case ENET_GAUR:
750 case ENET_GALR:
751 case ENET_TFWR:
752 case ENET_RDSR:
753 case ENET_TDSR:
754 case ENET_MRBR:
755 value = s->regs[index];
756 break;
757 default:
758 if (s->is_fec) {
759 value = imx_fec_read(s, index);
760 } else {
761 value = imx_enet_read(s, index);
762 }
763 break;
764 }
765
766 trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);
767
768 return value;
769}
770
771static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
772{
773 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
774 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
775 return;
776}
777
/* Write handler for the FEC-only registers. */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Writable bits 9:2; bit 10 always reads back set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Writing the enable bit also reports the block as ready. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
800
/* Write handler for the ENET-only registers (per-register write masks). */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* FIFO watermark registers: 9 writable bits. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag (write-1-to-clear) */
        s->regs[index] &= ~(value & 0x0000000f);
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        /* Bit 7 is write-1-to-clear; bits 0x7d are plain read/write. */
        s->regs[index] &= ~(value & 0x00000080);
        s->regs[index] &= ~0x0000007d;
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
867
/*
 * MMIO write handler for the FEC/ENET register bank.
 * Registers common to both variants are handled here; everything else
 * is delegated to imx_fec_write()/imx_enet_write().  All paths that
 * fall out of the switch re-evaluate the interrupt outputs.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event register is write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* A write means "RX descriptors available"; only honoured while
         * the MAC is enabled. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Rings 1/2 only exist in multi-ring configurations. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fallthrough */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        /* Disabling the MAC re-arms all descriptor pointers. */
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MII management frame: bit 29 distinguishes read from write;
         * bits 27:18 encode the PHY and register address. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Keep the NIC config in sync with the MAC address registers. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering.  */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Descriptor base addresses are 4- (FEC) or 8-byte (ENET)
         * aligned; writing one re-arms the corresponding ring head. */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
1038
1039static bool imx_eth_can_receive(NetClientState *nc)
1040{
1041 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1042
1043 return !!s->regs[ENET_RDAR];
1044}
1045
1046static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
1047 size_t len)
1048{
1049 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1050 IMXFECBufDesc bd;
1051 uint32_t flags = 0;
1052 uint32_t addr;
1053 uint32_t crc;
1054 uint32_t buf_addr;
1055 uint8_t *crc_ptr;
1056 unsigned int buf_len;
1057 size_t size = len;
1058
1059 trace_imx_fec_receive(size);
1060
1061 if (!s->regs[ENET_RDAR]) {
1062 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1063 TYPE_IMX_FEC, __func__);
1064 return 0;
1065 }
1066
1067
1068 size += 4;
1069 crc = cpu_to_be32(crc32(~0, buf, size));
1070 crc_ptr = (uint8_t *) &crc;
1071
1072
1073 if (size > ENET_MAX_FRAME_SIZE) {
1074 size = ENET_MAX_FRAME_SIZE;
1075 flags |= ENET_BD_TR | ENET_BD_LG;
1076 }
1077
1078
1079 if (size > (s->regs[ENET_RCR] >> 16)) {
1080 flags |= ENET_BD_LG;
1081 }
1082
1083 addr = s->rx_descriptor;
1084 while (size > 0) {
1085 imx_fec_read_bd(&bd, addr);
1086 if ((bd.flags & ENET_BD_E) == 0) {
1087
1088
1089
1090
1091
1092
1093 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1094 TYPE_IMX_FEC, __func__);
1095 break;
1096 }
1097 buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
1098 bd.length = buf_len;
1099 size -= buf_len;
1100
1101 trace_imx_fec_receive_len(addr, bd.length);
1102
1103
1104 if (size < 4) {
1105 buf_len += size - 4;
1106 }
1107 buf_addr = bd.data;
1108 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1109 buf += buf_len;
1110 if (size < 4) {
1111 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1112 crc_ptr, 4 - size);
1113 crc_ptr += 4 - size;
1114 }
1115 bd.flags &= ~ENET_BD_E;
1116 if (size == 0) {
1117
1118 bd.flags |= flags | ENET_BD_L;
1119
1120 trace_imx_fec_receive_last(bd.flags);
1121
1122 s->regs[ENET_EIR] |= ENET_INT_RXF;
1123 } else {
1124 s->regs[ENET_EIR] |= ENET_INT_RXB;
1125 }
1126 imx_fec_write_bd(&bd, addr);
1127
1128 if ((bd.flags & ENET_BD_W) != 0) {
1129 addr = s->regs[ENET_RDSR];
1130 } else {
1131 addr += sizeof(bd);
1132 }
1133 }
1134 s->rx_descriptor = addr;
1135 imx_eth_enable_rx(s, false);
1136 imx_eth_update(s);
1137 return len;
1138}
1139
1140static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
1141 size_t len)
1142{
1143 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1144 IMXENETBufDesc bd;
1145 uint32_t flags = 0;
1146 uint32_t addr;
1147 uint32_t crc;
1148 uint32_t buf_addr;
1149 uint8_t *crc_ptr;
1150 unsigned int buf_len;
1151 size_t size = len;
1152 bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;
1153
1154 trace_imx_enet_receive(size);
1155
1156 if (!s->regs[ENET_RDAR]) {
1157 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1158 TYPE_IMX_FEC, __func__);
1159 return 0;
1160 }
1161
1162
1163 size += 4;
1164 crc = cpu_to_be32(crc32(~0, buf, size));
1165 crc_ptr = (uint8_t *) &crc;
1166
1167 if (shift16) {
1168 size += 2;
1169 }
1170
1171
1172 if (size > s->regs[ENET_FTRL]) {
1173 size = s->regs[ENET_FTRL];
1174 flags |= ENET_BD_TR | ENET_BD_LG;
1175 }
1176
1177
1178 if (size > (s->regs[ENET_RCR] >> 16)) {
1179 flags |= ENET_BD_LG;
1180 }
1181
1182 addr = s->rx_descriptor;
1183 while (size > 0) {
1184 imx_enet_read_bd(&bd, addr);
1185 if ((bd.flags & ENET_BD_E) == 0) {
1186
1187
1188
1189
1190
1191
1192 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1193 TYPE_IMX_FEC, __func__);
1194 break;
1195 }
1196 buf_len = MIN(size, s->regs[ENET_MRBR]);
1197 bd.length = buf_len;
1198 size -= buf_len;
1199
1200 trace_imx_enet_receive_len(addr, bd.length);
1201
1202
1203 if (size < 4) {
1204 buf_len += size - 4;
1205 }
1206 buf_addr = bd.data;
1207
1208 if (shift16) {
1209
1210
1211
1212
1213 const uint8_t zeros[2] = { 0 };
1214
1215 dma_memory_write(&address_space_memory, buf_addr,
1216 zeros, sizeof(zeros));
1217
1218 buf_addr += sizeof(zeros);
1219 buf_len -= sizeof(zeros);
1220
1221
1222 shift16 = false;
1223 }
1224
1225 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1226 buf += buf_len;
1227 if (size < 4) {
1228 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1229 crc_ptr, 4 - size);
1230 crc_ptr += 4 - size;
1231 }
1232 bd.flags &= ~ENET_BD_E;
1233 if (size == 0) {
1234
1235 bd.flags |= flags | ENET_BD_L;
1236
1237 trace_imx_enet_receive_last(bd.flags);
1238
1239
1240 bd.last_buffer = ENET_BD_BDU;
1241 if (bd.option & ENET_BD_RX_INT) {
1242 s->regs[ENET_EIR] |= ENET_INT_RXF;
1243 }
1244 } else {
1245 if (bd.option & ENET_BD_RX_INT) {
1246 s->regs[ENET_EIR] |= ENET_INT_RXB;
1247 }
1248 }
1249 imx_enet_write_bd(&bd, addr);
1250
1251 if ((bd.flags & ENET_BD_W) != 0) {
1252 addr = s->regs[ENET_RDSR];
1253 } else {
1254 addr += sizeof(bd);
1255 }
1256 }
1257 s->rx_descriptor = addr;
1258 imx_eth_enable_rx(s, false);
1259 imx_eth_update(s);
1260 return len;
1261}
1262
1263static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1264 size_t len)
1265{
1266 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1267
1268 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1269 return imx_enet_receive(nc, buf, len);
1270 } else {
1271 return imx_fec_receive(nc, buf, len);
1272 }
1273}
1274
/* MMIO access descriptor: the register bank is 32-bit access only. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1282
/* Net core callback: the backend is going away, drop our NIC pointer. */
static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}
1289
/* Callbacks registered with the QEMU net core for this NIC. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
1298
1299
/*
 * Realize: map the MMIO register bank, export the two interrupt lines
 * and create the backing NIC with a valid MAC address.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
1319
/* User-configurable properties: NIC backend, TX ring count, PHY address. */
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_END_OF_LIST(),
};
1326
/* QOM class init shared by the FEC and ENET types. */
static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}
1337
1338static void imx_fec_init(Object *obj)
1339{
1340 IMXFECState *s = IMX_FEC(obj);
1341
1342 s->is_fec = true;
1343}
1344
1345static void imx_enet_init(Object *obj)
1346{
1347 IMXFECState *s = IMX_FEC(obj);
1348
1349 s->is_fec = false;
1350}
1351
/* Base QOM type: the classic FEC controller. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
1359
/* Derived QOM type: the enhanced ENET controller (inherits FEC class). */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
1365
1366static void imx_eth_register_types(void)
1367{
1368 type_register_static(&imx_fec_info);
1369 type_register_static(&imx_enet_info);
1370}
1371
/* Hook the type registration into QEMU's module init machinery. */
type_init(imx_eth_register_types)
1373