/*
 * QEMU model of SUN GEM ethernet controller
 *
 * As found in Apple ASICs among others
 */
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
#include "sysemu/sysemu.h"
#include "trace.h"

#define TYPE_SUNGEM "sungem"

#define SUNGEM(obj) OBJECT_CHECK(SunGEMState, (obj), TYPE_SUNGEM)

#define MAX_PACKET_SIZE 9016

#define SUNGEM_MMIO_SIZE 0x200000

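/*
 * Register block layout inside the single 2MB MMIO BAR, as mapped in
 * sungem_realize() below:
 *   0x0000 global registers, 0x2000 TX DMA, 0x4000 RX DMA,
 *   0x6000 MAC, 0x6200 MIF, 0x9000 PCS/Serialink.
 */
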
/* Global registers */
#define SUNGEM_MMIO_GREG_SIZE 0x2000

#define GREG_SEBSTATE 0x0000UL

#define GREG_STAT 0x000CUL
#define GREG_STAT_TXINTME 0x00000001
#define GREG_STAT_TXALL 0x00000002
#define GREG_STAT_TXDONE 0x00000004
#define GREG_STAT_RXDONE 0x00000010
#define GREG_STAT_RXNOBUF 0x00000020
#define GREG_STAT_RXTAGERR 0x00000040
#define GREG_STAT_TXMAC 0x00004000
#define GREG_STAT_RXMAC 0x00008000
#define GREG_STAT_MAC 0x00010000
#define GREG_STAT_TXNR 0xfff80000
#define GREG_STAT_TXNR_SHIFT 19

/*
 * These interrupts are edge latches in the status register: reading
 * it (or writing the corresponding bit in IACK) clears them.
 */
#define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
                         GREG_STAT_RXDONE | GREG_STAT_RXDONE | \
                         GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)

#define GREG_IMASK 0x0010UL
#define GREG_IACK 0x0014UL
#define GREG_STAT2 0x001CUL
#define GREG_PCIESTAT 0x1000UL
#define GREG_PCIEMASK 0x1004UL

#define GREG_SWRST 0x1010UL
#define GREG_SWRST_TXRST 0x00000001
#define GREG_SWRST_RXRST 0x00000002
#define GREG_SWRST_RSTOUT 0x00000004

/* TX DMA registers */
#define SUNGEM_MMIO_TXDMA_SIZE 0x1000

#define TXDMA_KICK 0x0000UL

#define TXDMA_CFG 0x0004UL
#define TXDMA_CFG_ENABLE 0x00000001
#define TXDMA_CFG_RINGSZ 0x0000001e

#define TXDMA_DBLOW 0x0008UL
#define TXDMA_DBHI 0x000CUL
#define TXDMA_PCNT 0x0024UL
#define TXDMA_SMACHINE 0x0028UL
#define TXDMA_DPLOW 0x0030UL
#define TXDMA_DPHI 0x0034UL
#define TXDMA_TXDONE 0x0100UL
#define TXDMA_FTAG 0x0108UL
#define TXDMA_FSZ 0x0118UL

/* RX DMA registers */
#define SUNGEM_MMIO_RXDMA_SIZE 0x2000

#define RXDMA_CFG 0x0000UL
#define RXDMA_CFG_ENABLE 0x00000001
#define RXDMA_CFG_RINGSZ 0x0000001e
#define RXDMA_CFG_FBOFF 0x00001c00
#define RXDMA_CFG_CSUMOFF 0x000fe000

#define RXDMA_DBLOW 0x0004UL
#define RXDMA_DBHI 0x0008UL
#define RXDMA_PCNT 0x0018UL
#define RXDMA_SMACHINE 0x001CUL
#define RXDMA_PTHRESH 0x0020UL
#define RXDMA_DPLOW 0x0024UL
#define RXDMA_DPHI 0x0028UL
#define RXDMA_KICK 0x0100UL
#define RXDMA_DONE 0x0104UL
#define RXDMA_BLANK 0x0108UL
#define RXDMA_FTAG 0x0110UL
#define RXDMA_FSZ 0x0120UL

/* MAC registers */
#define SUNGEM_MMIO_MAC_SIZE 0x200

#define MAC_TXRST 0x0000UL
#define MAC_RXRST 0x0004UL
#define MAC_TXSTAT 0x0010UL
#define MAC_RXSTAT 0x0014UL

#define MAC_CSTAT 0x0018UL
#define MAC_CSTAT_PTR 0xffff0000

#define MAC_TXMASK 0x0020UL
#define MAC_RXMASK 0x0024UL
#define MAC_MCMASK 0x0028UL

#define MAC_TXCFG 0x0030UL
#define MAC_TXCFG_ENAB 0x00000001

#define MAC_RXCFG 0x0034UL
#define MAC_RXCFG_ENAB 0x00000001
#define MAC_RXCFG_SFCS 0x00000004
#define MAC_RXCFG_PROM 0x00000008
#define MAC_RXCFG_PGRP 0x00000010
#define MAC_RXCFG_HFE 0x00000020

#define MAC_XIFCFG 0x003CUL
#define MAC_XIFCFG_LBCK 0x00000002

#define MAC_MINFSZ 0x0050UL
#define MAC_MAXFSZ 0x0054UL
#define MAC_ADDR0 0x0080UL
#define MAC_ADDR1 0x0084UL
#define MAC_ADDR2 0x0088UL
#define MAC_ADDR3 0x008CUL
#define MAC_ADDR4 0x0090UL
#define MAC_ADDR5 0x0094UL
#define MAC_HASH0 0x00C0UL
#define MAC_PATMPS 0x0114UL
#define MAC_SMACHINE 0x0134UL

/* MIF registers */
#define SUNGEM_MMIO_MIF_SIZE 0x20

#define MIF_FRAME 0x000CUL
#define MIF_FRAME_OP 0x30000000
#define MIF_FRAME_PHYAD 0x0f800000
#define MIF_FRAME_REGAD 0x007c0000
#define MIF_FRAME_TALSB 0x00010000
#define MIF_FRAME_DATA 0x0000ffff

#define MIF_CFG 0x0010UL
#define MIF_CFG_MDI0 0x00000100
#define MIF_CFG_MDI1 0x00000200

#define MIF_STATUS 0x0018UL
#define MIF_SMACHINE 0x001CUL

/* PCS/Serialink registers */
#define SUNGEM_MMIO_PCS_SIZE 0x60
#define PCS_MIISTAT 0x0004UL
#define PCS_ISTAT 0x0018UL
#define PCS_SSTATE 0x005CUL

/* Descriptors */
struct gem_txd {
    uint64_t control_word;
    uint64_t buffer;
};

#define TXDCTRL_BUFSZ 0x0000000000007fffULL
#define TXDCTRL_CSTART 0x00000000001f8000ULL
#define TXDCTRL_COFF 0x000000001fe00000ULL
#define TXDCTRL_CENAB 0x0000000020000000ULL
#define TXDCTRL_EOF 0x0000000040000000ULL
#define TXDCTRL_SOF 0x0000000080000000ULL
#define TXDCTRL_INTME 0x0000000100000000ULL

struct gem_rxd {
    uint64_t status_word;
    uint64_t buffer;
};

#define RXDCTRL_HPASS 0x1000000000000000ULL
#define RXDCTRL_ALTMAC 0x2000000000000000ULL


typedef struct {
    PCIDevice pdev;

    MemoryRegion sungem;
    MemoryRegion greg;
    MemoryRegion txdma;
    MemoryRegion rxdma;
    MemoryRegion mac;
    MemoryRegion mif;
    MemoryRegion pcs;
    NICState *nic;
    NICConf conf;
    uint32_t phy_addr;

    uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
    uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
    uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
    uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
    uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
    uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];

    /* Cached ring index masks derived from the DMA ring size fields */
    uint32_t rx_mask;
    uint32_t tx_mask;

    /* TX packet currently being reassembled from descriptors */
    uint8_t tx_data[MAX_PACKET_SIZE];
    uint32_t tx_size;
    uint64_t tx_first_ctl;
} SunGEMState;

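/*
 * Interrupt handling: the device exposes a single INTx line.  It is
 * asserted whenever a bit is set in GREG_STAT that is not masked by
 * GREG_IMASK (the TX completion index bits are ignored for this).
 */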
static void sungem_eval_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->gregs[GREG_IMASK >> 2];
    stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
    if (stat & ~mask) {
        pci_set_irq(PCI_DEVICE(s), 1);
    } else {
        pci_set_irq(PCI_DEVICE(s), 0);
    }
}

static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
{
    uint32_t stat;

    stat = s->gregs[GREG_STAT >> 2];
    if (val) {
        stat |= bits;
    } else {
        stat &= ~bits;
    }
    s->gregs[GREG_STAT >> 2] = stat;
    sungem_eval_irq(s);
}

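/*
 * The MAC block has its own status and mask registers for TX, RX and
 * control events; sungem_eval_cascade_irq() folds them into the
 * TXMAC/RXMAC/MAC summary bits of the global status register.
 */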
static void sungem_eval_cascade_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->macregs[MAC_TXSTAT >> 2];
    stat = s->macregs[MAC_TXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_TXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_TXMAC, false);
    }

    mask = s->macregs[MAC_RXSTAT >> 2];
    stat = s->macregs[MAC_RXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_RXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_RXMAC, false);
    }

    mask = s->macregs[MAC_CSTAT >> 2];
    stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR;
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_MAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_MAC, false);
    }
}

static void sungem_do_tx_csum(SunGEMState *s)
{
    uint16_t start, off;
    uint32_t csum;

    start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
    off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;

    trace_sungem_tx_checksum(start, off);

    if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
        trace_sungem_tx_checksum_oob();
        return;
    }

    csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
    stw_be_p(s->tx_data + off, csum);
}

static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
                               int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);

    if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

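/*
 * TX descriptors carry a guest-physical buffer address and a control
 * word; a frame may span several descriptors, delimited by the SOF
 * and EOF flags.  Fragments are gathered into tx_data[] and the frame
 * is sent once EOF is seen.
 */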
static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t len;

    /* If it's a start of frame, discard anything we had in the
     * buffer and start again.
     */
    if (desc->control_word & TXDCTRL_SOF) {
        if (s->tx_first_ctl) {
            trace_sungem_tx_unfinished();
        }
        s->tx_size = 0;
        s->tx_first_ctl = desc->control_word;
    }

    /* Grab the data size */
    len = desc->control_word & TXDCTRL_BUFSZ;

    /* Clamp it to our staging buffer size */
    if ((s->tx_size + len) > MAX_PACKET_SIZE) {
        trace_sungem_tx_overflow();
        len = MAX_PACKET_SIZE - s->tx_size;
    }

    /* Read the data */
    pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
    s->tx_size += len;

    /* If end of frame, send the packet */
    if (desc->control_word & TXDCTRL_EOF) {
        trace_sungem_tx_finished(s->tx_size);

        /* Handle the checksum if it was requested on the first descriptor */
        if (s->tx_first_ctl & TXDCTRL_CENAB) {
            sungem_do_tx_csum(s);
        }

        /* Send it */
        sungem_send_packet(s, s->tx_data, s->tx_size);

        /* No more pending packet */
        s->tx_size = 0;
        s->tx_first_ctl = 0;
    }
}

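/*
 * Called when the guest writes TXDMA_KICK: walk the descriptor ring
 * from the current completion index (TXDMA_TXDONE) up to the kick
 * index, processing and completing each descriptor on the way.
 */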
static void sungem_tx_kick(SunGEMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t comp, kick;
    uint32_t txdma_cfg, txmac_cfg, ints;
    uint64_t dbase;

    trace_sungem_tx_kick();

    /* Check that both the TX MAC and the TX DMA engine are enabled.
     * A write to TXDMA_KICK with DMA disabled can legitimately happen
     * while the driver is resetting the ring pointer, so just ignore
     * it in that case.
     */
    txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
    txmac_cfg = s->macregs[MAC_TXCFG >> 2];
    if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
        !(txmac_cfg & MAC_TXCFG_ENAB)) {
        trace_sungem_tx_disabled();
        return;
    }

    /* Descriptor ring base address */
    dbase = s->txdmaregs[TXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];

    comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
    kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;

    trace_sungem_tx_process(comp, kick, s->tx_mask + 1);

    /* This is rather simplistic: send everything we can in one go,
     * without emulating the FIFO or any transmit pacing.
     */
    while (comp != kick) {
        struct gem_txd desc;

        /* Read the next descriptor */
        pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));

        /* Byteswap the descriptor */
        desc.control_word = le64_to_cpu(desc.control_word);
        desc.buffer = le64_to_cpu(desc.buffer);
        trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);

        /* Process it */
        sungem_process_tx_desc(s, &desc);

        /* Interrupt */
        ints = GREG_STAT_TXDONE;
        if (desc.control_word & TXDCTRL_INTME) {
            ints |= GREG_STAT_TXINTME;
        }
        sungem_update_status(s, ints, true);

        /* Next descriptor */
        comp = (comp + 1) & s->tx_mask;
        s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
    }

    /* We sent everything, set the "TX all" status bit */
    sungem_update_status(s, GREG_STAT_TXALL, true);
}

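/*
 * The RX ring is treated as full when the next completion index
 * (DONE + 1) would catch up with the guest's KICK pointer.
 */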
static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
{
    return kick == ((done + 1) & s->rx_mask);
}

static int sungem_can_receive(NetClientState *nc)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    uint32_t kick, done, rxdma_cfg, rxmac_cfg;
    bool full;

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];

    /* If the RX MAC or the RX DMA engine is disabled, we can't receive */
    if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
        trace_sungem_rx_mac_disabled();
        return 0;
    }
    if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
        trace_sungem_rx_txdma_disabled();
        return 0;
    }

    /* Check RX ring availability */
    kick = s->rxdmaregs[RXDMA_KICK >> 2];
    done = s->rxdmaregs[RXDMA_DONE >> 2];
    full = sungem_rx_full(s, kick, done);

    trace_sungem_rx_check(!full, kick, done);

    return !full;
}

enum {
    rx_no_match,
    rx_match_promisc,
    rx_match_bcast,
    rx_match_allmcast,
    rx_match_mcast,
    rx_match_mac,
    rx_match_altmac,
};

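/*
 * Destination address filtering.  Checks are applied in order:
 * promiscuous mode, broadcast, multicast (either "pass all multicast"
 * or the 256-bit hash filter), then the primary and alternate station
 * addresses.  The return value tells the caller which filter matched.
 */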
static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
{
    uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
    uint32_t mac0, mac1, mac2;

    /* Promiscuous mode enabled ? */
    if (rxcfg & MAC_RXCFG_PROM) {
        return rx_match_promisc;
    }

    /* Format the MAC address into 16-bit words as stored in MAC_ADDR0..2 */
    mac0 = (mac[4] << 8) | mac[5];
    mac1 = (mac[2] << 8) | mac[3];
    mac2 = (mac[0] << 8) | mac[1];

    trace_sungem_rx_mac_check(mac0, mac1, mac2);

    /* Is this a broadcast frame ? */
    if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
        return rx_match_bcast;
    }

    /* Is this a multicast frame ? */
    if (mac[0] & 1) {
        trace_sungem_rx_mac_multicast();

        /* Pass all multicast frames ? */
        if (rxcfg & MAC_RXCFG_PGRP) {
            return rx_match_allmcast;
        }

        /* Otherwise check the hash filter: the top 8 bits of the CRC
         * select one bit out of the 256-bit MAC_HASH table
         */
        if (rxcfg & MAC_RXCFG_HFE) {
            uint32_t hash, idx;

            crc >>= 24;
            idx = (crc >> 2) & 0x3c;
            hash = s->macregs[(MAC_HASH0 + idx) >> 2];
            if (hash & (1 << (15 - (crc & 0xf)))) {
                return rx_match_mcast;
            }
        }
        return rx_no_match;
    }

    /* Compare with the main MAC address */
    trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
                                s->macregs[MAC_ADDR1 >> 2],
                                s->macregs[MAC_ADDR2 >> 2]);

    if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
        mac1 == s->macregs[MAC_ADDR1 >> 2] &&
        mac2 == s->macregs[MAC_ADDR2 >> 2]) {
        return rx_match_mac;
    }

    /* Compare with the alternate MAC address */
    if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
        mac1 == s->macregs[MAC_ADDR4 >> 2] &&
        mac2 == s->macregs[MAC_ADDR5 >> 2]) {
        return rx_match_altmac;
    }

    return rx_no_match;
}

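/*
 * Receive path: check that RX is enabled and the frame size is
 * acceptable, pad short frames, run the address filter, then copy the
 * packet into the buffer of the next free RX descriptor and write the
 * completed descriptor (size, partial checksum, match flags) back to
 * guest memory before raising the RX interrupt.
 */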
static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t mac_crc, done, kick, max_fsize;
    uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
    uint8_t smallbuf[60];
    struct gem_rxd desc;
    uint64_t dbase, baddr;
    unsigned int rx_cond;

    trace_sungem_rx_packet(size);

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
    max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;

    /* If the receive path is disabled, just drop the packet */
    if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
        !(rxmac_cfg & MAC_RXCFG_ENAB)) {
        trace_sungem_rx_disabled();
        return 0;
    }

    /* Size adjustment for the FCS */
    if (rxmac_cfg & MAC_RXCFG_SFCS) {
        fcs_size = 0;
    } else {
        fcs_size = 4;
    }

    /* Discard frames too small to contain a destination MAC address or
     * larger than the maximum frame size (accounting for the FCS)
     */
    if (size < 6 || (size + 4) > max_fsize) {
        trace_sungem_rx_bad_frame_size(size);

        return size;
    }

    /* We don't drop undersized frames, we pad them instead: QEMU can
     * hand us frames without the usual minimum-size padding, so pad
     * up to 60 bytes (64 minus the FCS we don't append).
     */
    if (size < 60) {
        memcpy(smallbuf, buf, size);
        memset(&smallbuf[size], 0, 60 - size);
        buf = smallbuf;
        size = 60;
    }

    /* Compute the destination MAC CRC used by the hash filter */
    mac_crc = net_crc32_le(buf, ETH_ALEN);

    /* Packet isn't for us ? Just drop it */
    rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
    if (rx_cond == rx_no_match) {
        trace_sungem_rx_unmatched();
        return size;
    }

    /* Get the ring pointers */
    kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
    done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;

    trace_sungem_rx_process(done, kick, s->rx_mask + 1);

    /* Ring full ? Can't receive */
    if (sungem_rx_full(s, kick, done)) {
        trace_sungem_rx_ringfull();
        return 0;
    }

    /* Descriptor ring base address */
    dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];

    /* Read the next descriptor */
    pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
                         le64_to_cpu(desc.buffer));

    /* Effective buffer address, including the first-byte offset */
    baddr = le64_to_cpu(desc.buffer) & ~7ull;
    baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;

    /* Write the packet data out to guest memory */
    pci_dma_write(d, baddr, buf, size);

    if (fcs_size) {
        /* We don't append an actual FCS: nothing is known to check it,
         * we only account for its size in the status word.
         */
    }

    /* Calculate the partial checksum from the configured offset */
    coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
    csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);

    /* Build the updated descriptor status word */
    desc.status_word = (size + fcs_size) << 16;
    desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
    desc.status_word |= csum;
    if (rx_cond == rx_match_mcast) {
        desc.status_word |= RXDCTRL_HPASS;
    }
    if (rx_cond == rx_match_altmac) {
        desc.status_word |= RXDCTRL_ALTMAC;
    }
    desc.status_word = cpu_to_le64(desc.status_word);

    pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    done = (done + 1) & s->rx_mask;
    s->rxdmaregs[RXDMA_DONE >> 2] = done;

    /* Raise the RX interrupt unconditionally, plus "no buffer" if we
     * just filled the ring
     */
    ints = GREG_STAT_RXDONE;
    if (sungem_rx_full(s, kick, done)) {
        ints |= GREG_STAT_RXNOBUF;
    }
    sungem_update_status(s, ints, true);

    return size;
}

static void sungem_set_link_status(NetClientState *nc)
{
    /* Nothing to do: guests poll link state through the emulated PHY
     * (see __sungem_mii_read), we don't model MIF autopoll or PHY
     * interrupts.
     */
}

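/*
 * The RINGSZ fields of the DMA config registers encode the number of
 * ring entries as 32 << n; the masks cached here are used to wrap the
 * ring indices.
 */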
static void sungem_update_masks(SunGEMState *s)
{
    uint32_t sz;

    sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
    s->rx_mask = sz - 1;

    sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
    s->tx_mask = sz - 1;
}

static void sungem_reset_rx(SunGEMState *s)
{
    trace_sungem_rx_reset();

    /* Reset to default values */
    s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
    s->rxdmaregs[RXDMA_DONE >> 2] = 0;
    s->rxdmaregs[RXDMA_KICK >> 2] = 0;
    s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
    s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
    s->rxdmaregs[RXDMA_BLANK >> 2] = 0;

    sungem_update_masks(s);
}

static void sungem_reset_tx(SunGEMState *s)
{
    trace_sungem_tx_reset();

    /* Reset to default values */
    s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
    s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
    s->txdmaregs[TXDMA_KICK >> 2] = 0;
    s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;

    sungem_update_masks(s);

    s->tx_size = 0;
    s->tx_first_ctl = 0;
}

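/*
 * Full device reset: resets both DMA engines and the interrupt state.
 * On a PCI-level reset (pci_reset == true) the station address
 * registers are also reloaded from the configured MAC address.
 */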
static void sungem_reset_all(SunGEMState *s, bool pci_reset)
{
    trace_sungem_reset(pci_reset);

    sungem_reset_rx(s);
    sungem_reset_tx(s);

    s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
    s->gregs[GREG_STAT >> 2] = 0;
    if (pci_reset) {
        uint8_t *ma = s->conf.macaddr.a;

        s->gregs[GREG_SWRST >> 2] = 0;
        s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
        s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
        s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
    } else {
        s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
    }
    s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
}

static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
                             uint8_t reg_addr, uint16_t val)
{
    trace_sungem_mii_write(phy_addr, reg_addr, val);

    /* PHY writes are traced but otherwise ignored */
}

static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                  uint8_t reg_addr)
{
    if (phy_addr != s->phy_addr) {
        return 0xffff;
    }

    /* Minimal PHY emulation (ID 0x0040/0x6210), just enough to keep
     * guest drivers happy
     */
    switch (reg_addr) {
    case MII_BMCR:
        return 0;
    case MII_PHYID1:
        return 0x0040;
    case MII_PHYID2:
        return 0x6210;
    case MII_BMSR:
        if (qemu_get_queue(s->nic)->link_down) {
            return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
        } else {
            return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
                   MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        }
    case MII_ANLPAR:
    case MII_ANAR:
        return MII_ANLPAR_TXFD;
    case 0x18: /* Vendor specific aux status */
        return 3;
    default:
        return 0;
    }
}

static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                uint8_t reg_addr)
{
    uint16_t val;

    val = __sungem_mii_read(s, phy_addr, reg_addr);

    trace_sungem_mii_read(phy_addr, reg_addr, val);

    return val;
}

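/*
 * Execute one MIF management frame.  The frame written to MIF_FRAME
 * contains the start sequence, opcode (1 = write, 2 = read), PHY and
 * register addresses, turnaround bit and 16-bit data field; the result
 * is handed back with the turnaround LSB set to signal completion.
 */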
static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
{
    uint8_t phy_addr, reg_addr, op;

    /* Ignore frames without a valid start sequence */
    if ((val >> 30) != 1) {
        trace_sungem_mii_invalid_sof(val >> 30);
        return 0xffff;
    }
    phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
    reg_addr = (val & MIF_FRAME_REGAD) >> 18;
    op = (val & MIF_FRAME_OP) >> 28;
    switch (op) {
    case 1:
        sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
        return val | MIF_FRAME_TALSB;
    case 2:
        return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
    default:
        trace_sungem_mii_invalid_op(op);
    }
    return 0xffff | MIF_FRAME_TALSB;
}

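/*
 * MMIO handlers.  Each register block follows the same pattern: the
 * access is range-checked and traced, a pre-write filter discards
 * stores to read-only registers or adjusts the value, the backing
 * register array is updated, and a post-write step applies any side
 * effects (interrupt re-evaluation, kicks, resets, ...).
 */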
static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
                                   unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_greg_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case GREG_SEBSTATE:
    case GREG_STAT:
    case GREG_STAT2:
    case GREG_PCIESTAT:
        return;
    case GREG_IACK:
        val &= GREG_STAT_LATCH;
        s->gregs[GREG_STAT >> 2] &= ~val;
        sungem_eval_irq(s);
        return;
    case GREG_PCIEMASK:
        val &= 0x7;
        break;
    }

    s->gregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case GREG_IMASK:
        /* Re-evaluate the interrupt line */
        sungem_eval_irq(s);
        break;
    case GREG_SWRST:
        switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
        case GREG_SWRST_RXRST:
            sungem_reset_rx(s);
            break;
        case GREG_SWRST_TXRST:
            sungem_reset_tx(s);
            break;
        case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
            sungem_reset_all(s, false);
        }
        break;
    }
}

static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->gregs[addr >> 2];

    trace_sungem_mmio_greg_read(addr, val);

    switch (addr) {
    case GREG_STAT:
        /* Side effect: reading clears the latched status bits */
        s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
        sungem_eval_irq(s);

        /* Inject the TX completion index into the returned value */
        val = (val & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    case GREG_STAT2:
        /* Alias of the status register without the read side effect
         * (TX completion index injected as well)
         */
        val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_greg_ops = {
    .read = sungem_mmio_greg_read,
    .write = sungem_mmio_greg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_txdma_write(addr, val);

    /* Pre-write filter: read-only registers */
    switch (addr) {
    case TXDMA_TXDONE:
    case TXDMA_PCNT:
    case TXDMA_SMACHINE:
    case TXDMA_DPLOW:
    case TXDMA_DPHI:
    case TXDMA_FSZ:
    case TXDMA_FTAG:
        return;
    }

    s->txdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case TXDMA_KICK:
        sungem_tx_kick(s);
        break;
    case TXDMA_CFG:
        sungem_update_masks(s);
        break;
    }
}

static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->txdmaregs[addr >> 2];

    trace_sungem_mmio_txdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_txdma_ops = {
    .read = sungem_mmio_txdma_read,
    .write = sungem_mmio_txdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_rxdma_write(addr, val);

    /* Pre-write filter: read-only registers */
    switch (addr) {
    case RXDMA_DONE:
    case RXDMA_PCNT:
    case RXDMA_SMACHINE:
    case RXDMA_DPLOW:
    case RXDMA_DPHI:
    case RXDMA_FSZ:
    case RXDMA_FTAG:
        return;
    }

    s->rxdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case RXDMA_KICK:
        trace_sungem_rx_kick(val);
        break;
    case RXDMA_CFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->rxdmaregs[addr >> 2];

    trace_sungem_mmio_rxdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_rxdma_ops = {
    .read = sungem_mmio_rxdma_read,
    .write = sungem_mmio_rxdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mac_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only or write-ignored registers */
    case MAC_TXRST:
    case MAC_RXRST:
    case MAC_TXSTAT:
    case MAC_RXSTAT:
    case MAC_CSTAT:
    case MAC_PATMPS:
    case MAC_SMACHINE:
        return;
    case MAC_MINFSZ:
        /* 10 bits implemented */
        val &= 0x3ff;
        break;
    }

    s->macregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MAC_TXMASK:
    case MAC_RXMASK:
    case MAC_MCMASK:
        sungem_eval_cascade_irq(s);
        break;
    case MAC_RXCFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->macregs[addr >> 2];

    trace_sungem_mmio_mac_read(addr, val);

    switch (addr) {
    case MAC_TXSTAT:
        /* Side effect: the TX status register clears on read */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_TXMAC, false);
        break;
    case MAC_RXSTAT:
        /* Side effect: the RX status register clears on read */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_RXMAC, false);
        break;
    case MAC_CSTAT:
        /* Side effect: clears on read, except for the bits covered by
         * MAC_CSTAT_PTR
         */
        s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
        sungem_update_status(s, GREG_STAT_MAC, false);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_mac_ops = {
    .read = sungem_mmio_mac_read,
    .write = sungem_mmio_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mif_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MIF_STATUS:
    case MIF_SMACHINE:
        return;
    case MIF_CFG:
        /* Maintain the read-only MDI attachment bits */
        val &= ~MIF_CFG_MDI1;
        val |= MIF_CFG_MDI0;
        break;
    }

    s->mifregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MIF_FRAME:
        /* Execute the MII frame and latch the result for reading back */
        s->mifregs[addr >> 2] = sungem_mii_op(s, val);
        break;
    }
}

static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->mifregs[addr >> 2];

    trace_sungem_mmio_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_mif_ops = {
    .read = sungem_mmio_mif_read,
    .write = sungem_mmio_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_pcs_write(addr, val);

    /* Pre-write filter: read-only registers */
    switch (addr) {
    case PCS_MIISTAT:
    case PCS_ISTAT:
    case PCS_SSTATE:
        return;
    }

    s->pcsregs[addr >> 2] = val;
}

static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->pcsregs[addr >> 2];

    trace_sungem_mmio_pcs_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_pcs_ops = {
    .read = sungem_mmio_pcs_read,
    .write = sungem_mmio_pcs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_uninit(PCIDevice *dev)
{
    SunGEMState *s = SUNGEM(dev);

    qemu_del_nic(s->nic);
}

static NetClientInfo net_sungem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sungem_can_receive,
    .receive = sungem_receive,
    .link_status_changed = sungem_set_link_status,
};

static void sungem_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    SunGEMState *s = SUNGEM(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM |
                 PCI_STATUS_66MHZ);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_conf[PCI_MIN_GNT] = 0x40;
    pci_conf[PCI_MAX_LAT] = 0x40;

    sungem_reset_all(s, true);
    memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);

    memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
                          "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
    memory_region_add_subregion(&s->sungem, 0, &s->greg);

    memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
                          "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);

    memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
                          "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);

    memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
                          "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);

    memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
                          "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);

    memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
                          "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
    memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic),
                             s->conf.macaddr.a);
}

static void sungem_reset(DeviceState *dev)
{
    SunGEMState *s = SUNGEM(dev);

    sungem_reset_all(s, true);
}

static void sungem_instance_init(Object *obj)
{
    SunGEMState *s = SUNGEM(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}

static Property sungem_properties[] = {
    DEFINE_NIC_PROPERTIES(SunGEMState, conf),
    /* PHY address on the MIF bus, usually 0; boards can override it */
    DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_sungem = {
    .name = "sungem",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, SunGEMState),
        VMSTATE_MACADDR(conf.macaddr, SunGEMState),
        VMSTATE_UINT32(phy_addr, SunGEMState),
        VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
                             (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
                             (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
        VMSTATE_UINT32(rx_mask, SunGEMState),
        VMSTATE_UINT32(tx_mask, SunGEMState),
        VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
        VMSTATE_UINT32(tx_size, SunGEMState),
        VMSTATE_UINT64(tx_first_ctl, SunGEMState),
        VMSTATE_END_OF_LIST()
    }
};

static void sungem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sungem_realize;
    k->exit = sungem_uninit;
    k->vendor_id = PCI_VENDOR_ID_APPLE;
    k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
    k->revision = 0x01;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_sungem;
    dc->reset = sungem_reset;
    dc->props = sungem_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sungem_info = {
    .name = TYPE_SUNGEM,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(SunGEMState),
    .class_init = sungem_class_init,
    .instance_init = sungem_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sungem_register_types(void)
{
    type_register_static(&sungem_info);
}

type_init(sungem_register_types)