/*
 * Nuvoton NPCM7xx EMC (Ethernet MAC Controller) device model.
 *
 * This model implements the register interface, the CAM0-based receive
 * filter, and descriptor-based DMA for transmit and receive. The MII
 * management interface is not modelled (reads of MIID return 0).
 */

#include "qemu/osdep.h"

/* For crc32 */
#include <zlib.h>

#include "hw/irq.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
#include "hw/net/npcm7xx_emc.h"
#include "net/eth.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "sysemu/dma.h"
#include "trace.h"

#define CRC_LENGTH 4

/*
 * The maximum size of a (layer 2) ethernet frame as defined by 802.3:
 * 1518 = 6 (dest macaddr) + 6 (src macaddr) + 2 (proto) + 1500 (payload) +
 *        4 (crc).
 * This does not include the additional 4 bytes of an 802.1q vlan tag.
 */
#define MAX_ETH_FRAME_SIZE 1518

static const char *emc_reg_name(int regno)
{
#define REG(name) case REG_ ## name: return #name;
    switch (regno) {
    REG(CAMCMR)
    REG(CAMEN)
    REG(TXDLSA)
    REG(RXDLSA)
    REG(MCMDR)
    REG(MIID)
    REG(MIIDA)
    REG(FFTCR)
    REG(TSDR)
    REG(RSDR)
    REG(DMARFC)
    REG(MIEN)
    REG(MISTA)
    REG(MGSTA)
    REG(MPCNT)
    REG(MRPC)
    REG(MRPCC)
    REG(MREPC)
    REG(DMARFS)
    REG(CTXDSA)
    REG(CTXBSA)
    REG(CRXDSA)
    REG(CRXBSA)
    case REG_CAMM_BASE + 0: return "CAM0M";
    case REG_CAML_BASE + 0: return "CAM0L";
    case REG_CAMM_BASE + 2 ... REG_CAMML_LAST:
        /* Only CAM0 is supported; fold the other CAM registers together. */
        if (regno & 1) {
            return "CAM<n>L";
        } else {
            return "CAM<n>M";
        }
    default: return "UNKNOWN";
    }
#undef REG
}

static void emc_reset(NPCM7xxEMCState *emc)
{
    trace_npcm7xx_emc_reset(emc->emc_num);

    memset(&emc->regs[0], 0, sizeof(emc->regs));

    /* These registers have non-zero reset values. */
    emc->regs[REG_TXDLSA] = 0xfffffffc;
    emc->regs[REG_RXDLSA] = 0xfffffffc;
    emc->regs[REG_MIIDA] = 0x00900000;
    emc->regs[REG_FFTCR] = 0x0101;
    emc->regs[REG_DMARFC] = 0x0800;
    emc->regs[REG_MPCNT] = 0x7fff;

    emc->tx_active = false;
    emc->rx_active = false;
}

static void npcm7xx_emc_reset(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    emc_reset(emc);
}

static void emc_soft_reset(NPCM7xxEMCState *emc)
{
    /*
     * A soft reset clears all state except the MCMDR.LBK and MCMDR.OPMOD
     * bits, which are preserved across the reset.
     */
    uint32_t mcmdr = emc->regs[REG_MCMDR];
    emc_reset(emc);
    emc->regs[REG_MCMDR] = mcmdr & (REG_MCMDR_LBK | REG_MCMDR_OPMOD);

    qemu_set_irq(emc->tx_irq, 0);
    qemu_set_irq(emc->rx_irq, 0);
}

static void emc_set_link(NetClientState *nc)
{
    /* Nothing to do yet. */
}

/* Update the MISTA.TXINTR summary bit from the individual TX status bits. */
static void emc_update_mista_txintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_TXBERR |
                     REG_MISTA_TDU |
                     REG_MISTA_TXCP);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_TXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_TXINTR;
    }
}

/* Update the MISTA.RXINTR summary bit from the individual RX status bits. */
static void emc_update_mista_rxintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_RXBERR |
                     REG_MISTA_RDU |
                     REG_MISTA_RXGD);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_RXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_RXINTR;
    }
}

/* Assumes emc_update_mista_txintr has already been called. */
static void emc_update_tx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_TXINTR);
    trace_npcm7xx_emc_update_tx_irq(level);
    qemu_set_irq(emc->tx_irq, level);
}

/* Assumes emc_update_mista_rxintr has already been called. */
static void emc_update_rx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_RXINTR);
    trace_npcm7xx_emc_update_rx_irq(level);
    qemu_set_irq(emc->rx_irq, level);
}

/* Update both IRQ lines after a change to MIEN or MISTA. */
static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc)
{
    emc_update_mista_txintr(emc);
    emc_update_tx_irq(emc);

    emc_update_mista_rxintr(emc);
    emc_update_rx_irq(emc);
}

static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->flags = le32_to_cpu(desc->flags);
    desc->txbsa = le32_to_cpu(desc->txbsa);
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->ntxdsa = le32_to_cpu(desc->ntxdsa);
    return 0;
}

static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCTxDesc le_desc;

    le_desc.flags = cpu_to_le32(desc->flags);
    le_desc.txbsa = cpu_to_le32(desc->txbsa);
    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->rxbsa = le32_to_cpu(desc->rxbsa);
    desc->reserved = le32_to_cpu(desc->reserved);
    desc->nrxdsa = le32_to_cpu(desc->nrxdsa);
    return 0;
}

static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCRxDesc le_desc;

    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.rxbsa = cpu_to_le32(desc->rxbsa);
    le_desc.reserved = cpu_to_le32(desc->reserved);
    le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

static void emc_set_mista(NPCM7xxEMCState *emc, uint32_t flags)
{
    trace_npcm7xx_emc_set_mista(flags);
    emc->regs[REG_MISTA] |= flags;
    /* TX status lives in the upper 16 bits of MISTA, RX status in the lower. */
    if (extract32(flags, 16, 16)) {
        emc_update_mista_txintr(emc);
    }
    if (extract32(flags, 0, 16)) {
        emc_update_mista_rxintr(emc);
    }
}

static void emc_halt_tx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->tx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->rx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_enable_rx_and_flush(NPCM7xxEMCState *emc)
{
    emc->rx_active = true;
    qemu_flush_queued_packets(qemu_get_queue(emc->nic));
}

static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCTxDesc *tx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_tx_desc(tx_desc, desc_addr)) {
        /*
         * We just read this descriptor so a write failure shouldn't
         * generally happen. Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_TXBERR);
    }
    emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa);
}

static void emc_set_next_rx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCRxDesc *rx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_rx_desc(rx_desc, desc_addr)) {
        /*
         * We just read this descriptor so a write failure shouldn't
         * generally happen. Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_RXBERR);
    }
    emc->regs[REG_CRXDSA] = RX_DESC_NRXDSA(rx_desc->nrxdsa);
}

static void emc_try_send_next_packet(NPCM7xxEMCState *emc)
{
    /* Working buffer for sending out packets. Most packets fit in this. */
#define TX_BUFFER_SIZE 2048
    uint8_t tx_send_buffer[TX_BUFFER_SIZE];
    uint32_t desc_addr = TX_DESC_NTXDSA(emc->regs[REG_CTXDSA]);
    NPCM7xxEMCTxDesc tx_desc;
    uint32_t next_buf_addr, length;
    uint8_t *buf;
    g_autofree uint8_t *malloced_buf = NULL;

    if (emc_read_tx_desc(desc_addr, &tx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_tx(emc, REG_MISTA_TXBERR);
        emc_update_tx_irq(emc);
        return;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_tx(emc, REG_MISTA_TDU);
        emc_update_tx_irq(emc);
        return;
    }

    /* Give the descriptor back regardless of what happens. */
    tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK;
    tx_desc.status_and_length &= 0xffff;

    /*
     * Use the transmit buffer address as-is: guests do not necessarily
     * word-align the buffer, so no alignment is assumed here.
     */
    next_buf_addr = tx_desc.txbsa;
    emc->regs[REG_CTXBSA] = next_buf_addr;
    length = TX_DESC_PKT_LEN(tx_desc.status_and_length);
    buf = &tx_send_buffer[0];

    if (length > sizeof(tx_send_buffer)) {
        malloced_buf = g_malloc(length);
        buf = malloced_buf;
    }

    if (dma_memory_read(&address_space_memory, next_buf_addr, buf,
                        length, MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
                      __func__, next_buf_addr);
        emc_set_mista(emc, REG_MISTA_TXBERR);
        emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
        emc_update_tx_irq(emc);
        trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
        return;
    }

    if ((tx_desc.flags & TX_DESC_FLAG_PADEN) && (length < MIN_PACKET_LENGTH)) {
        memset(buf + length, 0, MIN_PACKET_LENGTH - length);
        length = MIN_PACKET_LENGTH;
    }

    /* N.B. emc_receive can get called here. */
    qemu_send_packet(qemu_get_queue(emc->nic), buf, length);
    trace_npcm7xx_emc_sent_packet(length);

    tx_desc.status_and_length |= TX_DESC_STATUS_TXCP;
    if (tx_desc.flags & TX_DESC_FLAG_INTEN) {
        emc_set_mista(emc, REG_MISTA_TXCP);
    }
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_TXINTR) {
        tx_desc.status_and_length |= TX_DESC_STATUS_TXINTR;
    }

    emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
    emc_update_tx_irq(emc);
    trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
}

static bool emc_can_receive(NetClientState *nc)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));

    bool can_receive = emc->rx_active;
    trace_npcm7xx_emc_can_receive(can_receive);
    return can_receive;
}

/* If the result is false then *fail_reason contains the reason. */
static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf,
                                size_t len, const char **fail_reason)
{
    eth_pkt_types_e pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(buf));

    switch (pkt_type) {
    case ETH_PKT_BCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Broadcast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_ABP);
        }
    case ETH_PKT_MCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Multicast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_AMP);
        }
    case ETH_PKT_UCAST: {
        bool matches;
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) {
            return true;
        }
        matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) &&
                   /* We only support one CAM register, CAM0. */
                   (emc->regs[REG_CAMEN] & (1 << 0)) &&
                   memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0);
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            *fail_reason = "MACADDR matched, comparison complemented";
            return !matches;
        } else {
            *fail_reason = "MACADDR didn't match";
            return matches;
        }
    }
    default:
        g_assert_not_reached();
    }
}

static bool emc_receive_filter(NPCM7xxEMCState *emc, const uint8_t *buf,
                               size_t len)
{
    const char *fail_reason = NULL;
    bool ok = emc_receive_filter1(emc, buf, len, &fail_reason);
    if (!ok) {
        trace_npcm7xx_emc_packet_filtered_out(fail_reason);
    }
    return ok;
}

static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));
    const uint32_t len = len1;
    size_t max_frame_len;
    bool long_frame;
    uint32_t desc_addr;
    NPCM7xxEMCRxDesc rx_desc;
    uint32_t crc;
    uint8_t *crc_ptr;
    uint32_t buf_addr;

    trace_npcm7xx_emc_receiving_packet(len);

    if (!emc_can_receive(nc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__);
        return -1;
    }

    if (len < ETH_HLEN ||
        /* Defensive programming: drop unsupportable large packets. */
        len > 0xffff - CRC_LENGTH) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Dropped frame of %u bytes\n",
                      __func__, len);
        return len;
    }

    /*
     * DENI is set once the Length/Type field of the incoming packet has been
     * received, so set it here regardless of what happens next.
     */
    emc_set_mista(emc, REG_MISTA_DENI);

    if (!emc_receive_filter(emc, buf, len)) {
        emc_update_rx_irq(emc);
        return len;
    }

    /* Frames bigger than the DMARFC limit are dropped. */
    max_frame_len = REG_DMARFC_RXMS(emc->regs[REG_DMARFC]);
    if (len + CRC_LENGTH > max_frame_len) {
        trace_npcm7xx_emc_packet_dropped(len);
        emc_set_mista(emc, REG_MISTA_DFOI);
        emc_update_rx_irq(emc);
        return len;
    }

    /*
     * Frames longer than MAX_ETH_FRAME_SIZE are only accepted when
     * MCMDR.ALP is set; otherwise they are dropped and PTLE is flagged.
     */
    long_frame = false;
    if (len + CRC_LENGTH > MAX_ETH_FRAME_SIZE) {
        if (emc->regs[REG_MCMDR] & REG_MCMDR_ALP) {
            long_frame = true;
        } else {
            trace_npcm7xx_emc_packet_dropped(len);
            emc_set_mista(emc, REG_MISTA_PTLE);
            emc_update_rx_irq(emc);
            return len;
        }
    }

    desc_addr = RX_DESC_NRXDSA(emc->regs[REG_CRXDSA]);
    if (emc_read_rx_desc(desc_addr, &rx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_rx(emc, REG_MISTA_RXBERR);
        emc_update_rx_irq(emc);
        return len;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(rx_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_rx(emc, REG_MISTA_RDU);
        emc_update_rx_irq(emc);
        return len;
    }

    crc = 0;
    crc_ptr = (uint8_t *) &crc;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        crc = cpu_to_be32(crc32(~0, buf, len));
    }

    /* Give the descriptor back regardless of what happens. */
    rx_desc.status_and_length &= ~RX_DESC_STATUS_OWNER_MASK;

    buf_addr = rx_desc.rxbsa;
    emc->regs[REG_CRXBSA] = buf_addr;
    if (dma_memory_write(&address_space_memory, buf_addr, buf,
                         len, MEMTXATTRS_UNSPECIFIED) ||
        (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) &&
         dma_memory_write(&address_space_memory, buf_addr + len,
                          crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n",
                      __func__);
        emc_set_mista(emc, REG_MISTA_RXBERR);
        emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
        emc_update_rx_irq(emc);
        trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
        return len;
    }

    trace_npcm7xx_emc_received_packet(len);

    /* Note: we already verified that len + CRC_LENGTH <= 0xffff. */
    rx_desc.status_and_length = len;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        rx_desc.status_and_length += 4;
    }
    rx_desc.status_and_length |= RX_DESC_STATUS_RXGD;
    emc_set_mista(emc, REG_MISTA_RXGD);

    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_RXINTR) {
        rx_desc.status_and_length |= RX_DESC_STATUS_RXINTR;
    }
    if (long_frame) {
        rx_desc.status_and_length |= RX_DESC_STATUS_PTLE;
    }

    emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
    emc_update_rx_irq(emc);
    trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
    return len;
}

static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t result;

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return 0;
    }

    switch (reg) {
    case REG_MIID:
        /*
         * MII is not implemented. Always read back zero; writes only record
         * the last value written for debugging purposes.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Read of MIID, returning 0\n", __func__);
        result = 0;
        break;
    case REG_TSDR:
    case REG_RSDR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Read of write-only reg, %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        return 0;
    default:
        result = emc->regs[reg];
        break;
    }

    trace_npcm7xx_emc_reg_read(emc->emc_num, result, emc_reg_name(reg), reg);
    return result;
}

static void npcm7xx_emc_write(void *opaque, hwaddr offset,
                              uint64_t v, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t value = v;

    g_assert(size == sizeof(uint32_t));

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return;
    }

    trace_npcm7xx_emc_reg_write(emc->emc_num, emc_reg_name(reg), reg, value);

    switch (reg) {
    case REG_CAMCMR:
        emc->regs[reg] = value;
        break;
    case REG_CAMEN:
        /* Only CAM0 is supported, don't pretend otherwise. */
        if (value & ~1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Only CAM0 is supported, cannot enable others"
                          ": 0x%x\n",
                          __func__, value);
        }
        emc->regs[reg] = value & 1;
        break;
    case REG_CAMM_BASE + 0:
        emc->regs[reg] = value;
        emc->conf.macaddr.a[0] = value >> 24;
        emc->conf.macaddr.a[1] = value >> 16;
        emc->conf.macaddr.a[2] = value >> 8;
        emc->conf.macaddr.a[3] = value >> 0;
        break;
    case REG_CAML_BASE + 0:
        emc->regs[reg] = value;
        emc->conf.macaddr.a[4] = value >> 24;
        emc->conf.macaddr.a[5] = value >> 16;
        break;
    case REG_MCMDR: {
        uint32_t prev;
        if (value & REG_MCMDR_SWR) {
            emc_soft_reset(emc);
            /* Soft reset clears everything except LBK/OPMOD; nothing else to do. */
            break;
        }
        prev = emc->regs[reg];
        emc->regs[reg] = value;
        /* Update tx state. */
        if (!(prev & REG_MCMDR_TXON) &&
            (value & REG_MCMDR_TXON)) {
            emc->regs[REG_CTXDSA] = emc->regs[REG_TXDLSA];
            /*
             * Guests may turn TX on while the CPU still owns the descriptors,
             * so wait for a write to TSDR before trying to send a packet:
             * don't send one here.
             */
        } else if ((prev & REG_MCMDR_TXON) &&
                   !(value & REG_MCMDR_TXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_TXHA;
        }
        if (!(value & REG_MCMDR_TXON)) {
            emc_halt_tx(emc, 0);
        }
        /* Update rx state. */
        if (!(prev & REG_MCMDR_RXON) &&
            (value & REG_MCMDR_RXON)) {
            emc->regs[REG_CRXDSA] = emc->regs[REG_RXDLSA];
        } else if ((prev & REG_MCMDR_RXON) &&
                   !(value & REG_MCMDR_RXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA;
        }
        if (value & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        } else {
            emc_halt_rx(emc, 0);
        }
        break;
    }
    case REG_TXDLSA:
    case REG_RXDLSA:
    case REG_DMARFC:
    case REG_MIID:
        emc->regs[reg] = value;
        break;
    case REG_MIEN:
        emc->regs[reg] = value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MISTA:
        /* Write 1 to clear. */
        emc->regs[reg] &= ~value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MGSTA:
        /* Write 1 to clear. */
        emc->regs[reg] &= ~value;
        break;
    case REG_TSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_TXON) {
            emc->tx_active = true;
            /* Keep trying to send packets until we run out. */
            while (emc->tx_active) {
                emc_try_send_next_packet(emc);
            }
        }
        break;
    case REG_RSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        }
        break;
    case REG_MIIDA:
        emc->regs[reg] = value & ~REG_MIIDA_BUSY;
        break;
    case REG_MRPC:
    case REG_MRPCC:
    case REG_MREPC:
    case REG_CTXDSA:
    case REG_CTXBSA:
    case REG_CRXDSA:
    case REG_CRXBSA:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write to read-only reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: Write to unimplemented reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    }
}

static const struct MemoryRegionOps npcm7xx_emc_ops = {
    .read = npcm7xx_emc_read,
    .write = npcm7xx_emc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void emc_cleanup(NetClientState *nc)
{
    /* Nothing to do yet. */
}

static NetClientInfo net_npcm7xx_emc_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = emc_can_receive,
    .receive = emc_receive,
    .cleanup = emc_cleanup,
    .link_status_changed = emc_set_link,
};

static void npcm7xx_emc_realize(DeviceState *dev, Error **errp)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(emc);

    memory_region_init_io(&emc->iomem, OBJECT(emc), &npcm7xx_emc_ops, emc,
                          TYPE_NPCM7XX_EMC, 4 * KiB);
    sysbus_init_mmio(sbd, &emc->iomem);
    sysbus_init_irq(sbd, &emc->tx_irq);
    sysbus_init_irq(sbd, &emc->rx_irq);

    qemu_macaddr_default_if_unset(&emc->conf.macaddr);
    emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf,
                            object_get_typename(OBJECT(dev)), dev->id, emc);
    qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a);
}

static void npcm7xx_emc_unrealize(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);

    qemu_del_nic(emc->nic);
}

static const VMStateDescription vmstate_npcm7xx_emc = {
    .name = TYPE_NPCM7XX_EMC,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(emc_num, NPCM7xxEMCState),
        VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS),
        VMSTATE_BOOL(tx_active, NPCM7xxEMCState),
        VMSTATE_BOOL(rx_active, NPCM7xxEMCState),
        VMSTATE_END_OF_LIST(),
    },
};

static Property npcm7xx_emc_properties[] = {
    DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "NPCM7xx EMC Controller";
    dc->realize = npcm7xx_emc_realize;
    dc->unrealize = npcm7xx_emc_unrealize;
    dc->reset = npcm7xx_emc_reset;
    dc->vmsd = &vmstate_npcm7xx_emc;
    device_class_set_props(dc, npcm7xx_emc_properties);
}

static const TypeInfo npcm7xx_emc_info = {
    .name = TYPE_NPCM7XX_EMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(NPCM7xxEMCState),
    .class_init = npcm7xx_emc_class_init,
};

static void npcm7xx_emc_register_type(void)
{
    type_register_static(&npcm7xx_emc_info);
}

type_init(npcm7xx_emc_register_type)