qemu/hw/net/imx_fec.c
   1/*
   2 * i.MX Fast Ethernet Controller emulation.
   3 *
   4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
   5 *
   6 * Based on Coldfire Fast Ethernet Controller emulation.
   7 *
   8 * Copyright (c) 2007 CodeSourcery.
   9 *
  10 *  This program is free software; you can redistribute it and/or modify it
  11 *  under the terms of the GNU General Public License as published by the
  12 *  Free Software Foundation; either version 2 of the License, or
  13 *  (at your option) any later version.
  14 *
  15 *  This program is distributed in the hope that it will be useful, but WITHOUT
  16 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18 *  for more details.
  19 *
  20 *  You should have received a copy of the GNU General Public License along
  21 *  with this program; if not, see <http://www.gnu.org/licenses/>.
  22 */
  23
  24#include "qemu/osdep.h"
  25#include "hw/irq.h"
  26#include "hw/net/imx_fec.h"
  27#include "hw/qdev-properties.h"
  28#include "migration/vmstate.h"
  29#include "sysemu/dma.h"
  30#include "qemu/log.h"
  31#include "qemu/module.h"
  32#include "net/checksum.h"
  33#include "net/eth.h"
  34#include "trace.h"
  35
  36/* For crc32 */
  37#include <zlib.h>
  38
  39#define IMX_MAX_DESC    1024
  40
  41static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
  42{
  43    static char tmp[20];
  44    snprintf(tmp, sizeof(tmp), "index %" PRIu32, index);
  45    return tmp;
  46}
  47
  48static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
  49{
  50    switch (index) {
  51    case ENET_FRBR:
  52        return "FRBR";
  53    case ENET_FRSR:
  54        return "FRSR";
  55    case ENET_MIIGSK_CFGR:
  56        return "MIIGSK_CFGR";
  57    case ENET_MIIGSK_ENR:
  58        return "MIIGSK_ENR";
  59    default:
  60        return imx_default_reg_name(s, index);
  61    }
  62}
  63
  64static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
  65{
  66    switch (index) {
  67    case ENET_RSFL:
  68        return "RSFL";
  69    case ENET_RSEM:
  70        return "RSEM";
  71    case ENET_RAEM:
  72        return "RAEM";
  73    case ENET_RAFL:
  74        return "RAFL";
  75    case ENET_TSEM:
  76        return "TSEM";
  77    case ENET_TAEM:
  78        return "TAEM";
  79    case ENET_TAFL:
  80        return "TAFL";
  81    case ENET_TIPG:
  82        return "TIPG";
  83    case ENET_FTRL:
  84        return "FTRL";
  85    case ENET_TACC:
  86        return "TACC";
  87    case ENET_RACC:
  88        return "RACC";
  89    case ENET_ATCR:
  90        return "ATCR";
  91    case ENET_ATVR:
  92        return "ATVR";
  93    case ENET_ATOFF:
  94        return "ATOFF";
  95    case ENET_ATPER:
  96        return "ATPER";
  97    case ENET_ATCOR:
  98        return "ATCOR";
  99    case ENET_ATINC:
 100        return "ATINC";
 101    case ENET_ATSTMP:
 102        return "ATSTMP";
 103    case ENET_TGSR:
 104        return "TGSR";
 105    case ENET_TCSR0:
 106        return "TCSR0";
 107    case ENET_TCCR0:
 108        return "TCCR0";
 109    case ENET_TCSR1:
 110        return "TCSR1";
 111    case ENET_TCCR1:
 112        return "TCCR1";
 113    case ENET_TCSR2:
 114        return "TCSR2";
 115    case ENET_TCCR2:
 116        return "TCCR2";
 117    case ENET_TCSR3:
 118        return "TCSR3";
 119    case ENET_TCCR3:
 120        return "TCCR3";
 121    default:
 122        return imx_default_reg_name(s, index);
 123    }
 124}
 125
 126static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
 127{
 128    switch (index) {
 129    case ENET_EIR:
 130        return "EIR";
 131    case ENET_EIMR:
 132        return "EIMR";
 133    case ENET_RDAR:
 134        return "RDAR";
 135    case ENET_TDAR:
 136        return "TDAR";
 137    case ENET_ECR:
 138        return "ECR";
 139    case ENET_MMFR:
 140        return "MMFR";
 141    case ENET_MSCR:
 142        return "MSCR";
 143    case ENET_MIBC:
 144        return "MIBC";
 145    case ENET_RCR:
 146        return "RCR";
 147    case ENET_TCR:
 148        return "TCR";
 149    case ENET_PALR:
 150        return "PALR";
 151    case ENET_PAUR:
 152        return "PAUR";
 153    case ENET_OPD:
 154        return "OPD";
 155    case ENET_IAUR:
 156        return "IAUR";
 157    case ENET_IALR:
 158        return "IALR";
 159    case ENET_GAUR:
 160        return "GAUR";
 161    case ENET_GALR:
 162        return "GALR";
 163    case ENET_TFWR:
 164        return "TFWR";
 165    case ENET_RDSR:
 166        return "RDSR";
 167    case ENET_TDSR:
 168        return "TDSR";
 169    case ENET_MRBR:
 170        return "MRBR";
 171    default:
 172        if (s->is_fec) {
 173            return imx_fec_reg_name(s, index);
 174        } else {
 175            return imx_enet_reg_name(s, index);
 176        }
 177    }
 178}
 179
 180/*
 181 * Versions of this device with more than one TX descriptor save the
 182 * 2nd and 3rd descriptors in a subsection, to maintain migration
 183 * compatibility with previous versions of the device that only
 184 * supported a single descriptor.
 185 */
 186static bool imx_eth_is_multi_tx_ring(void *opaque)
 187{
 188    IMXFECState *s = IMX_FEC(opaque);
 189
 190    return s->tx_ring_num > 1;
 191}
 192
 193static const VMStateDescription vmstate_imx_eth_txdescs = {
 194    .name = "imx.fec/txdescs",
 195    .version_id = 1,
 196    .minimum_version_id = 1,
 197    .needed = imx_eth_is_multi_tx_ring,
 198    .fields = (VMStateField[]) {
 199         VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
 200         VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
 201         VMSTATE_END_OF_LIST()
 202    }
 203};
 204
 205static const VMStateDescription vmstate_imx_eth = {
 206    .name = TYPE_IMX_FEC,
 207    .version_id = 2,
 208    .minimum_version_id = 2,
 209    .fields = (VMStateField[]) {
 210        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
 211        VMSTATE_UINT32(rx_descriptor, IMXFECState),
 212        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
 213        VMSTATE_UINT32(phy_status, IMXFECState),
 214        VMSTATE_UINT32(phy_control, IMXFECState),
 215        VMSTATE_UINT32(phy_advertise, IMXFECState),
 216        VMSTATE_UINT32(phy_int, IMXFECState),
 217        VMSTATE_UINT32(phy_int_mask, IMXFECState),
 218        VMSTATE_END_OF_LIST()
 219    },
 220    .subsections = (const VMStateDescription * []) {
 221        &vmstate_imx_eth_txdescs,
 222        NULL
 223    },
 224};
 225
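/*
 * These bits model a vendor-specific PHY interrupt source/mask register
 * pair: imx_phy_read() returns the source bits for register 29 (cleared
 * on read) and the mask for register 30.  The exact layout is specific
 * to the PHY model emulated here.
 */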
 226#define PHY_INT_ENERGYON            (1 << 7)
 227#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
 228#define PHY_INT_FAULT               (1 << 5)
 229#define PHY_INT_DOWN                (1 << 4)
 230#define PHY_INT_AUTONEG_LP          (1 << 3)
 231#define PHY_INT_PARFAULT            (1 << 2)
 232#define PHY_INT_AUTONEG_PAGE        (1 << 1)
 233
 234static void imx_eth_update(IMXFECState *s);
 235
 236/*
 237 * The MII PHY could raise a GPIO to the processor which in turn
 238 * could be handled as an interrupt by the OS.
 239 * For now we don't handle any GPIO/interrupt line, so the OS will
 240 * have to poll for the PHY status.
 241 */
 242static void imx_phy_update_irq(IMXFECState *s)
 243{
 244    imx_eth_update(s);
 245}
 246
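/*
 * Link state is reflected in the standard MII BMSR bits: 0x0024 sets or
 * clears bit 5 (auto-negotiation complete) and bit 2 (link status).  The
 * matching vendor interrupt source bits are latched so that a polling
 * guest can observe the transition.
 */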
 247static void imx_phy_update_link(IMXFECState *s)
 248{
 249    /* Autonegotiation status mirrors link status.  */
 250    if (qemu_get_queue(s->nic)->link_down) {
 251        trace_imx_phy_update_link("down");
 252        s->phy_status &= ~0x0024;
 253        s->phy_int |= PHY_INT_DOWN;
 254    } else {
 255        trace_imx_phy_update_link("up");
 256        s->phy_status |= 0x0024;
 257        s->phy_int |= PHY_INT_ENERGYON;
 258        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
 259    }
 260    imx_phy_update_irq(s);
 261}
 262
 263static void imx_eth_set_link(NetClientState *nc)
 264{
 265    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
 266}
 267
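/*
 * The reset values below follow the standard MII register layout:
 * BMSR 0x7809 advertises 100/10 Mb/s full- and half-duplex ability,
 * auto-negotiation capability and extended register support; BMCR
 * 0x3000 selects 100 Mb/s with auto-negotiation enabled; ANAR 0x01e1
 * advertises 100/10 full/half duplex with the IEEE 802.3 selector field.
 */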
 268static void imx_phy_reset(IMXFECState *s)
 269{
 270    trace_imx_phy_reset();
 271
 272    s->phy_status = 0x7809;
 273    s->phy_control = 0x3000;
 274    s->phy_advertise = 0x01e1;
 275    s->phy_int_mask = 0;
 276    s->phy_int = 0;
 277    imx_phy_update_link(s);
 278}
 279
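/*
 * imx_eth_write() passes the MMFR PA (PHY address) and RA (register)
 * fields as a single 10-bit value, extract32(value, 18, 10), so "reg"
 * here is PA * 32 + RA; hence the split into reg / 32 and reg % 32 below.
 *
 * Illustrative example (field layout assumed from the i.MX reference
 * manual, not re-checked here): a guest MDIO read of the BMSR (register 1)
 * on PHY address 0 would be started by writing roughly 0x60060000 to
 * ENET_MMFR (ST=01, OP=10 read, PA=0, RA=1, TA=10), with the result
 * returned in the low 16 bits of ENET_MMFR.
 */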
 280static uint32_t imx_phy_read(IMXFECState *s, int reg)
 281{
 282    uint32_t val;
 283    uint32_t phy = reg / 32;
 284
 285    if (phy != s->phy_num) {
 286        trace_imx_phy_read_num(phy, s->phy_num);
 287        return 0xffff;
 288    }
 289
 290    reg %= 32;
 291
 292    switch (reg) {
 293    case 0:     /* Basic Control */
 294        val = s->phy_control;
 295        break;
 296    case 1:     /* Basic Status */
 297        val = s->phy_status;
 298        break;
 299    case 2:     /* ID1 */
 300        val = 0x0007;
 301        break;
 302    case 3:     /* ID2 */
 303        val = 0xc0d1;
 304        break;
 305    case 4:     /* Auto-neg advertisement */
 306        val = s->phy_advertise;
 307        break;
 308    case 5:     /* Auto-neg Link Partner Ability */
 309        val = 0x0f71;
 310        break;
 311    case 6:     /* Auto-neg Expansion */
 312        val = 1;
 313        break;
 314    case 29:    /* Interrupt source.  */
 315        val = s->phy_int;
 316        s->phy_int = 0;
 317        imx_phy_update_irq(s);
 318        break;
 319    case 30:    /* Interrupt mask */
 320        val = s->phy_int_mask;
 321        break;
 322    case 17:
 323    case 18:
 324    case 27:
 325    case 31:
 326        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
 327                      TYPE_IMX_FEC, __func__, reg);
 328        val = 0;
 329        break;
 330    default:
 331        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
 332                      TYPE_IMX_FEC, __func__, reg);
 333        val = 0;
 334        break;
 335    }
 336
 337    trace_imx_phy_read(val, phy, reg);
 338
 339    return val;
 340}
 341
 342static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
 343{
 344    uint32_t phy = reg / 32;
 345
 346    if (phy != s->phy_num) {
 347        trace_imx_phy_write_num(phy, s->phy_num);
 348        return;
 349    }
 350
 351    reg %= 32;
 352
 353    trace_imx_phy_write(val, phy, reg);
 354
 355    switch (reg) {
 356    case 0:     /* Basic Control */
 357        if (val & 0x8000) {
 358            imx_phy_reset(s);
 359        } else {
 360            s->phy_control = val & 0x7980;
 361            /* Complete autonegotiation immediately.  */
 362            if (val & 0x1000) {
 363                s->phy_status |= 0x0020;
 364            }
 365        }
 366        break;
 367    case 4:     /* Auto-neg advertisement */
 368        s->phy_advertise = (val & 0x2d7f) | 0x80;
 369        break;
 370    case 30:    /* Interrupt mask */
 371        s->phy_int_mask = val & 0xff;
 372        imx_phy_update_irq(s);
 373        break;
 374    case 17:
 375    case 18:
 376    case 27:
 377    case 31:
 378        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
 379                      TYPE_IMX_FEC, __func__, reg);
 380        break;
 381    default:
 382        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
 383                      TYPE_IMX_FEC, __func__, reg);
 384        break;
 385    }
 386}
 387
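/*
 * Buffer descriptor accessors.  The legacy FEC descriptor (IMXFECBufDesc)
 * is the short flags/length/buffer-pointer layout, while the enhanced
 * ENET descriptor (IMXENETBufDesc) adds option, status and timestamp
 * fields; both struct layouts are defined in hw/net/imx_fec.h.
 * imx_eth_do_tx() and imx_eth_receive() pick the matching accessor
 * depending on whether the device is in FEC mode or in ENET mode with
 * ECR[EN1588] set.
 */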
 388static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
 389{
 390    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
 391                    MEMTXATTRS_UNSPECIFIED);
 392
 393    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
 394}
 395
 396static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
 397{
 398    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
 399                     MEMTXATTRS_UNSPECIFIED);
 400}
 401
 402static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
 403{
 404    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
 405                    MEMTXATTRS_UNSPECIFIED);
 406
 407    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
 408                   bd->option, bd->status);
 409}
 410
 411static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
 412{
 413    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
 414                     MEMTXATTRS_UNSPECIFIED);
 415}
 416
 417static void imx_eth_update(IMXFECState *s)
 418{
 419    /*
 420     * Previous versions of QEMU had the ENET_INT_MAC and ENET_INT_TS_TIMER
 421     * interrupts swapped. This worked with older versions of Linux (4.14
 422     * and older) since Linux associated both interrupt lines with Ethernet
 423     * MAC interrupts. Specifically,
 424     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
 425     *   timer interrupts. Those versions of Linux fail with versions of QEMU
 426     *   with swapped interrupt assignments.
 427     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
 428     *   MAC interrupt handler. As a result, all versions of QEMU happen to
 429     *   work, though that is accidental.
 430     * - In Linux 4.9 and older, the timer interrupt was registered directly
 431     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
 432     *   redirected to a GPIO interrupt to work around erratum ERR006687.
 433     *   This was implemented using the SoC's IOMUX block. In QEMU, this GPIO
 434     *   interrupt never fired since IOMUX is currently not supported in QEMU.
 435     *   Linux instead received MAC interrupts on the timer interrupt.
 436     *   As a result, QEMU versions with the swapped interrupt assignment work,
 437     *   albeit accidentally, but QEMU versions with the correct interrupt
 438     *   assignment fail.
 439     *
 440     * To ensure that all versions of Linux work, generate ENET_INT_MAC
 441     * interrupts on both interrupt lines. This should be changed if and when
 442     * QEMU supports IOMUX.
 443     */
 444    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
 445        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
 446        qemu_set_irq(s->irq[1], 1);
 447    } else {
 448        qemu_set_irq(s->irq[1], 0);
 449    }
 450
 451    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
 452        qemu_set_irq(s->irq[0], 1);
 453    } else {
 454        qemu_set_irq(s->irq[0], 0);
 455    }
 456}
 457
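/*
 * Transmit path (legacy FEC descriptors): walk the ring starting at
 * tx_descriptor[0], gathering each ready (ENET_BD_R) buffer into s->frame
 * until a descriptor with ENET_BD_L marks the end of the frame, which is
 * then handed to the network backend.  Each consumed descriptor has its
 * R bit cleared and is written back; the walk wraps to ENET_TDSR when a
 * descriptor has the wrap (ENET_BD_W) bit set, and gives up after
 * IMX_MAX_DESC descriptors to avoid looping forever on a bad ring.
 */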
 458static void imx_fec_do_tx(IMXFECState *s)
 459{
 460    int frame_size = 0, descnt = 0;
 461    uint8_t *ptr = s->frame;
 462    uint32_t addr = s->tx_descriptor[0];
 463
 464    while (descnt++ < IMX_MAX_DESC) {
 465        IMXFECBufDesc bd;
 466        int len;
 467
 468        imx_fec_read_bd(&bd, addr);
 469        if ((bd.flags & ENET_BD_R) == 0) {
 470
 471            /* We have run out of descriptors to transmit.  */
 472            trace_imx_eth_tx_bd_busy();
 473
 474            break;
 475        }
 476        len = bd.length;
 477        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
 478            len = ENET_MAX_FRAME_SIZE - frame_size;
 479            s->regs[ENET_EIR] |= ENET_INT_BABT;
 480        }
 481        dma_memory_read(&address_space_memory, bd.data, ptr, len,
 482                        MEMTXATTRS_UNSPECIFIED);
 483        ptr += len;
 484        frame_size += len;
 485        if (bd.flags & ENET_BD_L) {
 486            /* Last buffer in frame.  */
 487            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
 488            ptr = s->frame;
 489            frame_size = 0;
 490            s->regs[ENET_EIR] |= ENET_INT_TXF;
 491        }
 492        s->regs[ENET_EIR] |= ENET_INT_TXB;
 493        bd.flags &= ~ENET_BD_R;
 494        /* Write back the modified descriptor.  */
 495        imx_fec_write_bd(&bd, addr);
 496        /* Advance to the next descriptor.  */
 497        if ((bd.flags & ENET_BD_W) != 0) {
 498            addr = s->regs[ENET_TDSR];
 499        } else {
 500            addr += sizeof(bd);
 501        }
 502    }
 503
 504    s->tx_descriptor[0] = addr;
 505
 506    imx_eth_update(s);
 507}
 508
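/*
 * Transmit path (enhanced ENET descriptors): the same ring walk as
 * imx_fec_do_tx(), but with up to three TX rings selected by which TDAR
 * register was written, optional IP/TCP/UDP checksum insertion driven by
 * the descriptor IINS/PINS option bits, per-descriptor interrupt gating
 * via ENET_BD_TX_INT, and the BDU bit set once the last buffer of a
 * frame has been processed.
 */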
 509static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
 510{
 511    int frame_size = 0, descnt = 0;
 512
 513    uint8_t *ptr = s->frame;
 514    uint32_t addr, int_txb, int_txf, tdsr;
 515    size_t ring;
 516
 517    switch (index) {
 518    case ENET_TDAR:
 519        ring    = 0;
 520        int_txb = ENET_INT_TXB;
 521        int_txf = ENET_INT_TXF;
 522        tdsr    = ENET_TDSR;
 523        break;
 524    case ENET_TDAR1:
 525        ring    = 1;
 526        int_txb = ENET_INT_TXB1;
 527        int_txf = ENET_INT_TXF1;
 528        tdsr    = ENET_TDSR1;
 529        break;
 530    case ENET_TDAR2:
 531        ring    = 2;
 532        int_txb = ENET_INT_TXB2;
 533        int_txf = ENET_INT_TXF2;
 534        tdsr    = ENET_TDSR2;
 535        break;
 536    default:
 537        qemu_log_mask(LOG_GUEST_ERROR,
 538                      "%s: bogus value for index %x\n",
 539                      __func__, index);
 540        abort();
 541        break;
 542    }
 543
 544    addr = s->tx_descriptor[ring];
 545
 546    while (descnt++ < IMX_MAX_DESC) {
 547        IMXENETBufDesc bd;
 548        int len;
 549
 550        imx_enet_read_bd(&bd, addr);
 551        if ((bd.flags & ENET_BD_R) == 0) {
 552            /* We have run out of descriptors to transmit.  */
 553
 554            trace_imx_eth_tx_bd_busy();
 555
 556            break;
 557        }
 558        len = bd.length;
 559        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
 560            len = ENET_MAX_FRAME_SIZE - frame_size;
 561            s->regs[ENET_EIR] |= ENET_INT_BABT;
 562        }
 563        dma_memory_read(&address_space_memory, bd.data, ptr, len,
 564                        MEMTXATTRS_UNSPECIFIED);
 565        ptr += len;
 566        frame_size += len;
 567        if (bd.flags & ENET_BD_L) {
 568            int csum = 0;
 569
 570            if (bd.option & ENET_BD_PINS) {
 571                csum |= (CSUM_TCP | CSUM_UDP);
 572            }
 573            if (bd.option & ENET_BD_IINS) {
 574                csum |= CSUM_IP;
 575            }
 576            if (csum) {
 577                net_checksum_calculate(s->frame, frame_size, csum);
 578            }
 579
 580            /* Last buffer in frame.  */
 581
 582            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
 583            ptr = s->frame;
 584
 585            frame_size = 0;
 586            if (bd.option & ENET_BD_TX_INT) {
 587                s->regs[ENET_EIR] |= int_txf;
 588            }
 589            /* Indicate that we've updated the last buffer descriptor. */
 590            bd.last_buffer = ENET_BD_BDU;
 591        }
 592        if (bd.option & ENET_BD_TX_INT) {
 593            s->regs[ENET_EIR] |= int_txb;
 594        }
 595        bd.flags &= ~ENET_BD_R;
 596        /* Write back the modified descriptor.  */
 597        imx_enet_write_bd(&bd, addr);
 598        /* Advance to the next descriptor.  */
 599        if ((bd.flags & ENET_BD_W) != 0) {
 600            addr = s->regs[tdsr];
 601        } else {
 602            addr += sizeof(bd);
 603        }
 604    }
 605
 606    s->tx_descriptor[ring] = addr;
 607
 608    imx_eth_update(s);
 609}
 610
 611static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
 612{
 613    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
 614        imx_enet_do_tx(s, index);
 615    } else {
 616        imx_fec_do_tx(s);
 617    }
 618}
 619
 620static void imx_eth_enable_rx(IMXFECState *s, bool flush)
 621{
 622    IMXFECBufDesc bd;
 623
 624    imx_fec_read_bd(&bd, s->rx_descriptor);
 625
 626    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
 627
 628    if (!s->regs[ENET_RDAR]) {
 629        trace_imx_eth_rx_bd_full();
 630    } else if (flush) {
 631        qemu_flush_queued_packets(qemu_get_queue(s->nic));
 632    }
 633}
 634
 635static void imx_eth_reset(DeviceState *d)
 636{
 637    IMXFECState *s = IMX_FEC(d);
 638
 639    /* Reset the Device */
 640    memset(s->regs, 0, sizeof(s->regs));
 641    s->regs[ENET_ECR]   = 0xf0000000;
 642    s->regs[ENET_MIBC]  = 0xc0000000;
 643    s->regs[ENET_RCR]   = 0x05ee0001;
 644    s->regs[ENET_OPD]   = 0x00010000;
 645
 646    s->regs[ENET_PALR]  = (s->conf.macaddr.a[0] << 24)
 647                          | (s->conf.macaddr.a[1] << 16)
 648                          | (s->conf.macaddr.a[2] << 8)
 649                          | s->conf.macaddr.a[3];
 650    s->regs[ENET_PAUR]  = (s->conf.macaddr.a[4] << 24)
 651                          | (s->conf.macaddr.a[5] << 16)
 652                          | 0x8808;
 653
 654    if (s->is_fec) {
 655        s->regs[ENET_FRBR]  = 0x00000600;
 656        s->regs[ENET_FRSR]  = 0x00000500;
 657        s->regs[ENET_MIIGSK_ENR]  = 0x00000006;
 658    } else {
 659        s->regs[ENET_RAEM]  = 0x00000004;
 660        s->regs[ENET_RAFL]  = 0x00000004;
 661        s->regs[ENET_TAEM]  = 0x00000004;
 662        s->regs[ENET_TAFL]  = 0x00000008;
 663        s->regs[ENET_TIPG]  = 0x0000000c;
 664        s->regs[ENET_FTRL]  = 0x000007ff;
 665        s->regs[ENET_ATPER] = 0x3b9aca00;
 666    }
 667
 668    s->rx_descriptor = 0;
 669    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
 670
 671    /* We also reset the PHY */
 672    imx_phy_reset(s);
 673}
 674
 675static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
 676{
 677    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
 678                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
 679    return 0;
 680}
 681
 682static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
 683{
 684    switch (index) {
 685    case ENET_FRBR:
 686    case ENET_FRSR:
 687    case ENET_MIIGSK_CFGR:
 688    case ENET_MIIGSK_ENR:
 689        return s->regs[index];
 690    default:
 691        return imx_default_read(s, index);
 692    }
 693}
 694
 695static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
 696{
 697    switch (index) {
 698    case ENET_RSFL:
 699    case ENET_RSEM:
 700    case ENET_RAEM:
 701    case ENET_RAFL:
 702    case ENET_TSEM:
 703    case ENET_TAEM:
 704    case ENET_TAFL:
 705    case ENET_TIPG:
 706    case ENET_FTRL:
 707    case ENET_TACC:
 708    case ENET_RACC:
 709    case ENET_ATCR:
 710    case ENET_ATVR:
 711    case ENET_ATOFF:
 712    case ENET_ATPER:
 713    case ENET_ATCOR:
 714    case ENET_ATINC:
 715    case ENET_ATSTMP:
 716    case ENET_TGSR:
 717    case ENET_TCSR0:
 718    case ENET_TCCR0:
 719    case ENET_TCSR1:
 720    case ENET_TCCR1:
 721    case ENET_TCSR2:
 722    case ENET_TCCR2:
 723    case ENET_TCSR3:
 724    case ENET_TCCR3:
 725        return s->regs[index];
 726    default:
 727        return imx_default_read(s, index);
 728    }
 729}
 730
 731static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
 732{
 733    uint32_t value = 0;
 734    IMXFECState *s = IMX_FEC(opaque);
 735    uint32_t index = offset >> 2;
 736
 737    switch (index) {
 738    case ENET_EIR:
 739    case ENET_EIMR:
 740    case ENET_RDAR:
 741    case ENET_TDAR:
 742    case ENET_ECR:
 743    case ENET_MMFR:
 744    case ENET_MSCR:
 745    case ENET_MIBC:
 746    case ENET_RCR:
 747    case ENET_TCR:
 748    case ENET_PALR:
 749    case ENET_PAUR:
 750    case ENET_OPD:
 751    case ENET_IAUR:
 752    case ENET_IALR:
 753    case ENET_GAUR:
 754    case ENET_GALR:
 755    case ENET_TFWR:
 756    case ENET_RDSR:
 757    case ENET_TDSR:
 758    case ENET_MRBR:
 759        value = s->regs[index];
 760        break;
 761    default:
 762        if (s->is_fec) {
 763            value = imx_fec_read(s, index);
 764        } else {
 765            value = imx_enet_read(s, index);
 766        }
 767        break;
 768    }
 769
 770    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);
 771
 772    return value;
 773}
 774
 775static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
 776{
 777    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
 778                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
 779    return;
 780}
 781
 782static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
 783{
 784    switch (index) {
 785    case ENET_FRBR:
 786        /* FRBR is read only */
 787        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
 788                      TYPE_IMX_FEC, __func__);
 789        break;
 790    case ENET_FRSR:
 791        s->regs[index] = (value & 0x000003fc) | 0x00000400;
 792        break;
 793    case ENET_MIIGSK_CFGR:
 794        s->regs[index] = value & 0x00000053;
 795        break;
 796    case ENET_MIIGSK_ENR:
 797        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
 798        break;
 799    default:
 800        imx_default_write(s, index, value);
 801        break;
 802    }
 803}
 804
 805static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
 806{
 807    switch (index) {
 808    case ENET_RSFL:
 809    case ENET_RSEM:
 810    case ENET_RAEM:
 811    case ENET_RAFL:
 812    case ENET_TSEM:
 813    case ENET_TAEM:
 814    case ENET_TAFL:
 815        s->regs[index] = value & 0x000001ff;
 816        break;
 817    case ENET_TIPG:
 818        s->regs[index] = value & 0x0000001f;
 819        break;
 820    case ENET_FTRL:
 821        s->regs[index] = value & 0x00003fff;
 822        break;
 823    case ENET_TACC:
 824        s->regs[index] = value & 0x00000019;
 825        break;
 826    case ENET_RACC:
 827        s->regs[index] = value & 0x000000c7;
 828        break;
 829    case ENET_ATCR:
 830        s->regs[index] = value & 0x00002a9d;
 831        break;
 832    case ENET_ATVR:
 833    case ENET_ATOFF:
 834    case ENET_ATPER:
 835        s->regs[index] = value;
 836        break;
 837    case ENET_ATSTMP:
 838        /* ATSTMP is read only */
 839        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
 840                      TYPE_IMX_FEC, __func__);
 841        break;
 842    case ENET_ATCOR:
 843        s->regs[index] = value & 0x7fffffff;
 844        break;
 845    case ENET_ATINC:
 846        s->regs[index] = value & 0x00007f7f;
 847        break;
 848    case ENET_TGSR:
 849        /* implement clear timer flag */
 850        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
 851        break;
 852    case ENET_TCSR0:
 853    case ENET_TCSR1:
 854    case ENET_TCSR2:
 855    case ENET_TCSR3:
 856        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
 857        s->regs[index] &= ~0x0000007d; /* writable fields */
 858        s->regs[index] |= (value & 0x0000007d);
 859        break;
 860    case ENET_TCCR0:
 861    case ENET_TCCR1:
 862    case ENET_TCCR2:
 863    case ENET_TCCR3:
 864        s->regs[index] = value;
 865        break;
 866    default:
 867        imx_default_write(s, index, value);
 868        break;
 869    }
 870}
 871
 872static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
 873                           unsigned size)
 874{
 875    IMXFECState *s = IMX_FEC(opaque);
 876    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
 877    uint32_t index = offset >> 2;
 878
 879    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);
 880
 881    switch (index) {
 882    case ENET_EIR:
 883        s->regs[index] &= ~value;
 884        break;
 885    case ENET_EIMR:
 886        s->regs[index] = value;
 887        break;
 888    case ENET_RDAR:
 889        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
 890            if (!s->regs[index]) {
 891                imx_eth_enable_rx(s, true);
 892            }
 893        } else {
 894            s->regs[index] = 0;
 895        }
 896        break;
 897    case ENET_TDAR1:
 898    case ENET_TDAR2:
 899        if (unlikely(single_tx_ring)) {
 900            qemu_log_mask(LOG_GUEST_ERROR,
 901                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
 902                          TYPE_IMX_FEC, __func__);
 903            return;
 904        }
 905        /* fall through */
 906    case ENET_TDAR:
 907        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
 908            s->regs[index] = ENET_TDAR_TDAR;
 909            imx_eth_do_tx(s, index);
 910        }
 911        s->regs[index] = 0;
 912        break;
 913    case ENET_ECR:
 914        if (value & ENET_ECR_RESET) {
 915            return imx_eth_reset(DEVICE(s));
 916        }
 917        s->regs[index] = value;
 918        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
 919            s->regs[ENET_RDAR] = 0;
 920            s->rx_descriptor = s->regs[ENET_RDSR];
 921            s->regs[ENET_TDAR]  = 0;
 922            s->regs[ENET_TDAR1] = 0;
 923            s->regs[ENET_TDAR2] = 0;
 924            s->tx_descriptor[0] = s->regs[ENET_TDSR];
 925            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
 926            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
 927        }
 928        break;
 929    case ENET_MMFR:
 930        s->regs[index] = value;
 931        if (extract32(value, 29, 1)) {
 932            /* This is a read operation */
 933            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
 934                                           imx_phy_read(s,
 935                                                       extract32(value,
 936                                                                 18, 10)));
 937        } else {
 938            /* This is a write operation */
 939            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
 940        }
 941        /* raise the interrupt as the PHY operation is done */
 942        s->regs[ENET_EIR] |= ENET_INT_MII;
 943        break;
 944    case ENET_MSCR:
 945        s->regs[index] = value & 0xfe;
 946        break;
 947    case ENET_MIBC:
 948        /* TODO: Implement MIB.  */
 949        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
 950        break;
 951    case ENET_RCR:
 952        s->regs[index] = value & 0x07ff003f;
 953        /* TODO: Implement LOOP mode.  */
 954        break;
 955    case ENET_TCR:
 956        /* We transmit immediately, so raise GRA immediately.  */
 957        s->regs[index] = value;
 958        if (value & 1) {
 959            s->regs[ENET_EIR] |= ENET_INT_GRA;
 960        }
 961        break;
 962    case ENET_PALR:
 963        s->regs[index] = value;
 964        s->conf.macaddr.a[0] = value >> 24;
 965        s->conf.macaddr.a[1] = value >> 16;
 966        s->conf.macaddr.a[2] = value >> 8;
 967        s->conf.macaddr.a[3] = value;
 968        break;
 969    case ENET_PAUR:
 970        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
 971        s->conf.macaddr.a[4] = value >> 24;
 972        s->conf.macaddr.a[5] = value >> 16;
 973        break;
 974    case ENET_OPD:
 975        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
 976        break;
 977    case ENET_IAUR:
 978    case ENET_IALR:
 979    case ENET_GAUR:
 980    case ENET_GALR:
 981        /* TODO: implement MAC hash filtering.  */
 982        break;
 983    case ENET_TFWR:
 984        if (s->is_fec) {
 985            s->regs[index] = value & 0x3;
 986        } else {
 987            s->regs[index] = value & 0x13f;
 988        }
 989        break;
 990    case ENET_RDSR:
 991        if (s->is_fec) {
 992            s->regs[index] = value & ~3;
 993        } else {
 994            s->regs[index] = value & ~7;
 995        }
 996        s->rx_descriptor = s->regs[index];
 997        break;
 998    case ENET_TDSR:
 999        if (s->is_fec) {
1000            s->regs[index] = value & ~3;
1001        } else {
1002            s->regs[index] = value & ~7;
1003        }
1004        s->tx_descriptor[0] = s->regs[index];
1005        break;
1006    case ENET_TDSR1:
1007        if (unlikely(single_tx_ring)) {
1008            qemu_log_mask(LOG_GUEST_ERROR,
1009                          "[%s]%s: trying to access TDSR1\n",
1010                          TYPE_IMX_FEC, __func__);
1011            return;
1012        }
1013
1014        s->regs[index] = value & ~7;
1015        s->tx_descriptor[1] = s->regs[index];
1016        break;
1017    case ENET_TDSR2:
1018        if (unlikely(single_tx_ring)) {
1019            qemu_log_mask(LOG_GUEST_ERROR,
1020                          "[%s]%s: trying to access TDSR2\n",
1021                          TYPE_IMX_FEC, __func__);
1022            return;
1023        }
1024
1025        s->regs[index] = value & ~7;
1026        s->tx_descriptor[2] = s->regs[index];
1027        break;
1028    case ENET_MRBR:
1029        s->regs[index] = value & 0x00003ff0;
1030        break;
1031    default:
1032        if (s->is_fec) {
1033            imx_fec_write(s, index, value);
1034        } else {
1035            imx_enet_write(s, index, value);
1036        }
1037        return;
1038    }
1039
1040    imx_eth_update(s);
1041}
1042
1043static bool imx_eth_can_receive(NetClientState *nc)
1044{
1045    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1046
1047    return !!s->regs[ENET_RDAR];
1048}
1049
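/*
 * Receive path (legacy FEC descriptors): the incoming frame plus a
 * fabricated 4-byte FCS is scattered across RX descriptors of at most
 * ENET_MRBR bytes each.  Oversized frames are truncated and flagged with
 * ENET_BD_TR/ENET_BD_LG.  For example, with MRBR programmed to 1536, a
 * 100-byte frame (104 bytes including the FCS) fits in a single
 * descriptor with ENET_BD_L set.
 */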
1050static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
1051                               size_t len)
1052{
1053    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1054    IMXFECBufDesc bd;
1055    uint32_t flags = 0;
1056    uint32_t addr;
1057    uint32_t crc;
1058    uint32_t buf_addr;
1059    uint8_t *crc_ptr;
1060    unsigned int buf_len;
1061    size_t size = len;
1062
1063    trace_imx_fec_receive(size);
1064
1065    if (!s->regs[ENET_RDAR]) {
1066        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1067                      TYPE_IMX_FEC, __func__);
1068        return 0;
1069    }
1070
1071    /* Compute the FCS over the payload; 4 bytes are reserved for it.  */
1072    crc = cpu_to_be32(crc32(~0, buf, size));
1073    crc_ptr = (uint8_t *) &crc;
1074    size += 4;
1075
1076    /* Huge frames are truncated.  */
1077    if (size > ENET_MAX_FRAME_SIZE) {
1078        size = ENET_MAX_FRAME_SIZE;
1079        flags |= ENET_BD_TR | ENET_BD_LG;
1080    }
1081
1082    /* Frames larger than the user limit just set error flags.  */
1083    if (size > (s->regs[ENET_RCR] >> 16)) {
1084        flags |= ENET_BD_LG;
1085    }
1086
1087    addr = s->rx_descriptor;
1088    while (size > 0) {
1089        imx_fec_read_bd(&bd, addr);
1090        if ((bd.flags & ENET_BD_E) == 0) {
1091            /* No descriptors available.  Bail out.  */
1092            /*
1093             * FIXME: This is wrong. We should probably either
1094             * save the remainder for when more RX buffers are
1095             * available, or flag an error.
1096             */
1097            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1098                          TYPE_IMX_FEC, __func__);
1099            break;
1100        }
1101        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
1102        bd.length = buf_len;
1103        size -= buf_len;
1104
1105        trace_imx_fec_receive_len(addr, bd.length);
1106
1107        /* The last 4 bytes are the CRC.  */
1108        if (size < 4) {
1109            buf_len += size - 4;
1110        }
1111        buf_addr = bd.data;
1112        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
1113                         MEMTXATTRS_UNSPECIFIED);
1114        buf += buf_len;
1115        if (size < 4) {
1116            dma_memory_write(&address_space_memory, buf_addr + buf_len,
1117                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
1118            crc_ptr += 4 - size;
1119        }
1120        bd.flags &= ~ENET_BD_E;
1121        if (size == 0) {
1122            /* Last buffer in frame.  */
1123            bd.flags |= flags | ENET_BD_L;
1124
1125            trace_imx_fec_receive_last(bd.flags);
1126
1127            s->regs[ENET_EIR] |= ENET_INT_RXF;
1128        } else {
1129            s->regs[ENET_EIR] |= ENET_INT_RXB;
1130        }
1131        imx_fec_write_bd(&bd, addr);
1132        /* Advance to the next descriptor.  */
1133        if ((bd.flags & ENET_BD_W) != 0) {
1134            addr = s->regs[ENET_RDSR];
1135        } else {
1136            addr += sizeof(bd);
1137        }
1138    }
1139    s->rx_descriptor = addr;
1140    imx_eth_enable_rx(s, false);
1141    imx_eth_update(s);
1142    return len;
1143}
1144
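/*
 * Receive path (enhanced ENET descriptors): as imx_fec_receive(), with a
 * few additions: the truncation limit comes from ENET_FTRL rather than
 * the fixed maximum frame size, RACC[SHIFT16] prepends two padding bytes
 * to the first buffer so the payload that follows is 32-bit aligned, the
 * BDU bit is set on the final descriptor, and the RXF/RXB interrupts are
 * only raised when the descriptor's RX_INT option bit is set.
 */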
1145static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
1146                                size_t len)
1147{
1148    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1149    IMXENETBufDesc bd;
1150    uint32_t flags = 0;
1151    uint32_t addr;
1152    uint32_t crc;
1153    uint32_t buf_addr;
1154    uint8_t *crc_ptr;
1155    unsigned int buf_len;
1156    size_t size = len;
1157    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;
1158
1159    trace_imx_enet_receive(size);
1160
1161    if (!s->regs[ENET_RDAR]) {
1162        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1163                      TYPE_IMX_FEC, __func__);
1164        return 0;
1165    }
1166
1167    /* Compute the FCS over the payload; 4 bytes are reserved for it.  */
1168    crc = cpu_to_be32(crc32(~0, buf, size));
1169    crc_ptr = (uint8_t *) &crc;
1170    size += 4;
1171
1172    if (shift16) {
1173        size += 2;
1174    }
1175
1176    /* Huge frames are truncated. */
1177    if (size > s->regs[ENET_FTRL]) {
1178        size = s->regs[ENET_FTRL];
1179        flags |= ENET_BD_TR | ENET_BD_LG;
1180    }
1181
1182    /* Frames larger than the user limit just set error flags.  */
1183    if (size > (s->regs[ENET_RCR] >> 16)) {
1184        flags |= ENET_BD_LG;
1185    }
1186
1187    addr = s->rx_descriptor;
1188    while (size > 0) {
1189        imx_enet_read_bd(&bd, addr);
1190        if ((bd.flags & ENET_BD_E) == 0) {
1191            /* No descriptors available.  Bail out.  */
1192            /*
1193             * FIXME: This is wrong. We should probably either
1194             * save the remainder for when more RX buffers are
1195             * available, or flag an error.
1196             */
1197            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1198                          TYPE_IMX_FEC, __func__);
1199            break;
1200        }
1201        buf_len = MIN(size, s->regs[ENET_MRBR]);
1202        bd.length = buf_len;
1203        size -= buf_len;
1204
1205        trace_imx_enet_receive_len(addr, bd.length);
1206
1207        /* The last 4 bytes are the CRC.  */
1208        if (size < 4) {
1209            buf_len += size - 4;
1210        }
1211        buf_addr = bd.data;
1212
1213        if (shift16) {
1214            /*
1215             * If the SHIFT16 bit of the ENETx_RACC register is set, we prepend
1216             * two bytes of padding so the payload is aligned on a 4-byte boundary.
1217             */
1218            const uint8_t zeros[2] = { 0 };
1219
1220            dma_memory_write(&address_space_memory, buf_addr, zeros,
1221                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);
1222
1223            buf_addr += sizeof(zeros);
1224            buf_len  -= sizeof(zeros);
1225
1226            /* We only do this once per Ethernet frame */
1227            shift16 = false;
1228        }
1229
1230        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
1231                         MEMTXATTRS_UNSPECIFIED);
1232        buf += buf_len;
1233        if (size < 4) {
1234            dma_memory_write(&address_space_memory, buf_addr + buf_len,
1235                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
1236            crc_ptr += 4 - size;
1237        }
1238        bd.flags &= ~ENET_BD_E;
1239        if (size == 0) {
1240            /* Last buffer in frame.  */
1241            bd.flags |= flags | ENET_BD_L;
1242
1243            trace_imx_enet_receive_last(bd.flags);
1244
1245            /* Indicate that we've updated the last buffer descriptor. */
1246            bd.last_buffer = ENET_BD_BDU;
1247            if (bd.option & ENET_BD_RX_INT) {
1248                s->regs[ENET_EIR] |= ENET_INT_RXF;
1249            }
1250        } else {
1251            if (bd.option & ENET_BD_RX_INT) {
1252                s->regs[ENET_EIR] |= ENET_INT_RXB;
1253            }
1254        }
1255        imx_enet_write_bd(&bd, addr);
1256        /* Advance to the next descriptor.  */
1257        if ((bd.flags & ENET_BD_W) != 0) {
1258            addr = s->regs[ENET_RDSR];
1259        } else {
1260            addr += sizeof(bd);
1261        }
1262    }
1263    s->rx_descriptor = addr;
1264    imx_eth_enable_rx(s, false);
1265    imx_eth_update(s);
1266    return len;
1267}
1268
1269static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1270                                size_t len)
1271{
1272    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1273
1274    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1275        return imx_enet_receive(nc, buf, len);
1276    } else {
1277        return imx_fec_receive(nc, buf, len);
1278    }
1279}
1280
1281static const MemoryRegionOps imx_eth_ops = {
1282    .read                  = imx_eth_read,
1283    .write                 = imx_eth_write,
1284    .valid.min_access_size = 4,
1285    .valid.max_access_size = 4,
1286    .endianness            = DEVICE_NATIVE_ENDIAN,
1287};
1288
1289static void imx_eth_cleanup(NetClientState *nc)
1290{
1291    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1292
1293    s->nic = NULL;
1294}
1295
1296static NetClientInfo imx_eth_net_info = {
1297    .type                = NET_CLIENT_DRIVER_NIC,
1298    .size                = sizeof(NICState),
1299    .can_receive         = imx_eth_can_receive,
1300    .receive             = imx_eth_receive,
1301    .cleanup             = imx_eth_cleanup,
1302    .link_status_changed = imx_eth_set_link,
1303};
1304
1305
1306static void imx_eth_realize(DeviceState *dev, Error **errp)
1307{
1308    IMXFECState *s = IMX_FEC(dev);
1309    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1310
1311    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
1312                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
1313    sysbus_init_mmio(sbd, &s->iomem);
1314    sysbus_init_irq(sbd, &s->irq[0]);
1315    sysbus_init_irq(sbd, &s->irq[1]);
1316
1317    qemu_macaddr_default_if_unset(&s->conf.macaddr);
1318
1319    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
1320                          object_get_typename(OBJECT(dev)),
1321                          dev->id, s);
1322
1323    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
1324}
1325
1326static Property imx_eth_properties[] = {
1327    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
1328    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
1329    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
1330    DEFINE_PROP_END_OF_LIST(),
1331};
1332
1333static void imx_eth_class_init(ObjectClass *klass, void *data)
1334{
1335    DeviceClass *dc = DEVICE_CLASS(klass);
1336
1337    dc->vmsd    = &vmstate_imx_eth;
1338    dc->reset   = imx_eth_reset;
1339    device_class_set_props(dc, imx_eth_properties);
1340    dc->realize = imx_eth_realize;
1341    dc->desc    = "i.MX FEC/ENET Ethernet Controller";
1342}
1343
1344static void imx_fec_init(Object *obj)
1345{
1346    IMXFECState *s = IMX_FEC(obj);
1347
1348    s->is_fec = true;
1349}
1350
1351static void imx_enet_init(Object *obj)
1352{
1353    IMXFECState *s = IMX_FEC(obj);
1354
1355    s->is_fec = false;
1356}
1357
1358static const TypeInfo imx_fec_info = {
1359    .name          = TYPE_IMX_FEC,
1360    .parent        = TYPE_SYS_BUS_DEVICE,
1361    .instance_size = sizeof(IMXFECState),
1362    .instance_init = imx_fec_init,
1363    .class_init    = imx_eth_class_init,
1364};
1365
1366static const TypeInfo imx_enet_info = {
1367    .name          = TYPE_IMX_ENET,
1368    .parent        = TYPE_IMX_FEC,
1369    .instance_init = imx_enet_init,
1370};
1371
1372static void imx_eth_register_types(void)
1373{
1374    type_register_static(&imx_fec_info);
1375    type_register_static(&imx_enet_info);
1376}
1377
1378type_init(imx_eth_register_types)
1379