linux/drivers/net/cpmac.c
/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <asm/atomic.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE          (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES    8

/* Ethernet registers */
#define CPMAC_TX_CONTROL                0x0004
#define CPMAC_TX_TEARDOWN               0x0008
#define CPMAC_RX_CONTROL                0x0014
#define CPMAC_RX_TEARDOWN               0x0018
#define CPMAC_MBP                       0x0100
# define MBP_RXPASSCRC                  0x40000000
# define MBP_RXQOS                      0x20000000
# define MBP_RXNOCHAIN                  0x10000000
# define MBP_RXCMF                      0x01000000
# define MBP_RXSHORT                    0x00800000
# define MBP_RXCEF                      0x00400000
# define MBP_RXPROMISC                  0x00200000
# define MBP_PROMISCCHAN(channel)       (((channel) & 0x7) << 16)
# define MBP_RXBCAST                    0x00002000
# define MBP_BCASTCHAN(channel)         (((channel) & 0x7) << 8)
# define MBP_RXMCAST                    0x00000020
# define MBP_MCASTCHAN(channel)         ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE            0x0104
#define CPMAC_UNICAST_CLEAR             0x0108
#define CPMAC_MAX_LENGTH                0x010c
#define CPMAC_BUFFER_OFFSET             0x0110
#define CPMAC_MAC_CONTROL               0x0160
# define MAC_TXPTYPE                    0x00000200
# define MAC_TXPACE                     0x00000040
# define MAC_MII                        0x00000020
# define MAC_TXFLOW                     0x00000010
# define MAC_RXFLOW                     0x00000008
# define MAC_MTEST                      0x00000004
# define MAC_LOOPBACK                   0x00000002
# define MAC_FDX                        0x00000001
#define CPMAC_MAC_STATUS                0x0164
# define MAC_STATUS_QOS                 0x00000004
# define MAC_STATUS_RXFLOW              0x00000002
# define MAC_STATUS_TXFLOW              0x00000001
#define CPMAC_TX_INT_ENABLE             0x0178
#define CPMAC_TX_INT_CLEAR              0x017c
#define CPMAC_MAC_INT_VECTOR            0x0180
# define MAC_INT_STATUS                 0x00080000
# define MAC_INT_HOST                   0x00040000
# define MAC_INT_RX                     0x00020000
# define MAC_INT_TX                     0x00010000
#define CPMAC_MAC_EOI_VECTOR            0x0184
#define CPMAC_RX_INT_ENABLE             0x0198
#define CPMAC_RX_INT_CLEAR              0x019c
#define CPMAC_MAC_INT_ENABLE            0x01a8
#define CPMAC_MAC_INT_CLEAR             0x01ac
#define CPMAC_MAC_ADDR_LO(channel)      (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID              0x01d0
#define CPMAC_MAC_ADDR_HI               0x01d4
#define CPMAC_MAC_HASH_LO               0x01d8
#define CPMAC_MAC_HASH_HI               0x01dc
#define CPMAC_TX_PTR(channel)           (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel)           (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel)           (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel)           (0x0660 + (channel) * 4)
#define CPMAC_REG_END                   0x0680
/*
 * Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD             0x0200
#define CPMAC_STATS_RX_BCAST            0x0204
#define CPMAC_STATS_RX_MCAST            0x0208
#define CPMAC_STATS_RX_PAUSE            0x020c
#define CPMAC_STATS_RX_CRC              0x0210
#define CPMAC_STATS_RX_ALIGN            0x0214
#define CPMAC_STATS_RX_OVER             0x0218
#define CPMAC_STATS_RX_JABBER           0x021c
#define CPMAC_STATS_RX_UNDER            0x0220
#define CPMAC_STATS_RX_FRAG             0x0224
#define CPMAC_STATS_RX_FILTER           0x0228
#define CPMAC_STATS_RX_QOSFILTER        0x022c
#define CPMAC_STATS_RX_OCTETS           0x0230

#define CPMAC_STATS_TX_GOOD             0x0234
#define CPMAC_STATS_TX_BCAST            0x0238
#define CPMAC_STATS_TX_MCAST            0x023c
#define CPMAC_STATS_TX_PAUSE            0x0240
#define CPMAC_STATS_TX_DEFER            0x0244
#define CPMAC_STATS_TX_COLLISION        0x0248
#define CPMAC_STATS_TX_SINGLECOLL       0x024c
#define CPMAC_STATS_TX_MULTICOLL        0x0250
#define CPMAC_STATS_TX_EXCESSCOLL       0x0254
#define CPMAC_STATS_TX_LATECOLL         0x0258
#define CPMAC_STATS_TX_UNDERRUN         0x025c
#define CPMAC_STATS_TX_CARRIERSENSE     0x0260
#define CPMAC_STATS_TX_OCTETS           0x0264

#define cpmac_read(base, reg)           (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val)     (writel(val, (void __iomem *)(base) + \
                                                (reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION              0x0000
#define CPMAC_MDIO_CONTROL              0x0004
# define MDIOC_IDLE                     0x80000000
# define MDIOC_ENABLE                   0x40000000
# define MDIOC_PREAMBLE                 0x00100000
# define MDIOC_FAULT                    0x00080000
# define MDIOC_FAULTDETECT              0x00040000
# define MDIOC_INTTEST                  0x00020000
# define MDIOC_CLKDIV(div)              ((div) & 0xff)
#define CPMAC_MDIO_ALIVE                0x0008
#define CPMAC_MDIO_LINK                 0x000c
#define CPMAC_MDIO_ACCESS(channel)      (0x0080 + (channel) * 8)
# define MDIO_BUSY                      0x80000000
# define MDIO_WRITE                     0x40000000
# define MDIO_REG(reg)                  (((reg) & 0x1f) << 21)
# define MDIO_PHY(phy)                  (((phy) & 0x1f) << 16)
# define MDIO_DATA(data)                ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel)      (0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL                 0x00000040
# define PHYSEL_LINKINT                 0x00000020

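/*
 * DMA descriptor. The leading words (hw_next, hw_data, buflen/bufflags,
 * datalen/dataflags) appear to be the hardware-visible part, chained via
 * the physical hw_next pointer; skb, next/prev and the two mappings are
 * driver-side bookkeeping only.
 */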
struct cpmac_desc {
        u32 hw_next;
        u32 hw_data;
        u16 buflen;
        u16 bufflags;
        u16 datalen;
        u16 dataflags;
#define CPMAC_SOP                       0x8000
#define CPMAC_EOP                       0x4000
#define CPMAC_OWN                       0x2000
#define CPMAC_EOQ                       0x1000
        struct sk_buff *skb;
        struct cpmac_desc *next;
        struct cpmac_desc *prev;
        dma_addr_t mapping;
        dma_addr_t data_mapping;
};

struct cpmac_priv {
        spinlock_t lock;
        spinlock_t rx_lock;
        struct cpmac_desc *rx_head;
        int ring_size;
        struct cpmac_desc *desc_ring;
        dma_addr_t dma_ring;
        void __iomem *regs;
        struct mii_bus *mii_bus;
        struct phy_device *phy;
        char phy_name[MII_BUS_ID_SIZE + 3];
        int oldlink, oldspeed, oldduplex;
        u32 msg_enable;
        struct net_device *dev;
        struct work_struct reset_work;
        struct platform_device *pdev;
        struct napi_struct napi;
        atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        for (i = 0; i < CPMAC_REG_END; i += 4) {
                if (i % 16 == 0) {
                        if (i)
                                pr_cont("\n");
                        printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
                               priv->regs + i);
                }
                pr_cont(" %08x", cpmac_read(priv->regs, i));
        }
        pr_cont("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
        int i;
        printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
        for (i = 0; i < sizeof(*desc) / 4; i++)
                pr_cont(" %08x", ((u32 *)desc)[i]);
        pr_cont("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *dump = priv->rx_head;
        do {
                cpmac_dump_desc(dev, dump);
                dump = dump->next;
        } while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
        int i;
        printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
        for (i = 0; i < skb->len; i++) {
                if (i % 16 == 0) {
                        if (i)
                                pr_cont("\n");
                        printk(KERN_DEBUG "%s: data[%p]:", dev->name,
                               skb->data + i);
                }
                pr_cont(" %02x", ((u8 *)skb->data)[i]);
        }
        pr_cont("\n");
}

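/*
 * MDIO bus access goes through a single ACCESS register: wait for the
 * BUSY bit to clear, write the request (BUSY doubles as the "go" bit,
 * together with the register/PHY addresses and, for writes, the data),
 * then for reads poll BUSY again and pick the result out of the low
 * 16 bits.
 */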
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
        u32 val;

        while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
                    MDIO_PHY(phy_id));
        while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                cpu_relax();
        return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                            int reg, u16 val)
{
        while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
                    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
        return 0;
}

static int cpmac_mdio_reset(struct mii_bus *bus)
{
        struct clk *cpmac_clk;

        cpmac_clk = clk_get(&bus->dev, "cpmac");
        if (IS_ERR(cpmac_clk)) {
                printk(KERN_ERR "unable to get cpmac clock\n");
                return -1;
        }
        ar7_device_reset(AR7_RESET_BIT_MDIO);
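        /*
         * Program the divider for an MDIO clock of roughly 2.2 MHz;
         * the hardware presumably divides the bus clock by CLKDIV + 1,
         * hence the trailing "- 1".
         */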
        cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
        return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
        if (dev->flags & IFF_UP)
                return -EBUSY;

        /* Don't allow changing the I/O address */
        if (map->base_addr != dev->base_addr)
                return -EOPNOTSUPP;

        /* ignore other fields */
        return 0;
}

static void cpmac_set_multicast_list(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u8 tmp;
        u32 mbp, bit, hash[2] = { 0, };
        struct cpmac_priv *priv = netdev_priv(dev);

        mbp = cpmac_read(priv->regs, CPMAC_MBP);
        if (dev->flags & IFF_PROMISC) {
                cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
                            MBP_RXPROMISC);
        } else {
                cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
                if (dev->flags & IFF_ALLMULTI) {
                        /* enable all multicast mode */
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
                } else {
                        /*
                         * cpmac uses some strange mac address hashing
                         * (not crc32)
                         */
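                        /*
                         * Each address is xor-folded down to a 6-bit
                         * bucket; bucket N corresponds to bit N of the
                         * 64-bit HASH_HI:HASH_LO pair.
                         */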
                        netdev_for_each_mc_addr(ha, dev) {
                                bit = 0;
                                tmp = ha->addr[0];
                                bit  ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = ha->addr[1];
                                bit  ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = ha->addr[2];
                                bit  ^= (tmp >> 6) ^ tmp;
                                tmp = ha->addr[3];
                                bit  ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = ha->addr[4];
                                bit  ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = ha->addr[5];
                                bit  ^= (tmp >> 6) ^ tmp;
                                bit &= 0x3f;
                                hash[bit / 32] |= 1 << (bit % 32);
                        }

                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
                }
        }
}

static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                    struct cpmac_desc *desc)
{
        struct sk_buff *skb, *result = NULL;

        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(priv->dev, desc);
        cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
        if (unlikely(!desc->datalen)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING "%s: rx: spurious interrupt\n",
                               priv->dev->name);
                return NULL;
        }

        skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
        if (likely(skb)) {
                skb_put(desc->skb, desc->datalen);
                desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
                skb_checksum_none_assert(desc->skb);
                priv->dev->stats.rx_packets++;
                priv->dev->stats.rx_bytes += desc->datalen;
                result = desc->skb;
                dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                                 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
                desc->skb = skb;
                desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
                                                    CPMAC_SKB_SIZE,
                                                    DMA_FROM_DEVICE);
                desc->hw_data = (u32)desc->data_mapping;
                if (unlikely(netif_msg_pktdata(priv))) {
                        printk(KERN_DEBUG "%s: received packet:\n",
                               priv->dev->name);
                        cpmac_dump_skb(priv->dev, result);
                }
        } else {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING
                               "%s: low on skbs, dropping packet\n",
                               priv->dev->name);
                priv->dev->stats.rx_dropped++;
        }

        desc->buflen = CPMAC_SKB_SIZE;
        desc->dataflags = CPMAC_OWN;

        return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
        struct sk_buff *skb;
        struct cpmac_desc *desc, *restart;
        struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
        int received = 0, processed = 0;

        spin_lock(&priv->rx_lock);
        if (unlikely(!priv->rx_head)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                               priv->dev->name);
                spin_unlock(&priv->rx_lock);
                napi_complete(napi);
                return 0;
        }

        desc = priv->rx_head;
        restart = NULL;
        while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
                processed++;

                if ((desc->dataflags & CPMAC_EOQ) != 0) {
                        /* The last update to eoq->hw_next didn't happen
                         * soon enough, and the receiver stopped here.
                         * Remember this descriptor so we can restart
                         * the receiver after freeing some space.
                         */
                        if (unlikely(restart)) {
                                if (netif_msg_rx_err(priv))
                                        printk(KERN_ERR "%s: poll found a"
                                                " duplicate EOQ: %p and %p\n",
                                                priv->dev->name, restart, desc);
                                goto fatal_error;
                        }

                        restart = desc->next;
                }

                skb = cpmac_rx_one(priv, desc);
                if (likely(skb)) {
                        netif_receive_skb(skb);
                        received++;
                }
                desc = desc->next;
        }

        if (desc != priv->rx_head) {
                /* We freed some buffers, but not the whole ring,
                 * add what we did free to the rx list */
                desc->prev->hw_next = (u32)0;
                priv->rx_head->prev->hw_next = priv->rx_head->mapping;
        }

        /* Optimization: If we did not actually process an EOQ (perhaps because
         * of quota limits), check to see if the tail of the queue has EOQ set.
         * We should immediately restart in that case so that the receiver can
         * restart and run in parallel with more packet processing.
         * This lets us handle slightly larger bursts before running
         * out of ring space (assuming dev->weight < ring_size) */

        if (!restart &&
             (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
                    == CPMAC_EOQ &&
             (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
                /* reset EOQ so the poll loop (above) doesn't try to
                 * restart this when it eventually gets to this descriptor.
                 */
                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
                restart = priv->rx_head;
        }

        if (restart) {
                priv->dev->stats.rx_errors++;
                priv->dev->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING "%s: rx dma ring overrun\n",
                               priv->dev->name);

                if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
                        if (netif_msg_drv(priv))
                                printk(KERN_ERR "%s: cpmac_poll is trying to "
                                        "restart rx from a descriptor that's "
                                        "not free: %p\n",
                                        priv->dev->name, restart);
                        goto fatal_error;
                }

                cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
        }

        priv->rx_head = desc;
        spin_unlock(&priv->rx_lock);
        if (unlikely(netif_msg_rx_status(priv)))
                printk(KERN_DEBUG "%s: poll processed %d packets\n",
                       priv->dev->name, received);
        if (processed == 0) {
                /* we ran out of packets to read,
                 * revert to interrupt-driven mode */
                napi_complete(napi);
                cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                return 0;
        }

        return 1;

fatal_error:
        /* Something went horribly wrong.
         * Reset hardware to try to recover rather than wedging. */

        if (netif_msg_drv(priv)) {
                printk(KERN_ERR "%s: cpmac_poll is confused. "
                                "Resetting hardware\n", priv->dev->name);
                cpmac_dump_all_desc(priv->dev);
                printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
                        priv->dev->name,
                        cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
                        cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
        }

        spin_unlock(&priv->rx_lock);
        napi_complete(napi);
        netif_tx_stop_all_queues(priv->dev);
        napi_disable(&priv->napi);

        atomic_inc(&priv->reset_pending);
        cpmac_hw_stop(priv->dev);
        if (!schedule_work(&priv->reset_work))
                atomic_dec(&priv->reset_pending);
        return 0;
}

static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int queue, len;
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        if (unlikely(atomic_read(&priv->reset_pending)))
                return NETDEV_TX_BUSY;

        if (unlikely(skb_padto(skb, ETH_ZLEN)))
                return NETDEV_TX_OK;

        len = max(skb->len, ETH_ZLEN);
        queue = skb_get_queue_mapping(skb);
        netif_stop_subqueue(dev, queue);

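        /*
         * Each tx queue owns exactly one descriptor (desc_ring[0..7]), so
         * the subqueue is stopped for every packet and only re-woken from
         * cpmac_end_xmit() once the hardware has consumed it.
         */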
        desc = &priv->desc_ring[queue];
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING "%s: tx dma ring full\n",
                               dev->name);
                return NETDEV_TX_BUSY;
        }

        spin_lock(&priv->lock);
        spin_unlock(&priv->lock);
        desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
                                            DMA_TO_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        desc->datalen = len;
        desc->buflen = len;
        if (unlikely(netif_msg_tx_queued(priv)))
                printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
                       skb->len);
        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
        if (unlikely(netif_msg_pktdata(priv)))
                cpmac_dump_skb(dev, skb);
        cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

        return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        desc = &priv->desc_ring[queue];
        cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
        if (likely(desc->skb)) {
                spin_lock(&priv->lock);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += desc->skb->len;
                spin_unlock(&priv->lock);
                dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                                 DMA_TO_DEVICE);

                if (unlikely(netif_msg_tx_done(priv)))
                        printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
                               desc->skb, desc->skb->len);

                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING
                               "%s: end_xmit: spurious interrupt\n", dev->name);
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
}

static void cpmac_hw_stop(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

        ar7_device_reset(pdata->reset_bit);
        cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
        cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
        for (i = 0; i < 8; i++) {
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
                cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
        }
        cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

        ar7_device_reset(pdata->reset_bit);
        for (i = 0; i < 8; i++) {
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
                cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
        }
        cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

        cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
                    MBP_RXMCAST);
        cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
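        /*
         * The MAC address is spread over three registers: ADDR_LO holds
         * the last octet (one copy per channel), ADDR_MID the fifth, and
         * ADDR_HI the first four.
         */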
        for (i = 0; i < 8; i++)
                cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
                    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
                    (dev->dev_addr[3] << 24));
        cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
        cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
        cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
        cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

        cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
        cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
        cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
                    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        int i;
        if (unlikely(!priv->rx_head))
                return;
        desc = priv->rx_head;
        for (i = 0; i < priv->ring_size; i++) {
                if ((desc->dataflags & CPMAC_OWN) == 0) {
                        if (netif_msg_rx_err(priv) && net_ratelimit())
                                printk(KERN_WARNING "%s: packet dropped\n",
                                       dev->name);
                        if (unlikely(netif_msg_hw(priv)))
                                cpmac_dump_desc(dev, desc);
                        desc->dataflags = CPMAC_OWN;
                        dev->stats.rx_dropped++;
                }
                desc->hw_next = desc->next->mapping;
                desc = desc->next;
        }
        priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int i;
        if (unlikely(!priv->desc_ring))
                return;
        for (i = 0; i < CPMAC_QUEUES; i++) {
                priv->desc_ring[i].dataflags = 0;
                if (priv->desc_ring[i].skb) {
                        dev_kfree_skb_any(priv->desc_ring[i].skb);
                        priv->desc_ring[i].skb = NULL;
                }
        }
}

static void cpmac_hw_error(struct work_struct *work)
{
        struct cpmac_priv *priv =
                container_of(work, struct cpmac_priv, reset_work);

        spin_lock(&priv->rx_lock);
        cpmac_clear_rx(priv->dev);
        spin_unlock(&priv->rx_lock);
        cpmac_clear_tx(priv->dev);
        cpmac_hw_start(priv->dev);
        barrier();
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
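        /* Decode the host-error fields: bits 8-10/16-18 hold the rx/tx
         * channel, bits 12-15/20-23 the rx/tx error code. */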
        int rx_channel = (macstatus >> 8) & 7;
        int rx_code = (macstatus >> 12) & 15;
        int tx_channel = (macstatus >> 16) & 7;
        int tx_code = (macstatus >> 20) & 15;

        if (rx_code || tx_code) {
                if (netif_msg_drv(priv) && net_ratelimit()) {
                        /* Can't find any documentation on what these
                         * error codes actually are. So just log them and hope..
                         */
                        if (rx_code)
                                printk(KERN_WARNING "%s: host error %d on rx "
                                     "channel %d (macstatus %08x), resetting\n",
                                     dev->name, rx_code, rx_channel, macstatus);
                        if (tx_code)
                                printk(KERN_WARNING "%s: host error %d on tx "
                                     "channel %d (macstatus %08x), resetting\n",
                                     dev->name, tx_code, tx_channel, macstatus);
                }

                netif_tx_stop_all_queues(dev);
                cpmac_hw_stop(dev);
                if (schedule_work(&priv->reset_work))
                        atomic_inc(&priv->reset_pending);
                if (unlikely(netif_msg_hw(priv)))
                        cpmac_dump_regs(dev);
        }
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct cpmac_priv *priv;
        int queue;
        u32 status;

        priv = netdev_priv(dev);

        status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

        if (unlikely(netif_msg_intr(priv)))
                printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
                       status);

        if (status & MAC_INT_TX)
                cpmac_end_xmit(dev, (status & 7));

        if (status & MAC_INT_RX) {
                queue = (status >> 8) & 7;
                if (napi_schedule_prep(&priv->napi)) {
                        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
                        __napi_schedule(&priv->napi);
                }
        }

        cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

        if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
                cpmac_check_status(dev);

        return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        dev->stats.tx_errors++;
        spin_unlock(&priv->lock);
        if (netif_msg_tx_err(priv) && net_ratelimit())
                printk(KERN_WARNING "%s: transmit timeout\n", dev->name);

        atomic_inc(&priv->reset_pending);
        barrier();
        cpmac_clear_tx(dev);
        barrier();
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        if (!(netif_running(dev)))
                return -EINVAL;
        if (!priv->phy)
                return -EINVAL;

        return phy_mii_ioctl(priv->phy, ifr, cmd);
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (priv->phy)
                return phy_ethtool_gset(priv->phy, cmd);

        return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (priv->phy)
                return phy_ethtool_sset(priv->phy, cmd);

        return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        ring->rx_max_pending = 1024;
        ring->rx_mini_max_pending = 1;
        ring->rx_jumbo_max_pending = 1;
        ring->tx_max_pending = 1;

        ring->rx_pending = priv->ring_size;
        ring->rx_mini_pending = 1;
        ring->rx_jumbo_pending = 1;
        ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ring)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (netif_running(dev))
                return -EBUSY;
        priv->ring_size = ring->rx_pending;
        return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
                              struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "cpmac");
        strcpy(info->version, CPMAC_VERSION);
        info->fw_version[0] = '\0';
        sprintf(info->bus_info, "%s", "cpmac");
        info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
        .get_settings = cpmac_get_settings,
        .set_settings = cpmac_set_settings,
        .get_drvinfo = cpmac_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = cpmac_get_ringparam,
        .set_ringparam = cpmac_set_ringparam,
};

static void cpmac_adjust_link(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int new_state = 0;

        spin_lock(&priv->lock);
        if (priv->phy->link) {
                netif_tx_start_all_queues(dev);
                if (priv->phy->duplex != priv->oldduplex) {
                        new_state = 1;
                        priv->oldduplex = priv->phy->duplex;
                }

                if (priv->phy->speed != priv->oldspeed) {
                        new_state = 1;
                        priv->oldspeed = priv->phy->speed;
                }

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv) && net_ratelimit())
                phy_print_status(priv->phy);

        spin_unlock(&priv->lock);
}

static int cpmac_open(struct net_device *dev)
{
        int i, size, res;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct resource *mem;
        struct cpmac_desc *desc;
        struct sk_buff *skb;

        mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
        if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: failed to request registers\n",
                               dev->name);
                res = -ENXIO;
                goto fail_reserve;
        }

        priv->regs = ioremap(mem->start, resource_size(mem));
        if (!priv->regs) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: failed to remap registers\n",
                               dev->name);
                res = -ENXIO;
                goto fail_remap;
        }

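        /*
         * A single coherent block holds both rings: the first CPMAC_QUEUES
         * descriptors are the per-queue tx slots, the remaining ring_size
         * entries form the circular rx ring.
         */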
        size = priv->ring_size + CPMAC_QUEUES;
        priv->desc_ring = dma_alloc_coherent(&dev->dev,
                                             sizeof(struct cpmac_desc) * size,
                                             &priv->dma_ring,
                                             GFP_KERNEL);
        if (!priv->desc_ring) {
                res = -ENOMEM;
                goto fail_alloc;
        }

        for (i = 0; i < size; i++)
                priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

        priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
        for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
                skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
                if (unlikely(!skb)) {
                        res = -ENOMEM;
                        goto fail_desc;
                }
                desc->skb = skb;
                desc->data_mapping = dma_map_single(&dev->dev, skb->data,
                                                    CPMAC_SKB_SIZE,
                                                    DMA_FROM_DEVICE);
                desc->hw_data = (u32)desc->data_mapping;
                desc->buflen = CPMAC_SKB_SIZE;
                desc->dataflags = CPMAC_OWN;
                desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
                desc->next->prev = desc;
                desc->hw_next = (u32)desc->next->mapping;
        }

        priv->rx_head->prev->hw_next = (u32)0;

        res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
        if (res) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: failed to obtain irq\n",
                               dev->name);
                goto fail_irq;
        }

        atomic_set(&priv->reset_pending, 0);
        INIT_WORK(&priv->reset_work, cpmac_hw_error);
        cpmac_hw_start(dev);

        napi_enable(&priv->napi);
        priv->phy->state = PHY_CHANGELINK;
        phy_start(priv->phy);

        return 0;

fail_irq:
fail_desc:
        for (i = 0; i < priv->ring_size; i++) {
                if (priv->rx_head[i].skb) {
                        dma_unmap_single(&dev->dev,
                                         priv->rx_head[i].data_mapping,
                                         CPMAC_SKB_SIZE,
                                         DMA_FROM_DEVICE);
                        kfree_skb(priv->rx_head[i].skb);
                }
        }
fail_alloc:
        /* the ring came from dma_alloc_coherent(), so free it the same way */
        dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
                          priv->desc_ring, priv->dma_ring);
        iounmap(priv->regs);

fail_remap:
        release_mem_region(mem->start, resource_size(mem));

fail_reserve:
        return res;
}

static int cpmac_stop(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct resource *mem;

        netif_tx_stop_all_queues(dev);

        cancel_work_sync(&priv->reset_work);
        napi_disable(&priv->napi);
        phy_stop(priv->phy);

        cpmac_hw_stop(dev);

        for (i = 0; i < 8; i++)
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
        cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
        cpmac_write(priv->regs, CPMAC_MBP, 0);

        free_irq(dev->irq, dev);
        iounmap(priv->regs);
        mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
        release_mem_region(mem->start, resource_size(mem));
        priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
        for (i = 0; i < priv->ring_size; i++) {
                if (priv->rx_head[i].skb) {
                        dma_unmap_single(&dev->dev,
                                         priv->rx_head[i].data_mapping,
                                         CPMAC_SKB_SIZE,
                                         DMA_FROM_DEVICE);
                        kfree_skb(priv->rx_head[i].skb);
                }
        }

        dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
                          (CPMAC_QUEUES + priv->ring_size),
                          priv->desc_ring, priv->dma_ring);
        return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
        .ndo_open               = cpmac_open,
        .ndo_stop               = cpmac_stop,
        .ndo_start_xmit         = cpmac_start_xmit,
        .ndo_tx_timeout         = cpmac_tx_timeout,
        .ndo_set_multicast_list = cpmac_set_multicast_list,
        .ndo_do_ioctl           = cpmac_ioctl,
        .ndo_set_config         = cpmac_config,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
};

static int external_switch;

static int __devinit cpmac_probe(struct platform_device *pdev)
{
        int rc, phy_id;
        char mdio_bus_id[MII_BUS_ID_SIZE];
        struct resource *mem;
        struct cpmac_priv *priv;
        struct net_device *dev;
        struct plat_cpmac_data *pdata;

        pdata = pdev->dev.platform_data;

        if (external_switch || dumb_switch) {
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
                phy_id = pdev->id;
        } else {
                for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
                        if (!(pdata->phy_mask & (1 << phy_id)))
                                continue;
                        if (!cpmac_mii->phy_map[phy_id])
                                continue;
                        strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
                        break;
                }
        }

        if (phy_id == PHY_MAX_ADDR) {
                dev_err(&pdev->dev, "no PHY present, falling back "
                        "to switch on MDIO bus 0\n");
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
                phy_id = pdev->id;
        }

        dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);

        if (!dev) {
                printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
                return -ENOMEM;
        }

        platform_set_drvdata(pdev, dev);
        priv = netdev_priv(dev);

        priv->pdev = pdev;
        mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!mem) {
                rc = -ENODEV;
                goto fail;
        }

        dev->irq = platform_get_irq_byname(pdev, "irq");

        dev->netdev_ops = &cpmac_netdev_ops;
        dev->ethtool_ops = &cpmac_ethtool_ops;

        netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->rx_lock);
        priv->dev = dev;
        priv->ring_size = 64;
        priv->msg_enable = netif_msg_init(debug_level, 0xff);
        memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

        snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
                 mdio_bus_id, phy_id);

        priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);

        if (IS_ERR(priv->phy)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Could not attach to PHY\n",
                               dev->name);
                rc = PTR_ERR(priv->phy);
                goto fail;
        }

        rc = register_netdev(dev);
        if (rc) {
                printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
                       dev->name);
                goto fail;
        }

        if (netif_msg_probe(priv)) {
                printk(KERN_INFO
                       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
                       "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
                       priv->phy_name, dev->dev_addr);
        }
        return 0;

fail:
        free_netdev(dev);
        return rc;
}

static int __devexit cpmac_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        unregister_netdev(dev);
        free_netdev(dev);
        return 0;
}

static struct platform_driver cpmac_driver = {
        .driver.name = "cpmac",
        .driver.owner = THIS_MODULE,
        .probe = cpmac_probe,
        .remove = __devexit_p(cpmac_remove),
};

int __devinit cpmac_init(void)
{
        u32 mask;
        int i, res;

        cpmac_mii = mdiobus_alloc();
        if (cpmac_mii == NULL)
                return -ENOMEM;

        cpmac_mii->name = "cpmac-mii";
        cpmac_mii->read = cpmac_mdio_read;
        cpmac_mii->write = cpmac_mdio_write;
        cpmac_mii->reset = cpmac_mdio_reset;
        cpmac_mii->irq = mii_irqs;

        cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

        if (!cpmac_mii->priv) {
                printk(KERN_ERR "Can't ioremap mdio registers\n");
                res = -ENXIO;
                goto fail_alloc;
        }

#warning FIXME: unhardcode gpio&reset bits
        ar7_gpio_disable(26);
        ar7_gpio_disable(27);
        ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
        ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
        ar7_device_reset(AR7_RESET_BIT_EPHY);

        cpmac_mii->reset(cpmac_mii);

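        /* Give the PHYs up to 3 seconds (300 x 10 ms) to report alive. */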
        for (i = 0; i < 300; i++) {
                mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
                if (mask)
                        break;
                msleep(10);
        }

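        /*
         * (mask & (mask - 1)) is non-zero iff more than one ALIVE bit is
         * set, i.e. several PHYs answered: assume an external switch and
         * fall back to the fixed-phy bus in cpmac_probe().
         */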
        mask &= 0x7fffffff;
        if (mask & (mask - 1)) {
                external_switch = 1;
                mask = 0;
        }

        cpmac_mii->phy_mask = ~(mask | 0x80000000);
        snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");

        res = mdiobus_register(cpmac_mii);
        if (res)
                goto fail_mii;

        res = platform_driver_register(&cpmac_driver);
        if (res)
                goto fail_cpmac;

        return 0;

fail_cpmac:
        mdiobus_unregister(cpmac_mii);

fail_mii:
        iounmap(cpmac_mii->priv);

fail_alloc:
        mdiobus_free(cpmac_mii);

        return res;
}

void __devexit cpmac_exit(void)
{
        platform_driver_unregister(&cpmac_driver);
        mdiobus_unregister(cpmac_mii);
        iounmap(cpmac_mii->priv);
        mdiobus_free(cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);