/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

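/*
 * Illustrative usage sketch (the driver-side names here are
 * hypothetical): an EMAC driver fills in its struct mal_commac and
 * claims its channels before enabling them:
 *
 *	commac.ops          = &emac_commac_ops;
 *	commac.dev          = dev;
 *	commac.tx_chan_mask = MAL_CHAN_MASK(tx_chan);
 *	commac.rx_chan_mask = MAL_CHAN_MASK(rx_chan);
 *	if (mal_register_commac(mal, &commac))
 *		return -EBUSY;		// channels already claimed
 */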
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        if (list_empty(&mal->list))
                napi_enable(&mal->napi);
        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}

void mal_unregister_commac(struct mal_instance *mal,
                struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "unreg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;
        list_del_init(&commac->list);
        if (list_empty(&mal->list))
                napi_disable(&mal->napi);

        spin_unlock_irqrestore(&mal->lock, flags);
}

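/*
 * MAL_RCBS holds the receive buffer size in 16-byte units, hence the
 * multiple-of-16 check and the "size >> 4" below. A worked example
 * (sketch): mal_set_rcbs(mal, 0, 1536) programs MAL_RCBS(0) with 96,
 * while mal_set_rcbs(mal, 0, 1500) fails with -EINVAL because 1500 is
 * not 16-byte aligned.
 */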
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for the channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}

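/*
 * Layout of the single coherent BD allocation made in mal_probe(): all
 * TX rings first, then all RX rings. For example, with two TX and two
 * RX channels and the (configurable) defaults NUM_TX_BUFF =
 * NUM_RX_BUFF = 64, TX channel 1 starts at descriptor 64 and RX
 * channel 1 at 2 * 64 + 64 = 192.
 */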
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but MAL_RXCASR expects the channel number
         * divided by 8 for the bitmask
         */
        if (!(channel % 8))
                channel >>= 3;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but MAL_RXCARR expects the channel number
         * divided by 8 for the bitmask
         */
        if (!(channel % 8))
                channel >>= 3;

        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

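/*
 * NAPI interrupt mitigation: end-of-buffer interrupts are masked when a
 * poll is scheduled (mal_schedule_poll) and unmasked again from
 * mal_poll() once the rings have been drained.
 */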
/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* We ignore Descriptor error,
                         * TXDE or RXDE interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error, it's probably buggy hardware or
                         * incorrect physical address in BD (i.e. bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error, it's probably buggy hardware or incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(napi_schedule_prep(&mal->napi))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                mal_disable_eob_irq(mal);
                __napi_schedule(&mal->napi);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}

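/*
 * On parts with MAL_FTR_CLEAR_ICINTSTAT (the 405EZ, see mal_probe()
 * below), the coalesced interrupt status in SDR0 apparently needs an
 * explicit clear as well, which is what the CONFIG_PPC_DCR_NATIVE
 * blocks in the two EOB handlers below do.
 */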
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

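/*
 * Combined handler for parts where SERR, TXDE and RXDE share a single
 * interrupt line (MAL_FTR_COMMON_ERR_INT): demultiplex on the ESR
 * descriptor error and channel type bits and dispatch to the matching
 * handler above.
 */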
static irqreturn_t mal_int(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        if (esr & MAL_ESR_EVB) {
                /* descriptor error */
                if (esr & MAL_ESR_DE) {
                        if (esr & MAL_ESR_CIDT)
                                return mal_rxde(irq, dev_instance);
                        else
                                return mal_txde(irq, dev_instance);
                } else { /* SERR */
                        return mal_serr(irq, dev_instance);
                }
        }
        return IRQ_HANDLED;
}

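/*
 * Disable/enable handshake used by channel owners around ring
 * reconfiguration: setting MAL_COMMAC_POLL_DISABLED makes mal_poll()
 * skip the channel, and napi_synchronize() waits out a poll already in
 * flight, after which the caller can touch the rings safely.
 */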
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller disables poll at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller */
        napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        /* Feels better to trigger a poll here to catch up with events that
         * may have happened on this channel while disabled. It will most
         * probably be delayed until the next interrupt but that's mostly a
         * non-issue in the context where this is called.
         */
        napi_schedule(&mal->napi);
}

static int mal_poll(struct napi_struct *napi, int budget)
{
        struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
        struct list_head *l;
        int received = 0;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, budget);
                if (n) {
                        received += n;
                        budget -= n;
                        if (budget <= 0)
                                goto more_work; // XXX What if this is the last one ?
                }
        }

        /* We need to disable IRQs to protect from RXDE IRQ here */
        spin_lock_irqsave(&mal->lock, flags);
        __napi_complete(napi);
        mal_enable_eob_irq(mal);
        spin_unlock_irqrestore(&mal->lock, flags);

        /* Check for "rotting" packet(s): a packet that arrived while EOB
         * interrupts were masked may not raise a fresh interrupt now that
         * they are unmasked again, so peek at the rings and reschedule
         * polling if anything is still pending.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (napi_reschedule(napi))
                                mal_disable_eob_irq(mal);
                        else
                                MAL_DBG2(mal, "already in poll list" NL);

                        if (budget > 0)
                                goto again;
                        else
                                goto more_work;
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
        return received;
}

static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
            sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}

static int mal_probe(struct platform_device *ofdev)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        unsigned int dcr_base;
        const u32 *prop;
        u32 cfg;
        unsigned long irqflags;
        irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal)
                return -ENOMEM;

        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
        if (dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs!\n", index);
                err = -ENODEV;
                goto fail;
        }

        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
                defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
                mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                                MAL_FTR_COMMON_ERR_INT);
#else
                printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
                                ofdev->dev.of_node->full_name);
                err = -ENODEV;
                goto fail;
#endif
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                mal->txde_irq = mal->rxde_irq = mal->serr_irq;
        } else {
                mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }

        if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
            mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
            mal->rxde_irq == NO_IRQ) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts!\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

        init_dummy_netdev(&mal->dummy_dev);

        netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
                       CONFIG_IBM_EMAC_POLL_WEIGHT);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* Current Axon is not happy with priority being non-0, it can
         * deadlock, fix it up here
         */
        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                          GFP_KERNEL | __GFP_ZERO);
        if (mal->bd_virt == NULL) {
                err = -ENOMEM;
                goto fail_unmap;
        }

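        /* Point each channel's descriptor table pointer register at its
         * ring within the coherent allocation; see mal_tx_bd_offset()
         * and mal_rx_bd_offset() for the layout.
         */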
        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                irqflags = IRQF_SHARED;
                hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
        } else {
                irqflags = 0;
                hdlr_serr = mal_serr;
                hdlr_txde = mal_txde;
                hdlr_rxde = mal_rxde;
        }

        err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        if (mal->version == 2)
                set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
        else
                set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %s, %d TX channels, %d RX channels\n",
               mal->version, ofdev->dev.of_node->full_name,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        platform_set_drvdata(ofdev, mal);

        mal_dbg_register(mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, 0x100);
 fail:
        kfree(mal);

        return err;
}

static int mal_remove(struct platform_device *ofdev)
{
        struct mal_instance *mal = platform_get_drvdata(ofdev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling */
        napi_disable(&mal->napi);

        if (!list_empty(&mal->list))
                /* This is *very* bad */
                WARN(1, KERN_EMERG
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        mal_dbg_unregister(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static struct of_device_id mal_platform_match[] =
{
        {
                .compatible     = "ibm,mcmal",
        },
        {
                .compatible     = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal",
        },
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal2",
        },
        {},
};

static struct platform_driver mal_of_driver = {
        .driver = {
                .name = "mcmal",
                .owner = THIS_MODULE,
                .of_match_table = mal_platform_match,
        },
        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
        platform_driver_unregister(&mal_of_driver);
}
 797