/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

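/*
 * Register a "commac" (a MAL channel user, e.g. an EMAC instance) with
 * this MAL.  The commac claims its TX/RX channels via bitmasks;
 * overlapping claims are rejected with -EBUSY, and NAPI is enabled when
 * the first user appears.
 */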
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        if (list_empty(&mal->list))
                napi_enable(&mal->napi);
        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}

void mal_unregister_commac(struct mal_instance *mal,
                struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "unreg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;
        list_del_init(&commac->list);
        if (list_empty(&mal->list))
                napi_disable(&mal->napi);

        spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}

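/*
 * All buffer descriptor rings share one contiguous DMA block: the TX
 * rings (NUM_TX_BUFF descriptors each) come first, followed by the RX
 * rings (NUM_RX_BUFF descriptors each).  These helpers return a given
 * channel's offset into that block, counted in descriptors.
 */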
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        /*
         * On some PPC 4xx SoCs (e.g. 460EX/GT), the RX channel number is
         * a multiple of 8, but the bit set in MAL_RXCASR must use the
         * channel number divided by 8.
         */
        if (!(channel % 8))
                channel >>= 3;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        /*
         * On some PPC 4xx SoCs (e.g. 460EX/GT), the RX channel number is
         * a multiple of 8, but the bit set in MAL_RXCARR must use the
         * channel number divided by 8.
         */
        if (!(channel % 8))
                channel >>= 3;

        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}

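/*
 * MAL System Error interrupt: read and clear MAL_ESR, then decide what
 * to report.  Descriptor errors are ignored here since the dedicated
 * TXDE/RXDE interrupts fire for them anyway; PLB and OPB bus errors are
 * logged (rate-limited), as they usually mean broken hardware or a bad
 * physical address in a descriptor.
 */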
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* Ignore descriptor errors; a TXDE or RXDE
                         * interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error, it's probably buggy hardware or an
                         * incorrect physical address in a BD (i.e. a bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error, it's probably buggy hardware or an incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

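/*
 * Hand the real work off to NAPI.  EOB interrupts are masked under
 * mal->lock before scheduling (so this cannot race with mal_poll()
 * re-enabling them) and stay masked until the poller is done.
 */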
static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(napi_schedule_prep(&mal->napi))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                spin_lock(&mal->lock);
                mal_disable_eob_irq(mal);
                spin_unlock(&mal->lock);
                __napi_schedule(&mal->napi);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}

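/*
 * End-of-buffer interrupts: schedule NAPI and ack the per-channel status
 * bits.  SoCs with the MAL_FTR_CLEAR_ICINTSTAT quirk (405EZ) also need
 * the interrupt coalescing status cleared in SDR0.
 */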
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

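/*
 * RX descriptor error: mark every commac whose channels are affected as
 * stopped and invoke its rxde() hook, then let NAPI drain whatever is
 * still pending before the status bits are acked.
 */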
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

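/*
 * Combined error handler, used when MAL_FTR_COMMON_ERR_INT is set
 * (405EZ): SERR, TXDE and RXDE share a single interrupt line, so
 * demultiplex via the MAL_ESR bits and dispatch accordingly.
 */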
static irqreturn_t mal_int(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        if (esr & MAL_ESR_EVB) {
                /* descriptor error */
                if (esr & MAL_ESR_DE) {
                        if (esr & MAL_ESR_CIDT)
                                return mal_rxde(irq, dev_instance);
                        else
                                return mal_txde(irq, dev_instance);
                } else { /* SERR */
                        return mal_serr(irq, dev_instance);
                }
        }
        return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller may disable poll at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller */
        napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        /* Feels better to trigger a poll here to catch up with events that
         * may have happened on this channel while disabled. It will most
         * probably be delayed until the next interrupt but that's mostly a
         * non-issue in the context where this is called.
         */
        napi_schedule(&mal->napi);
}

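/*
 * NAPI poll: reap TX completions on every registered channel first, then
 * spread the budget across the RX channels.  EOB interrupts are only
 * re-armed once a final pass confirms no packets are left "rotting" in a
 * ring (pending in peek_rx() or sitting on a stopped RX channel).
 */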
static int mal_poll(struct napi_struct *napi, int budget)
{
        struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
        struct list_head *l;
        int received = 0;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d)" NL, budget);

        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, budget - received);
                if (n) {
                        received += n;
                        if (received >= budget)
                                return budget;
                }
        }

        if (napi_complete_done(napi, received)) {
                /* We need to disable IRQs to protect from RXDE IRQ here */
                spin_lock_irqsave(&mal->lock, flags);
                mal_enable_eob_irq(mal);
                spin_unlock_irqrestore(&mal->lock, flags);
        }

        /* Check for "rotting" packet(s) */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (!napi_reschedule(napi))
                                goto more_work;

                        spin_lock_irqsave(&mal->lock, flags);
                        mal_disable_eob_irq(mal);
                        spin_unlock_irqrestore(&mal->lock, flags);
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
        return received;
}

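/*
 * Soft-reset the MAL: set MAL_CFG_SR and spin until the hardware clears
 * it again (documented to take a single system clock; the loop merely
 * bounds the wait in case it never completes).
 */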
static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
            sizeof(struct mal_regs);
}

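/*
 * Fill an ethtool register dump: a subheader identifying this MAL
 * instance followed by a struct mal_regs snapshot.  Returns the first
 * byte past the dump so the caller can chain further subsections.
 */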
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}

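/*
 * Probe: read the channel counts from the device tree, map the DCRs and
 * interrupts, reset and configure the MAL, allocate one coherent DMA
 * block for all BD rings and point each channel at its slice, then
 * request the (possibly shared) error and EOB interrupt handlers.
 */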
static int mal_probe(struct platform_device *ofdev)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        unsigned int dcr_base;
        const u32 *prop;
        u32 cfg;
        unsigned long irqflags;
        irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal)
                return -ENOMEM;

        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
        if (dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs!\n", index);
                err = -ENODEV;
                goto fail;
        }

        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
                defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
                mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                                MAL_FTR_COMMON_ERR_INT);
#else
                printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
                                ofdev->dev.of_node);
                err = -ENODEV;
                goto fail;
#endif
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                mal->txde_irq = mal->rxde_irq = mal->serr_irq;
        } else {
                mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }

        if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
            !mal->txde_irq  || !mal->rxde_irq) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts!\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

        init_dummy_netdev(&mal->dummy_dev);

        netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
                       CONFIG_IBM_EMAC_POLL_WEIGHT);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* Current Axon is not happy with priority being non-0, as it can
         * deadlock; fix it up here
         */
        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                           GFP_KERNEL);
        if (mal->bd_virt == NULL) {
                err = -ENOMEM;
                goto fail_unmap;
        }

        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                irqflags = IRQF_SHARED;
                hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
        } else {
                irqflags = 0;
                hdlr_serr = mal_serr;
                hdlr_txde = mal_txde;
                hdlr_rxde = mal_rxde;
        }

        err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %pOF, %d TX channels, %d RX channels\n",
               mal->version, ofdev->dev.of_node,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        platform_set_drvdata(ofdev, mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, 0x100);
 fail:
        kfree(mal);

        return err;
}

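/*
 * Teardown mirrors probe: quiesce NAPI, release the interrupts, reset
 * the hardware and free the BD block.  Any commac still registered at
 * this point is a driver bug, hence the WARN below.
 */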
static int mal_remove(struct platform_device *ofdev)
{
        struct mal_instance *mal = platform_get_drvdata(ofdev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling */
        napi_disable(&mal->napi);

        if (!list_empty(&mal->list))
                /* This is *very* bad */
                WARN(1, KERN_EMERG
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static const struct of_device_id mal_platform_match[] =
{
        {
                .compatible     = "ibm,mcmal",
        },
        {
                .compatible     = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal",
        },
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal2",
        },
        {},
};

static struct platform_driver mal_of_driver = {
        .driver = {
                .name = "mcmal",
                .of_match_table = mal_platform_match,
        },
        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
        platform_driver_unregister(&mal_of_driver);
}