// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

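/* Number of MAL instances probed so far, used to assign mal->index */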
static int mal_count;

int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        if (list_empty(&mal->list))
                napi_enable(&mal->napi);
        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}

void mal_unregister_commac(struct mal_instance *mal,
                struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "unreg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;
        list_del_init(&commac->list);
        if (list_empty(&mal->list))
                napi_disable(&mal->napi);

        spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

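        /* RCBS holds the buffer size in 16-byte units, hence the
         * alignment check above and the shift here
         */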
        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but the MAL_RXCASR bitmask wants the value
         * divided by 8
         */
        if (!(channel % 8))
                channel >>= 3;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but the MAL_RXCARR bitmask wants the value
         * divided by 8
         */
        if (!(channel % 8))
                channel >>= 3;

        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* We ignore descriptor errors; a TXDE or RXDE
                         * interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error: probably buggy hardware or an
                         * incorrect physical address in a BD (i.e. a bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error: probably buggy hardware or an incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

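/*
 * Mask EOB interrupts and hand the instance to NAPI; mal_poll() turns
 * them back on via mal_enable_eob_irq() once it has drained the rings.
 */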
static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(napi_schedule_prep(&mal->napi))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                spin_lock(&mal->lock);
                mal_disable_eob_irq(mal);
                spin_unlock(&mal->lock);
                __napi_schedule(&mal->napi);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

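        /* Chips with MAL_FTR_CLEAR_ICINTSTAT (405EZ) also latch the EOB
         * status in the SDR0 ICINTSTAT register and need it cleared there
         */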
#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                                (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
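                        /* Mark the channel stopped; mal_poll() keeps
                         * rescheduling until the owner recovers and
                         * clears this bit
                         */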
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

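/*
 * Combined handler, used with MAL_FTR_COMMON_ERR_INT when SERR, TXDE
 * and RXDE share one interrupt line: decode MAL_ESR and dispatch.
 */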
static irqreturn_t mal_int(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        if (esr & MAL_ESR_EVB) {
                /* descriptor error */
                if (esr & MAL_ESR_DE) {
                        if (esr & MAL_ESR_CIDT)
                                return mal_rxde(irq, dev_instance);
                        else
                                return mal_txde(irq, dev_instance);
                } else { /* SERR */
                        return mal_serr(irq, dev_instance);
                }
        }
        return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller disables polling at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller */
        napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
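        /* Make stores done while polling was disabled visible before
         * the poll flag is cleared
         */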
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        /* Feels better to trigger a poll here to catch up with events that
         * may have happened on this channel while disabled. It will most
         * probably be delayed until the next interrupt but that's mostly a
         * non-issue in the context where this is called.
         */
        napi_schedule(&mal->napi);
}

static int mal_poll(struct napi_struct *napi, int budget)
{
        struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
        struct list_head *l;
        int received = 0;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d)" NL, budget);

        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, budget - received);
                if (n) {
                        received += n;
                        if (received >= budget)
                                return budget;
                }
        }

        if (napi_complete_done(napi, received)) {
                /* We need to disable IRQs to protect against the RXDE IRQ here */
                spin_lock_irqsave(&mal->lock, flags);
                mal_enable_eob_irq(mal);
                spin_unlock_irqrestore(&mal->lock, flags);
        }

        /* Check for "rotting" packet(s) */
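        /* A frame may have landed after poll_rx() drained the ring but
         * before EOB interrupts were re-enabled above; if so, take the
         * poll back and mask EOB interrupts again
         */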
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (!napi_reschedule(napi))
                                goto more_work;

                        spin_lock_irqsave(&mal->lock, flags);
                        mal_disable_eob_irq(mal);
                        spin_unlock_irqrestore(&mal->lock, flags);
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
        return received;
}

static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
            sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}

static int mal_probe(struct platform_device *ofdev)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        unsigned int dcr_base;
        const u32 *prop;
        u32 cfg;
        unsigned long irqflags;
        irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal)
                return -ENOMEM;

        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
        if (dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs!\n", index);
                err = -ENODEV;
                goto fail;
        }

        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
                defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
                mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                                MAL_FTR_COMMON_ERR_INT);
#else
                printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
                                ofdev->dev.of_node);
                err = -ENODEV;
                /* DCRs are already mapped at this point, so unmap on error */
                goto fail_unmap;
#endif
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                mal->txde_irq = mal->rxde_irq = mal->serr_irq;
        } else {
                mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }

        if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
            !mal->txde_irq || !mal->rxde_irq) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts!\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

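        /* The MAL is shared by several EMACs, so its NAPI context is
         * attached to a dummy netdev rather than to any one of them
         */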
        init_dummy_netdev(&mal->dummy_dev);

        netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
                       CONFIG_IBM_EMAC_POLL_WEIGHT);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* Current Axon is not happy with a non-zero priority; it can
         * deadlock, so fix it up here
         */
        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

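        /* One coherent block holds all rings: TX rings for every channel
         * first, then RX rings, matching mal_tx_bd_offset() and
         * mal_rx_bd_offset()
         */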
        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                          GFP_KERNEL);
        if (mal->bd_virt == NULL) {
                err = -ENOMEM;
                goto fail_unmap;
        }

        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                irqflags = IRQF_SHARED;
                hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
        } else {
                irqflags = 0;
                hdlr_serr = mal_serr;
                hdlr_txde = mal_txde;
                hdlr_rxde = mal_rxde;
        }

        err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %pOF, %d TX channels, %d RX channels\n",
               mal->version, ofdev->dev.of_node,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        platform_set_drvdata(ofdev, mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, 0x100);
 fail:
        kfree(mal);

        return err;
}

static int mal_remove(struct platform_device *ofdev)
{
        struct mal_instance *mal = platform_get_drvdata(ofdev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling */
        napi_disable(&mal->napi);

        if (!list_empty(&mal->list))
                /* This is *very* bad */
                WARN(1, KERN_EMERG
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static const struct of_device_id mal_platform_match[] =
{
        {
                .compatible     = "ibm,mcmal",
        },
        {
                .compatible     = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal",
        },
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal2",
        },
        {},
};

static struct platform_driver mal_of_driver = {
        .driver = {
                .name = "mcmal",
                .of_match_table = mal_platform_match,
        },
        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
        platform_driver_unregister(&mal_of_driver);
}