linux/drivers/bcma/main.c
/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex protects bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
        &dev_attr_manuf.attr,
        &dev_attr_id.attr,
        &dev_attr_rev.attr,
        &dev_attr_class.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
        .name           = "bcma",
        .match          = bcma_bus_match,
        .probe          = bcma_device_probe,
        .remove         = bcma_device_remove,
        .uevent         = bcma_device_uevent,
        .dev_groups     = bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
                return BCMA_CORE_4706_CHIPCOMMON;
        return BCMA_CORE_CHIPCOMMON;
}

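/*
 * Find a core by core ID and unit number. The unit distinguishes multiple
 * instances of the same core on one chip (e.g. two PCIe cores). Returns
 * NULL if the bus has no such core.
 */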
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
                                        u8 unit)
{
        struct bcma_device *core;

        list_for_each_entry(core, &bus->cores, list) {
                if (core->id.id == coreid && core->core_unit == unit)
                        return core;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

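/*
 * Poll a core register until the bits selected by @mask read back as @value
 * or @timeout jiffies have passed, busy-waiting 10 us between reads.
 * Returns true on success, false (with a warning) on timeout.
 */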
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
                     int timeout)
{
        unsigned long deadline = jiffies + timeout;
        u32 val;

        do {
                val = bcma_read32(core, reg);
                if ((val & mask) == value)
                        return true;
                cpu_relax();
                udelay(10);
        } while (!time_after_eq(jiffies, deadline));

        bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

        return false;
}

static void bcma_release_core_dev(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        if (core->io_addr)
                iounmap(core->io_addr);
        if (core->io_wrap)
                iounmap(core->io_wrap);
        kfree(core);
}

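/*
 * NAND and QSPI controllers are needed early: bcma_bus_register() registers
 * them before SPROM initialization, presumably because the SPROM/NVRAM
 * contents may have to be read from flash.
 */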
static bool bcma_is_core_needed_early(u16 core_id)
{
        switch (core_id) {
        case BCMA_CORE_NS_NAND:
        case BCMA_CORE_NS_QSPI:
                return true;
        }

        return false;
}

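/*
 * Find the device tree node describing @core by comparing each child node's
 * translated "reg" address with the core's base address.
 */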
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
{
        struct device_node *node;
        u64 size;
        const __be32 *reg;

        if (!parent || !parent->dev.of_node)
                return NULL;

        for_each_child_of_node(parent->dev.of_node, node) {
                reg = of_get_address(node, 0, &size, NULL);
                if (!reg)
                        continue;
                if (of_translate_address(node, reg) == core->addr)
                        return node;
        }
        return NULL;
}

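/*
 * Resolve interrupt @num for @core: prefer the core's own DT node if it has
 * one, otherwise fall back to parsing the parent's interrupt mapping using
 * the core's base address.
 */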
static int bcma_of_irq_parse(struct platform_device *parent,
                             struct bcma_device *core,
                             struct of_phandle_args *out_irq, int num)
{
        __be32 laddr[1];
        int rc;

        if (core->dev.of_node) {
                rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
                if (!rc)
                        return rc;
        }

        out_irq->np = parent->dev.of_node;
        out_irq->args_count = 1;
        out_irq->args[0] = num;

        laddr[0] = cpu_to_be32(core->addr);
        return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct platform_device *parent,
                                    struct bcma_device *core, int num)
{
        struct of_phandle_args out_irq;
        int ret;

        if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
                return 0;

        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
        if (ret) {
                bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
                           ret);
                return 0;
        }

        return irq_create_of_mapping(&out_irq);
}

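/*
 * Attach device tree information to @core: its matching child node (if any)
 * and the Linux IRQ mapped from interrupt 0. Does nothing without
 * CONFIG_OF_IRQ.
 */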
static void bcma_of_fill_device(struct platform_device *parent,
                                struct bcma_device *core)
{
        struct device_node *node;

        if (!IS_ENABLED(CONFIG_OF_IRQ))
                return;

        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;

        core->irq = bcma_of_get_irq(parent, core, 0);
}

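/*
 * bcma_core_irq - Get the Linux IRQ number for a core.
 *
 * On PCI hosts all cores share the PCI device's IRQ. On SoC hosts with a
 * MIPS core, interrupt 0 is derived from the MIPS IRQ routing (hardware
 * IRQs 0-4 map to Linux IRQs 2-6); other requests are resolved via the
 * device tree when a host platform device is available. SDIO hosts and
 * unroutable interrupts return 0.
 */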
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
        struct bcma_bus *bus = core->bus;
        unsigned int mips_irq;

        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                return bus->host_pci->irq;
        case BCMA_HOSTTYPE_SOC:
                if (bus->drv_mips.core && num == 0) {
                        mips_irq = bcma_core_mips_irq(core);
                        return mips_irq <= 4 ? mips_irq + 2 : 0;
                }
                if (bus->host_pdev)
                        return bcma_of_get_irq(bus->host_pdev, core, num);
                return 0;
        case BCMA_HOSTTYPE_SDIO:
                return 0;
        }

        return 0;
}
EXPORT_SYMBOL(bcma_core_irq);

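/*
 * Set up the struct device embedded in @core: name, bus type and release
 * callback, plus the parent device, DMA device and IRQ appropriate for the
 * host type.
 */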
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
        core->dev.release = bcma_release_core_dev;
        core->dev.bus = &bcma_bus_type;
        dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                core->dev.parent = &bus->host_pci->dev;
                core->dma_dev = &bus->host_pci->dev;
                core->irq = bus->host_pci->irq;
                break;
        case BCMA_HOSTTYPE_SOC:
                core->dev.dma_mask = &core->dev.coherent_dma_mask;
                if (bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
                        core->dev.parent = &bus->host_pdev->dev;
                        bcma_of_fill_device(bus->host_pdev, core);
                } else {
                        core->dma_dev = &core->dev;
                }
                break;
        case BCMA_HOSTTYPE_SDIO:
                break;
        }
}

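/* Return the device of the underlying host (PCI, platform or SDIO), or NULL. */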
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                if (bus->host_pci)
                        return &bus->host_pci->dev;
                else
                        return NULL;
        case BCMA_HOSTTYPE_SOC:
                if (bus->host_pdev)
                        return &bus->host_pdev->dev;
                else
                        return NULL;
        case BCMA_HOSTTYPE_SDIO:
                if (bus->host_sdio)
                        return &bus->host_sdio->dev;
                else
                        return NULL;
        }
        return NULL;
}

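/*
 * Assign the bus a unique number, initialize its core list and detect the
 * chip it is attached to.
 */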
void bcma_init_bus(struct bcma_bus *bus)
{
        mutex_lock(&bcma_buses_mutex);
        bus->num = bcma_bus_next_num++;
        mutex_unlock(&bcma_buses_mutex);

        INIT_LIST_HEAD(&bus->cores);
        bus->nr_cores = 0;

        bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
        int err;

        err = device_register(&core->dev);
        if (err) {
                bcma_err(bus, "Could not register dev for core 0x%03X\n",
                         core->id.id);
                put_device(&core->dev);
                return;
        }
        core->dev_registered = true;
}

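/*
 * Register the remaining cores as devices on the bcma bus, skipping cores
 * handled internally by this subsystem and cores that were already
 * registered early. Flash devices, the GPIO driver and (on SoC hosts) the
 * watchdog are set up here as well.
 */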
static int bcma_register_devices(struct bcma_bus *bus)
{
        struct bcma_device *core;
        int err;

        list_for_each_entry(core, &bus->cores, list) {
                /* We support these cores ourselves */
                switch (core->id.id) {
                case BCMA_CORE_4706_CHIPCOMMON:
                case BCMA_CORE_CHIPCOMMON:
                case BCMA_CORE_NS_CHIPCOMMON_B:
                case BCMA_CORE_PCI:
                case BCMA_CORE_PCIE:
                case BCMA_CORE_PCIE2:
                case BCMA_CORE_MIPS_74K:
                case BCMA_CORE_4706_MAC_GBIT_COMMON:
                        continue;
                }

                /* Early cores were already registered */
                if (bcma_is_core_needed_early(core->id.id))
                        continue;

                /* Only the first GMAC core on BCM4706 is connected and working */
                if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
                    core->core_unit > 0)
                        continue;

                bcma_register_core(bus, core);
        }

#ifdef CONFIG_BCMA_PFLASH
        if (bus->drv_cc.pflash.present) {
                err = platform_device_register(&bcma_pflash_dev);
                if (err)
                        bcma_err(bus, "Error registering parallel flash\n");
        }
#endif

#ifdef CONFIG_BCMA_SFLASH
        if (bus->drv_cc.sflash.present) {
                err = platform_device_register(&bcma_sflash_dev);
                if (err)
                        bcma_err(bus, "Error registering serial flash\n");
        }
#endif

#ifdef CONFIG_BCMA_NFLASH
        if (bus->drv_cc.nflash.present) {
                err = platform_device_register(&bcma_nflash_dev);
                if (err)
                        bcma_err(bus, "Error registering NAND flash\n");
        }
#endif
        err = bcma_gpio_init(&bus->drv_cc);
        if (err == -ENOTSUPP)
                bcma_debug(bus, "GPIO driver not activated\n");
        else if (err)
                bcma_err(bus, "Error registering GPIO driver: %i\n", err);

        if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
                err = bcma_chipco_watchdog_register(&bus->drv_cc);
                if (err)
                        bcma_err(bus, "Error registering watchdog driver\n");
        }

        return 0;
}

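/*
 * Unregister every core that was registered as a device, then free the
 * remaining internally-handled cores that never had a device. On SoC hosts
 * the watchdog platform device is removed as well.
 */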
void bcma_unregister_cores(struct bcma_bus *bus)
{
        struct bcma_device *core, *tmp;

        list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                if (!core->dev_registered)
                        continue;
                list_del(&core->list);
                device_unregister(&core->dev);
        }
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                platform_device_unregister(bus->drv_cc.watchdog);

        /* Now that no one uses the internally-handled cores, we can free them */
        list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                list_del(&core->list);
                kfree(core);
        }
}

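/*
 * Main bus registration: scan for cores, early-initialize ChipCommon and
 * PCIe, register the cores needed for flash access, read the SPROM,
 * initialize the internally-handled cores and finally register the
 * remaining cores as devices.
 */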
int bcma_bus_register(struct bcma_bus *bus)
{
        int err;
        struct bcma_device *core;
        struct device *dev;

        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
        if (err) {
                bcma_err(bus, "Failed to scan: %d\n", err);
                return err;
        }

        /* Early init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_early_init(&bus->drv_cc);
        }

        /* Early init PCIE core */
        core = bcma_find_core(bus, BCMA_CORE_PCIE);
        if (core) {
                bus->drv_pci[0].core = core;
                bcma_core_pci_early_init(&bus->drv_pci[0]);
        }

        dev = bcma_bus_get_host_dev(bus);
        if (dev)
                of_platform_default_populate(dev->of_node, NULL, dev);

        /* Cores providing flash access go before SPROM init */
        list_for_each_entry(core, &bus->cores, list) {
                if (bcma_is_core_needed_early(core->id.id))
                        bcma_register_core(bus, core);
        }

        /* Try to get SPROM */
        err = bcma_sprom_get(bus);
        if (err == -ENOENT) {
                bcma_err(bus, "No SPROM available\n");
        } else if (err)
                bcma_err(bus, "Failed to get SPROM: %d\n", err);

        /* Init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_init(&bus->drv_cc);
        }

        /* Init NS ChipCommon B core */
        core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
        if (core) {
                bus->drv_cc_b.core = core;
                bcma_core_chipcommon_b_init(&bus->drv_cc_b);
        }

        /* Init MIPS core */
        core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        if (core) {
                bus->drv_mips.core = core;
                bcma_core_mips_init(&bus->drv_mips);
        }

        /* Init PCIE core (unit 0) */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
        if (core) {
                bus->drv_pci[0].core = core;
                bcma_core_pci_init(&bus->drv_pci[0]);
        }

        /* Init PCIE core (unit 1) */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
        if (core) {
                bus->drv_pci[1].core = core;
                bcma_core_pci_init(&bus->drv_pci[1]);
        }

        /* Init PCIe Gen 2 core */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
        if (core) {
                bus->drv_pcie2.core = core;
                bcma_core_pcie2_init(&bus->drv_pcie2);
        }

        /* Init GBIT MAC COMMON core */
        core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
        if (core) {
                bus->drv_gmac_cmn.core = core;
                bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
        }

        /* Register found cores */
        bcma_register_devices(bus);

        bcma_info(bus, "Bus registered\n");

        return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
        int err;

        err = bcma_gpio_unregister(&bus->drv_cc);
        if (err == -EBUSY)
                bcma_err(bus, "Some GPIOs are still in use.\n");
        else if (err)
                bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

        bcma_core_chipcommon_b_free(&bus->drv_cc_b);

        bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function designed for
 * SoCs. It scans the bus and performs basic initialization of the main
 * cores only. Please note that it requires memory allocation, but it will
 * not try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
        int err;
        struct bcma_device *core;

        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
        if (err) {
                bcma_err(bus, "Failed to scan bus: %d\n", err);
                return -1;
        }

        /* Early init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_early_init(&bus->drv_cc);
        }

        /* Early init MIPS core */
        core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        if (core) {
                bus->drv_mips.core = core;
                bcma_core_mips_early_init(&bus->drv_mips);
        }

        bcma_info(bus, "Early bus registered\n");

        return 0;
}

#ifdef CONFIG_PM
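/*
 * Bus-level PM helpers: forward suspend/resume to every core with a bound
 * bcma driver; on resume the ChipCommon core is reinitialized first.
 */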
int bcma_bus_suspend(struct bcma_bus *bus)
{
        struct bcma_device *core;

        list_for_each_entry(core, &bus->cores, list) {
                struct device_driver *drv = core->dev.driver;
                if (drv) {
                        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
                        if (adrv->suspend)
                                adrv->suspend(core);
                }
        }
        return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
        struct bcma_device *core;

        /* Init CC core */
        if (bus->drv_cc.core) {
                bus->drv_cc.setup_done = false;
                bcma_core_chipcommon_init(&bus->drv_cc);
        }

        list_for_each_entry(core, &bus->cores, list) {
                struct device_driver *drv = core->dev.driver;
                if (drv) {
                        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
                        if (adrv->resume)
                                adrv->resume(core);
                }
        }

        return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
        drv->drv.name = drv->name;
        drv->drv.bus = &bcma_bus_type;
        drv->drv.owner = owner;

        return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
        driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

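/*
 * Match a core against a driver's id_table. BCMA_ANY_MANUF, BCMA_ANY_ID,
 * BCMA_ANY_REV and BCMA_ANY_CLASS act as wildcards; the table is terminated
 * by a zeroed entry.
 */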
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
        const struct bcma_device_id *cid = &core->id;
        const struct bcma_device_id *did;

        for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
                if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
                    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
                    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
                    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
                        return 1;
        }
        return 0;
}

static int bcma_device_probe(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
                                                drv);
        int err = 0;

        if (adrv->probe)
                err = adrv->probe(core);

        return err;
}

static int bcma_device_remove(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
                                                drv);

        if (adrv->remove)
                adrv->remove(core);

        return 0;
}

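/* Emit a MODALIAS uevent so userspace can autoload the matching driver. */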
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return add_uevent_var(env,
                              "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
                              core->id.manuf, core->id.id,
                              core->id.rev, core->id.class);
}

static unsigned int bcma_bus_registered;

/*
 * If built-in, the bus has to be registered early, before any driver calls
 * bcma_driver_register(). Otherwise registering a driver would trigger a
 * BUG in driver_register().
 */
static int __init bcma_init_bus_register(void)
{
        int err;

        if (bcma_bus_registered)
                return 0;

        err = bus_register(&bcma_bus_type);
        if (!err)
                bcma_bus_registered = 1;

        return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif

/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
        int err;

        err = bcma_init_bus_register();
        if (err)
                return err;

        err = bcma_host_soc_register_driver();
        if (err) {
                pr_err("SoC host initialization failed\n");
                err = 0;
        }
#ifdef CONFIG_BCMA_HOST_PCI
        err = bcma_host_pci_init();
        if (err) {
                pr_err("PCI host initialization failed\n");
                err = 0;
        }
#endif

        return err;
}
module_init(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
        bcma_host_pci_exit();
#endif
        bcma_host_soc_unregister_driver();
        bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit);