linux/drivers/sh/maple/maple.c
/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007 - 2009 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *  Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
        int port;
        int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
        if (!drv)
                return -EINVAL;

        drv->drv.bus = &maple_bus_type;

        return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
        driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

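/*
 * For illustration only (not part of the original file): a minimal
 * client driver embeds a struct maple_driver, sets the function mask
 * it services and registers from module init. The "example_*" names
 * are hypothetical; MAPLE_FUNC_CONTROLLER is one of the function
 * codes defined in <linux/maple.h>. Kept inside #if 0 so it is never
 * compiled.
 */
#if 0
static int example_probe(struct device *dev)
{
        struct maple_device *mdev = to_maple_dev(dev);

        dev_info(&mdev->dev, "example device bound at (%d, %d)\n",
                 mdev->port, mdev->unit);
        return 0;
}

static struct maple_driver example_maple_driver = {
        .function = MAPLE_FUNC_CONTROLLER,      /* function mask to match */
        .drv = {
                .name  = "example_maple_driver",
                .probe = example_probe,
        },
};

static int __init example_init(void)
{
        return maple_driver_register(&example_maple_driver);
}

static void __exit example_exit(void)
{
        maple_driver_unregister(&example_maple_driver);
}
#endif
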
/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
        __raw_writel(MAPLE_MAGIC, MAPLE_RESET);
        /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
        __raw_writel(1, MAPLE_TRIGTYPE);
        /*
         * Maple system register
         * bits 31 - 16  timeout in units of 20nsec
         * bit 12        hard trigger - set 0 to keep responding to VBLANK
         * bits 9 - 8    set 00 for 2 Mbps, 01 for 1 Mbps
         * bits 3 - 0    delay (in 1.3ms) between VBLANK and start of DMA
         * max delay is 11
         */
        __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
        __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
        __raw_writel(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
                        void (*callback) (struct mapleq *mq),
                        unsigned long interval, unsigned long function)
{
        dev->callback = callback;
        dev->interval = interval;
        dev->function = cpu_to_be32(function);
        dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

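/*
 * For illustration only: a bound driver typically arms periodic
 * GETCOND polling from its probe routine. example_interrupt() and the
 * polling interval are hypothetical; the bus core invokes the callback
 * with the answered mapleq once the reply has arrived. Kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static void example_interrupt(struct mapleq *mq)
{
        struct maple_device *mdev = mq->dev;
        unsigned char *res = (unsigned char *)mq->recvbuf->buf;

        /* condition data starts after the 4-byte function code word */
        dev_dbg(&mdev->dev, "first condition byte 0x%X\n", res[4]);
}

static void example_start_polling(struct maple_device *mdev)
{
        /* ask the bus to issue GETCOND roughly every HZ/20 jiffies */
        maple_getcond_callback(mdev, example_interrupt, HZ / 20,
                               MAPLE_FUNC_CONTROLLER);
}
#endif
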
static int maple_dma_done(void)
{
        return (__raw_readl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
        struct maple_device *mdev;
        struct mapleq *mq;

        mdev = to_maple_dev(dev);
        mq = mdev->mq;
        kmem_cache_free(maple_queue_cache, mq->recvbuf);
        kfree(mq);
        kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
        size_t length, void *data)
{
        int ret = 0;
        void *sendbuf = NULL;

        if (length) {
                sendbuf = kcalloc(length, 4, GFP_KERNEL);
                if (!sendbuf) {
                        ret = -ENOMEM;
                        goto out;
                }
                ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
        }

        mdev->mq->command = command;
        mdev->mq->length = length;
        if (length > 1)
                memcpy(sendbuf + 4, data, (length - 1) * 4);
        mdev->mq->sendbuf = sendbuf;

        mutex_lock(&maple_wlist_lock);
        list_add_tail(&mdev->mq->list, &maple_waitq);
        mutex_unlock(&maple_wlist_lock);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

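/*
 * For illustration only: queueing a one-word GETCOND request from
 * inside a driver routine, much as setup_maple_commands() does below.
 * The packet is only queued here; it goes out on the wire with the
 * next DMA round. Hypothetical snippet, kept inside #if 0 so it is
 * never compiled.
 */
#if 0
        int error;

        error = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
                                 MAPLE_COMMAND_GETCOND, 1, NULL);
        if (error)
                dev_warn(&mdev->dev, "could not queue GETCOND packet\n");
#endif
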
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
        struct mapleq *mq;

        mq = kzalloc(sizeof(*mq), GFP_KERNEL);
        if (!mq)
                goto failed_nomem;

        INIT_LIST_HEAD(&mq->list);
        mq->dev = mdev;
        mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
        if (!mq->recvbuf)
                goto failed_p2;
        mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

        return mq;

failed_p2:
        kfree(mq);
failed_nomem:
        dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
                mdev->port, mdev->unit);
        return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
        struct maple_device *mdev;

        /*
         * Zero this out to avoid the kobj subsystem
         * thinking it has already been registered.
         */
        mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return NULL;

        mdev->port = port;
        mdev->unit = unit;

        mdev->mq = maple_allocq(mdev);

        if (!mdev->mq) {
                kfree(mdev);
                return NULL;
        }
        mdev->dev.bus = &maple_bus_type;
        mdev->dev.parent = &maple_bus;
        init_waitqueue_head(&mdev->maple_wait);
        return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
        kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
        kfree(mdev->mq);
        kfree(mdev);
}

/* process the command queue into a maple command block;
 * the end flag (bit 31 of the first longword) is left set
 * only on the final command block
 */
static void maple_build_block(struct mapleq *mq)
{
        int port, unit, from, to, len;
        unsigned long *lsendbuf = mq->sendbuf;

        port = mq->dev->port & 3;
        unit = mq->dev->unit;
        len = mq->length;
        from = port << 6;
        to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

        *maple_lastptr &= 0x7fffffff;
        maple_lastptr = maple_sendptr;

        *maple_sendptr++ = (port << 16) | len | 0x80000000;
        *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
        *maple_sendptr++ =
            mq->command | (to << 8) | (from << 16) | (len << 24);
        while (len-- > 0)
                *maple_sendptr++ = *lsendbuf++;
}

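/*
 * For reference: each command block built above occupies 3 + len
 * longwords in the DMA buffer, laid out as
 *
 *   word 0:  0x80000000 | (port << 16) | len  (bit 31 = end of list,
 *            cleared again if another block is appended)
 *   word 1:  physical address of the receive buffer
 *   word 2:  command | (recipient << 8) | (sender << 16) | (len << 24)
 *   word 3+: len longwords of payload, function code first
 */
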
/* build up command queue */
static void maple_send(void)
{
        int i, maple_packets = 0;
        struct mapleq *mq, *nmq;

        if (!maple_dma_done())
                return;

        /* disable DMA */
        __raw_writel(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        mutex_lock(&maple_wlist_lock);
        if (list_empty(&maple_waitq)) {
                mutex_unlock(&maple_wlist_lock);
                goto finish;
        }

        maple_lastptr = maple_sendbuf;
        maple_sendptr = maple_sendbuf;

        list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
                maple_build_block(mq);
                list_del_init(&mq->list);
                list_add_tail(&mq->list, &maple_sentq);
                if (maple_packets++ > MAPLE_MAXPACKETS)
                        break;
        }
        mutex_unlock(&maple_wlist_lock);
        if (maple_packets > 0) {
                for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
                        sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

finish:
        maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
                                        void *devptr)
{
        struct maple_driver *maple_drv;
        struct maple_device *mdev;

        mdev = devptr;
        maple_drv = to_maple_driver(driver);
        if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
        device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
        char *p, *recvbuf;
        unsigned long function;
        int matched, error;

        recvbuf = mdev->mq->recvbuf->buf;
        /*
         * Copy the data as individual elements in
         * case of memory optimisation.
         */
        memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
        memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
        memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
        memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
        memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
        memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
        memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
        memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
        memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
        mdev->product_name[30] = '\0';
        memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
        mdev->product_licence[60] = '\0';

        for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;
        for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;

        function = be32_to_cpu(mdev->devinfo.function);

        dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
                mdev->product_name, function, mdev->port, mdev->unit);

        if (function > 0x200) {
                /* Do this silently - as not a real device */
                function = 0;
                mdev->driver = &maple_unsupported_device;
                dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
        } else {
                matched = bus_for_each_drv(&maple_bus_type, NULL, mdev,
                                maple_check_matching_driver);

                if (matched == 0) {
                        /* Driver does not exist yet */
                        dev_info(&mdev->dev, "no driver found\n");
                        mdev->driver = &maple_unsupported_device;
                }
                dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
                             mdev->unit, function);
        }

        mdev->function = function;
        mdev->dev.release = &maple_release_device;

        atomic_set(&mdev->busy, 0);
        error = device_register(&mdev->dev);
        if (error) {
                dev_warn(&mdev->dev, "could not register device at"
                        " (%d, %d), with error 0x%X\n", mdev->port,
                        mdev->unit, error);
                maple_free_dev(mdev);
                mdev = NULL;
                return;
        }
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
        struct maple_device_specify *ds;
        struct maple_device *mdev;

        ds = portptr;
        mdev = to_maple_dev(device);
        if (mdev->port == ds->port && mdev->unit == ds->unit)
                return 1;
        return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
        int add;
        struct maple_device *mdev = to_maple_dev(device);

        if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
                time_after(jiffies, mdev->when)) {
                /* bounce if we cannot add */
                add = maple_add_packet(mdev,
                        be32_to_cpu(mdev->devinfo.function),
                        MAPLE_COMMAND_GETCOND, 1, NULL);
                if (!add)
                        mdev->when = jiffies + mdev->interval;
        } else {
                if (time_after(jiffies, maple_pnp_time)) {
                        /*
                         * Ensure we don't have block reads and devinfo
                         * calls interfering with one another - so flag
                         * the device as busy.
                         */
                        if (atomic_read(&mdev->busy) == 0) {
                                atomic_set(&mdev->busy, 1);
                                maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                        }
                }
        }
        return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
        int x, locking;
        struct maple_device *mdev;

        if (!maple_dma_done())
                return;

        __raw_writel(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        /*
         * Set up essential commands - to fetch data and
         * check devices are still present.
         */
        bus_for_each_dev(&maple_bus_type, NULL, NULL,
                setup_maple_commands);

        if (time_after(jiffies, maple_pnp_time)) {
                /*
                 * Scan the empty ports - the bus is flaky and may have
                 * misreported emptiness.
                 */
                for (x = 0; x < MAPLE_PORTS; x++) {
                        if (checked[x] && empty[x]) {
                                mdev = baseunits[x];
                                if (!mdev)
                                        break;
                                atomic_set(&mdev->busy, 1);
                                locking = maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                                if (!locking)
                                        break;
                        }
                }

                maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
        }

finish:
        maple_send();
}

/* handle devices added via hotplug - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
        int retval, k, devcheck;
        struct maple_device *mdev_add;
        struct maple_device_specify ds;

        ds.port = mdev->port;
        for (k = 0; k < 5; k++) {
                ds.unit = k + 1;
                retval = bus_for_each_dev(&maple_bus_type, NULL, &ds,
                                check_maple_device);
                if (retval) {
                        submask = submask >> 1;
                        continue;
                }
                devcheck = submask & 0x01;
                if (devcheck) {
                        mdev_add = maple_alloc_dev(mdev->port, k + 1);
                        if (!mdev_add)
                                return;
                        atomic_set(&mdev_add->busy, 1);
                        maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                        /* mark that we are checking sub devices */
                        scanning = 1;
                }
                submask = submask >> 1;
        }
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
        int killbit;

        killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
        killbit = ~killbit;
        killbit &= 0xFF;
        subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

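/*
 * For reference, a worked example of the mask above: unit 3 maps to
 * killbit = (1 << 2) & 0x1f = 0x04; inverting and masking with 0xFF
 * gives 0xFB, which clears only that subunit's bit in the port's
 * subdevice_map entry. Unit 0 (the port itself) uses bit 5 (0x20).
 */
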
/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
        maple_clean_submap(mdev);

        if (likely(mdev->unit != 0)) {
                /*
                 * Block devices play up: they may appear to have been
                 * removed even when still in place, or trip the mtd
                 * layer when they really have gone. This code traps
                 * that eventuality and ensures we aren't overloaded
                 * with useless error messages.
                 */
                if (mdev->can_unload) {
                        if (!mdev->can_unload(mdev)) {
                                atomic_set(&mdev->busy, 2);
                                wake_up(&mdev->maple_wait);
                                return;
                        }
                }

                dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
                        mdev->port, mdev->unit);
                maple_detach_driver(mdev);
                return;
        } else {
                if (!started || !fullscan) {
                        if (checked[mdev->port] == false) {
                                checked[mdev->port] = true;
                                empty[mdev->port] = true;
                                dev_info(&mdev->dev, "no devices"
                                        " attached to port %d\n", mdev->port);
                        }
                        return;
                }
        }
        /* Some hardware devices generate false detach messages on unit 0 */
        atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
                                   char *recvbuf)
{
        char submask;

        if (!started || (scanning == 2) || !fullscan) {
                if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
                        checked[mdev->port] = true;
                        maple_attach_driver(mdev);
                } else {
                        if (mdev->unit == 0)
                                empty[mdev->port] = false;
                        maple_attach_driver(mdev);
                }
        }
        if (mdev->unit == 0) {
                submask = recvbuf[2] & 0x1F;
                if (submask ^ subdevice_map[mdev->port]) {
                        maple_map_subunits(mdev, submask);
                        subdevice_map[mdev->port] = submask;
                }
        }
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
        if (mdev->fileerr_handler) {
                mdev->fileerr_handler(mdev, recvbuf);
                return;
        }

        dev_warn(&mdev->dev, "device at (%d, %d) reports"
                " file error 0x%X\n", mdev->port, mdev->unit,
                ((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
        int i;
        struct maple_device *mdev;

        fullscan = 1;
        for (i = 0; i < MAPLE_PORTS; i++) {
                if (checked[i] == false) {
                        fullscan = 0;
                        mdev = baseunits[i];
                        maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                }
        }
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
        struct mapleq *mq, *nmq;
        struct maple_device *mdev;
        char *recvbuf;
        enum maple_code code;

        if (!maple_dma_done())
                return;
        __raw_writel(0, MAPLE_ENABLE);
        if (!list_empty(&maple_sentq)) {
                list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                        mdev = mq->dev;
                        recvbuf = mq->recvbuf->buf;
                        sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
                        code = recvbuf[0];
                        kfree(mq->sendbuf);
                        list_del_init(&mq->list);
                        switch (code) {
                        case MAPLE_RESPONSE_NONE:
                                maple_response_none(mdev);
                                break;

                        case MAPLE_RESPONSE_DEVINFO:
                                maple_response_devinfo(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_DATATRF:
                                if (mdev->callback)
                                        mdev->callback(mq);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_FILEERR:
                                maple_response_fileerr(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_AGAIN:
                        case MAPLE_RESPONSE_BADCMD:
                        case MAPLE_RESPONSE_BADFUNC:
                                dev_warn(&mdev->dev, "non-fatal error"
                                        " 0x%X at (%d, %d)\n", code,
                                        mdev->port, mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_ALLINFO:
                                dev_notice(&mdev->dev, "extended device"
                                        " information request for (%d, %d)"
                                        " but call is not supported\n",
                                        mdev->port, mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_OK:
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        default:
                                break;
                        }
                }
                /* if scanning is 1 then we have subdevices to check */
                if (scanning == 1) {
                        maple_send();
                        scanning = 2;
                } else
                        scanning = 0;
                /* check if we have actually tested all ports yet */
                if (!fullscan)
                        maple_port_rescan();
                /* mark that we have been through the first scan */
                started = 1;
        }
        maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
        /* Load everything into the bottom half */
        schedule_work(&maple_dma_process);
        return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
        schedule_work(&maple_vblank_process);
        return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
        return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
                IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
        return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
                IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
        maple_sendbuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  MAPLE_DMA_PAGES);
        if (!maple_sendbuf)
                return -ENOMEM;
        return 0;
}

static int maple_match_bus_driver(struct device *devptr,
                                  struct device_driver *drvptr)
{
        struct maple_driver *maple_drv = to_maple_driver(drvptr);
        struct maple_device *maple_dev = to_maple_dev(devptr);

        /* Trap empty port case */
        if (maple_dev->devinfo.function == 0xFFFFFFFF)
                return 0;
        else if (maple_dev->devinfo.function &
                 cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}

static int maple_bus_uevent(struct device *dev,
                            struct kobj_uevent_env *env)
{
        return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
        .drv = {
                .name = "maple_unsupported_device",
                .bus = &maple_bus_type,
        },
};

/*
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
        .name = "maple",
        .match = maple_match_bus_driver,
        .uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
        .init_name = "maple",
        .release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
        int retval, i;
        struct maple_device *mdev[MAPLE_PORTS];

        __raw_writel(0, MAPLE_ENABLE);

        retval = device_register(&maple_bus);
        if (retval)
                goto cleanup;

        retval = bus_register(&maple_bus_type);
        if (retval)
                goto cleanup_device;

        retval = driver_register(&maple_unsupported_device.drv);
        if (retval)
                goto cleanup_bus;

        /* allocate memory for maple bus dma */
        retval = maple_get_dma_buffer();
        if (retval) {
                dev_err(&maple_bus, "failed to allocate DMA buffers\n");
                goto cleanup_basic;
        }

        /* set up DMA interrupt handler */
        retval = maple_set_dma_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab maple DMA IRQ\n");
                goto cleanup_dma;
        }

        /* set up VBLANK interrupt handler */
        retval = maple_set_vblank_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
                goto cleanup_irq;
        }

        maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

        if (!maple_queue_cache) {
                retval = -ENOMEM;
                goto cleanup_bothirqs;
        }

        INIT_LIST_HEAD(&maple_waitq);
        INIT_LIST_HEAD(&maple_sentq);

        /* setup maple ports */
        for (i = 0; i < MAPLE_PORTS; i++) {
                checked[i] = false;
                empty[i] = false;
                mdev[i] = maple_alloc_dev(i, 0);
                if (!mdev[i]) {
                        while (i-- > 0)
                                maple_free_dev(mdev[i]);
                        retval = -ENOMEM;
                        goto cleanup_cache;
                }
                baseunits[i] = mdev[i];
                atomic_set(&mdev[i]->busy, 1);
                maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
                subdevice_map[i] = 0;
        }

        maple_pnp_time = jiffies + HZ;
        /* prepare initial queue */
        maple_send();
        dev_info(&maple_bus, "bus core now registered\n");

        return 0;

cleanup_cache:
        kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
        free_irq(HW_EVENT_VSYNC, &maple_unsupported_device);

cleanup_irq:
        free_irq(HW_EVENT_MAPLE_DMA, &maple_unsupported_device);

cleanup_dma:
        free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
        driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
        bus_unregister(&maple_bus_type);

cleanup_device:
        device_unregister(&maple_bus);

cleanup:
        printk(KERN_ERR "Maple bus registration failed\n");
        return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);