linux/drivers/sh/maple/maple.c
/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007 - 2009 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *  Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
        int port;
        int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
        if (!drv)
                return -EINVAL;

        drv->drv.bus = &maple_bus_type;

        return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
        driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);
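
/*
 * A minimal sketch of a client driver hooking into the bus. Names are
 * illustrative, not from this file; the in-tree Dreamcast input drivers
 * follow this pattern, matching on a function code such as
 * MAPLE_FUNC_CONTROLLER:
 *
 *	static struct maple_driver example_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name   = "example_maple",
 *			.probe  = example_probe,
 *			.remove = example_remove,
 *		},
 *	};
 *
 * module_init() then calls maple_driver_register(&example_driver) and
 * module_exit() calls maple_driver_unregister(&example_driver).
 */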

/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
        __raw_writel(MAPLE_MAGIC, MAPLE_RESET);
        /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
        __raw_writel(1, MAPLE_TRIGTYPE);
        /*
        * Maple system register
        * bits 31 - 16  timeout in units of 20nsec
        * bit 12        hard trigger - set 0 to keep responding to VBLANK
        * bits 9 - 8    set 00 for 2 Mbps, 01 for 1 Mbps
        * bits 3 - 0    delay (in 1.3ms) between VBLANK and start of DMA
        * max delay is 11
        */
        __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
        __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
        __raw_writel(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
                        void (*callback) (struct mapleq *mq),
                        unsigned long interval, unsigned long function)
{
        dev->callback = callback;
        dev->interval = interval;
        dev->function = cpu_to_be32(function);
        dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
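
/*
 * A minimal sketch of periodic polling; the handler name is
 * hypothetical, but the in-tree pad driver registers its callback in
 * the same way:
 *
 *	static void example_callback(struct mapleq *mq)
 *	{
 *		char *frame = mq->recvbuf->buf;
 *		...
 *	}
 *
 *	maple_getcond_callback(mdev, example_callback, HZ / 50,
 *			       MAPLE_FUNC_CONTROLLER);
 *
 * The bus then queues a GETCOND for the device roughly every @interval
 * jiffies (see setup_maple_commands() below) and invokes the callback
 * from the DMA bottom half when MAPLE_RESPONSE_DATATRF arrives.
 */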

static int maple_dma_done(void)
{
        return (__raw_readl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
        struct maple_device *mdev;
        struct mapleq *mq;

        mdev = to_maple_dev(dev);
        mq = mdev->mq;
        kmem_cache_free(maple_queue_cache, mq->recvbuf);
        kfree(mq);
        kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
        size_t length, void *data)
{
        int ret = 0;
        void *sendbuf = NULL;

        if (length) {
                sendbuf = kzalloc(length * 4, GFP_KERNEL);
                if (!sendbuf) {
                        ret = -ENOMEM;
                        goto out;
                }
                ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
        }

        mdev->mq->command = command;
        mdev->mq->length = length;
        if (length > 1)
                memcpy(sendbuf + 4, data, (length - 1) * 4);
        mdev->mq->sendbuf = sendbuf;

        mutex_lock(&maple_wlist_lock);
        list_add_tail(&mdev->mq->list, &maple_waitq);
        mutex_unlock(&maple_wlist_lock);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
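
/*
 * A minimal usage sketch (values illustrative only). A bare query with
 * no payload:
 *
 *	maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
 *
 * With a payload, @length counts 32-bit words including the function
 * word, and @data supplies the words after it - e.g. a block read, as
 * the VMU storage driver issues:
 *
 *	__be32 arg = cpu_to_be32(0);
 *	maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *			 MAPLE_COMMAND_BREAD, 2, &arg);
 *
 * Packets are only queued here; they go out on the next DMA round
 * built by maple_send().
 */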

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
        struct mapleq *mq;

        mq = kzalloc(sizeof(*mq), GFP_KERNEL);
        if (!mq)
                goto failed_nomem;

        INIT_LIST_HEAD(&mq->list);
        mq->dev = mdev;
        mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
        if (!mq->recvbuf)
                goto failed_p2;
        mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

        return mq;

failed_p2:
        kfree(mq);
failed_nomem:
        dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
                mdev->port, mdev->unit);
        return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
        struct maple_device *mdev;

        /* zero this out to avoid kobj subsystem
        * thinking it has already been registered */

        mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return NULL;

        mdev->port = port;
        mdev->unit = unit;

        mdev->mq = maple_allocq(mdev);

        if (!mdev->mq) {
                kfree(mdev);
                return NULL;
        }
        mdev->dev.bus = &maple_bus_type;
        mdev->dev.parent = &maple_bus;
        init_waitqueue_head(&mdev->maple_wait);
        return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
        kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
        kfree(mdev->mq);
        kfree(mdev);
}

/* process the command queue into a maple command block -
 * the final command in the block has bit 31 of its first word set;
 * maple_build_block() clears that bit on the previously queued command
 */
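/*
 * Layout of each transfer descriptor, as written below:
 *   word 0:  bit 31 last-transfer flag | port << 16 | payload length
 *   word 1:  physical address of the receive buffer
 *   word 2:  command | recipient << 8 | sender << 16 | length << 24
 *   word 3+: payload, if any (big-endian function code first)
 */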
static void maple_build_block(struct mapleq *mq)
{
        int port, unit, from, to, len;
        unsigned long *lsendbuf = mq->sendbuf;

        port = mq->dev->port & 3;
        unit = mq->dev->unit;
        len = mq->length;
        from = port << 6;
        to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

        *maple_lastptr &= 0x7fffffff;
        maple_lastptr = maple_sendptr;

        *maple_sendptr++ = (port << 16) | len | 0x80000000;
        *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
        *maple_sendptr++ =
            mq->command | (to << 8) | (from << 16) | (len << 24);
        while (len-- > 0)
                *maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
        int i, maple_packets = 0;
        struct mapleq *mq, *nmq;

        if (!maple_dma_done())
                return;

        /* disable DMA */
        __raw_writel(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        mutex_lock(&maple_wlist_lock);
        if (list_empty(&maple_waitq)) {
                mutex_unlock(&maple_wlist_lock);
                goto finish;
        }

        maple_lastptr = maple_sendbuf;
        maple_sendptr = maple_sendbuf;

        list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
                maple_build_block(mq);
                list_del_init(&mq->list);
                list_add_tail(&mq->list, &maple_sentq);
                if (maple_packets++ > MAPLE_MAXPACKETS)
                        break;
        }
        mutex_unlock(&maple_wlist_lock);
        if (maple_packets > 0) {
                /* maple_sendbuf is an unsigned long pointer, so step
                 * through the buffer in bytes, not longs */
                for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
                        dma_cache_sync(0, (void *)maple_sendbuf +
                                       i * PAGE_SIZE, PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
        }

finish:
        maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
                                        void *devptr)
{
        struct maple_driver *maple_drv;
        struct maple_device *mdev;

        mdev = devptr;
        maple_drv = to_maple_driver(driver);
        if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
        device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
        char *p, *recvbuf;
        unsigned long function;
        int matched, error;

        recvbuf = mdev->mq->recvbuf->buf;
        /* copy the data as individual elements in
        * case of memory optimisation */
        memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
        memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
        memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
        memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
        memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
        memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
        memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
        memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
        mdev->product_name[30] = '\0';
        memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
        mdev->product_licence[60] = '\0';

        for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;
        for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;

        function = be32_to_cpu(mdev->devinfo.function);

        dev_info(&mdev->dev, "detected %s: function 0x%lX at (%d, %d)\n",
                mdev->product_name, function, mdev->port, mdev->unit);

        if (function > 0x200) {
                /* Do this silently - as not a real device */
                function = 0;
                mdev->driver = &maple_unsupported_device;
                dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
        } else {
                matched =
                        bus_for_each_drv(&maple_bus_type, NULL, mdev,
                                maple_check_matching_driver);

                if (matched == 0) {
                        /* Driver does not exist yet */
                        dev_info(&mdev->dev, "no driver found\n");
                        mdev->driver = &maple_unsupported_device;
                }
                dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
                             mdev->unit, function);
        }

        mdev->function = function;
        mdev->dev.release = &maple_release_device;

        atomic_set(&mdev->busy, 0);
        error = device_register(&mdev->dev);
        if (error) {
                dev_warn(&mdev->dev, "could not register device at"
                        " (%d, %d), with error 0x%X\n", mdev->port,
                        mdev->unit, error);
                maple_free_dev(mdev);
                mdev = NULL;
                return;
        }
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
        struct maple_device_specify *ds;
        struct maple_device *mdev;

        ds = portptr;
        mdev = to_maple_dev(device);
        if (mdev->port == ds->port && mdev->unit == ds->unit)
                return 1;
        return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
        int add;
        struct maple_device *mdev = to_maple_dev(device);

        if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
                time_after(jiffies, mdev->when)) {
                /* bounce if we cannot add */
                add = maple_add_packet(mdev,
                        be32_to_cpu(mdev->devinfo.function),
                        MAPLE_COMMAND_GETCOND, 1, NULL);
                if (!add)
                        mdev->when = jiffies + mdev->interval;
        } else {
                if (time_after(jiffies, maple_pnp_time))
                        /* Ensure we don't have block reads and devinfo
                        * calls interfering with one another - so flag the
                        * device as busy */
                        if (atomic_read(&mdev->busy) == 0) {
                                atomic_set(&mdev->busy, 1);
                                maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                        }
        }
        return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
        int x, locking;
        struct maple_device *mdev;

        if (!maple_dma_done())
                return;

        __raw_writel(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        /*
        * Set up essential commands - to fetch data and
        * check devices are still present
        */
        bus_for_each_dev(&maple_bus_type, NULL, NULL,
                setup_maple_commands);

        if (time_after(jiffies, maple_pnp_time)) {
                /*
                * Scan the empty ports - the bus is flaky and may have
                * mis-reported emptiness
                */
                for (x = 0; x < MAPLE_PORTS; x++) {
                        if (checked[x] && empty[x]) {
                                mdev = baseunits[x];
                                if (!mdev)
                                        break;
                                atomic_set(&mdev->busy, 1);
                                locking = maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                                if (!locking)
                                        break;
                        }
                }

                maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
        }

finish:
        maple_send();
}

/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
        int retval, k, devcheck;
        struct maple_device *mdev_add;
        struct maple_device_specify ds;

        ds.port = mdev->port;
        for (k = 0; k < 5; k++) {
                ds.unit = k + 1;
                retval =
                    bus_for_each_dev(&maple_bus_type, NULL, &ds,
                                     check_maple_device);
                if (retval) {
                        submask = submask >> 1;
                        continue;
                }
                devcheck = submask & 0x01;
                if (devcheck) {
                        mdev_add = maple_alloc_dev(mdev->port, k + 1);
                        if (!mdev_add)
                                return;
                        atomic_set(&mdev_add->busy, 1);
                        maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                        /* mark that we are checking sub devices */
                        scanning = 1;
                }
                submask = submask >> 1;
        }
}

/* mark a device as removed */
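/* unit 0 (the port itself) maps to bit 5 (0x20) and subunits 1 - 5 map
 * to bits 0 - 4 - the same encoding as the recipient address built in
 * maple_build_block() */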
static void maple_clean_submap(struct maple_device *mdev)
{
        int killbit;

        killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
        killbit = ~killbit;
        killbit &= 0xFF;
        subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
        maple_clean_submap(mdev);

        if (likely(mdev->unit != 0)) {
                /*
                * Block devices play up
                * and give the impression they have
                * been removed even when still in place or
                * trip the mtd layer when they have
                * really gone - this code traps that eventuality
                * and ensures we aren't overloaded with useless
                * error messages
                */
                if (mdev->can_unload) {
                        if (!mdev->can_unload(mdev)) {
                                atomic_set(&mdev->busy, 2);
                                wake_up(&mdev->maple_wait);
                                return;
                        }
                }

                dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
                        mdev->port, mdev->unit);
                maple_detach_driver(mdev);
                return;
        } else {
                if (!started || !fullscan) {
                        if (checked[mdev->port] == false) {
                                checked[mdev->port] = true;
                                empty[mdev->port] = true;
                                dev_info(&mdev->dev, "no devices"
                                        " attached to port %d\n", mdev->port);
                        }
                        return;
                }
        }
        /* Some hardware devices generate false detach messages on unit 0 */
        atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
                                   char *recvbuf)
{
        char submask;

        if (!started || (scanning == 2) || !fullscan) {
                if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
                        checked[mdev->port] = true;
                        maple_attach_driver(mdev);
                } else {
                        if (mdev->unit != 0)
                                maple_attach_driver(mdev);
                        if (mdev->unit == 0) {
                                empty[mdev->port] = false;
                                maple_attach_driver(mdev);
                        }
                }
        }
        if (mdev->unit == 0) {
                submask = recvbuf[2] & 0x1F;
                if (submask ^ subdevice_map[mdev->port]) {
                        maple_map_subunits(mdev, submask);
                        subdevice_map[mdev->port] = submask;
                }
        }
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
        if (mdev->fileerr_handler) {
                mdev->fileerr_handler(mdev, recvbuf);
                return;
        } else
                dev_warn(&mdev->dev, "device at (%d, %d) reports"
                        " file error 0x%X\n", mdev->port, mdev->unit,
                        ((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
        int i;
        struct maple_device *mdev;

        fullscan = 1;
        for (i = 0; i < MAPLE_PORTS; i++) {
                if (checked[i] == false) {
                        fullscan = 0;
                        mdev = baseunits[i];
                        maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                }
        }
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
        struct mapleq *mq, *nmq;
        struct maple_device *mdev;
        char *recvbuf;
        enum maple_code code;

        if (!maple_dma_done())
                return;
        __raw_writel(0, MAPLE_ENABLE);
        if (!list_empty(&maple_sentq)) {
                list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                        mdev = mq->dev;
                        recvbuf = mq->recvbuf->buf;
                        dma_cache_sync(&mdev->dev, recvbuf, 0x400,
                                DMA_FROM_DEVICE);
                        code = recvbuf[0];
                        kfree(mq->sendbuf);
                        list_del_init(&mq->list);
                        switch (code) {
                        case MAPLE_RESPONSE_NONE:
                                maple_response_none(mdev);
                                break;

                        case MAPLE_RESPONSE_DEVINFO:
                                maple_response_devinfo(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_DATATRF:
                                if (mdev->callback)
                                        mdev->callback(mq);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_FILEERR:
                                maple_response_fileerr(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_AGAIN:
                        case MAPLE_RESPONSE_BADCMD:
                        case MAPLE_RESPONSE_BADFUNC:
                                dev_warn(&mdev->dev, "non-fatal error"
                                        " 0x%X at (%d, %d)\n", code,
                                        mdev->port, mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_ALLINFO:
                                dev_notice(&mdev->dev, "extended"
                                        " device information request for (%d, %d)"
                                        " but call is not supported\n",
                                        mdev->port, mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_OK:
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        default:
                                break;
                        }
                }
                /* if scanning is 1 then we have subdevices to check */
                if (scanning == 1) {
                        maple_send();
                        scanning = 2;
                } else
                        scanning = 0;
                /* check if we have actually tested all ports yet */
                if (!fullscan)
                        maple_port_rescan();
                /* mark that we have been through the first scan */
                started = 1;
        }
        maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
        /* Load everything into the bottom half */
        schedule_work(&maple_dma_process);
        return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
        schedule_work(&maple_vblank_process);
        return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
        return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
                IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
        return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
                IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
        maple_sendbuf =
            (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      MAPLE_DMA_PAGES);
        if (!maple_sendbuf)
                return -ENOMEM;
        return 0;
}

static int maple_match_bus_driver(struct device *devptr,
                                  struct device_driver *drvptr)
{
        struct maple_driver *maple_drv = to_maple_driver(drvptr);
        struct maple_device *maple_dev = to_maple_dev(devptr);

        /* Trap empty port case */
        if (maple_dev->devinfo.function == 0xFFFFFFFF)
                return 0;
        else if (maple_dev->devinfo.function &
                 cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}

static int maple_bus_uevent(struct device *dev,
                            struct kobj_uevent_env *env)
{
        return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
        .drv = {
                .name = "maple_unsupported_device",
                .bus = &maple_bus_type,
        },
};

/*
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
        .name = "maple",
        .match = maple_match_bus_driver,
        .uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);
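
/*
 * Registered devices appear under /sys/bus/maple/devices/, named by
 * dev_set_name() in maple_attach_driver(): "<port>:0<unit>.<function>"
 * for recognised devices and "<port>:0.port" for empty ports.
 */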

static struct device maple_bus = {
        .init_name = "maple",
        .release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
        int retval, i;
        struct maple_device *mdev[MAPLE_PORTS];

        __raw_writel(0, MAPLE_ENABLE);

        retval = device_register(&maple_bus);
        if (retval)
                goto cleanup;

        retval = bus_register(&maple_bus_type);
        if (retval)
                goto cleanup_device;

        retval = driver_register(&maple_unsupported_device.drv);
        if (retval)
                goto cleanup_bus;

        /* allocate memory for maple bus dma */
        retval = maple_get_dma_buffer();
        if (retval) {
                dev_err(&maple_bus, "failed to allocate DMA buffers\n");
                goto cleanup_basic;
        }

        /* set up DMA interrupt handler */
        retval = maple_set_dma_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab maple "
                        "DMA IRQ\n");
                goto cleanup_dma;
        }

        /* set up VBLANK interrupt handler */
        retval = maple_set_vblank_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
                goto cleanup_irq;
        }

        maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
        if (!maple_queue_cache)
                goto cleanup_bothirqs;

        INIT_LIST_HEAD(&maple_waitq);
        INIT_LIST_HEAD(&maple_sentq);

        /* setup maple ports */
        for (i = 0; i < MAPLE_PORTS; i++) {
                checked[i] = false;
                empty[i] = false;
                mdev[i] = maple_alloc_dev(i, 0);
                if (!mdev[i]) {
                        while (i-- > 0)
                                maple_free_dev(mdev[i]);
                        goto cleanup_cache;
                }
                baseunits[i] = mdev[i];
                atomic_set(&mdev[i]->busy, 1);
                maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
                subdevice_map[i] = 0;
        }

        maple_pnp_time = jiffies + HZ;
        /* prepare initial queue */
        maple_send();
        dev_info(&maple_bus, "bus core now registered\n");

        return 0;

cleanup_cache:
        kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
        /* pass the dev_id used at request_irq() time so the shared
         * handlers are actually released */
        free_irq(HW_EVENT_VSYNC, &maple_unsupported_device);

cleanup_irq:
        free_irq(HW_EVENT_MAPLE_DMA, &maple_unsupported_device);

cleanup_dma:
        free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
        driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
        bus_unregister(&maple_bus_type);

cleanup_device:
        device_unregister(&maple_bus);

cleanup:
        printk(KERN_ERR "Maple bus registration failed\n");
        return retval;
}

/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);