linux/drivers/sh/maple/maple.c
/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007 - 2009 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *  Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
        int port;
        int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
        if (!drv)
                return -EINVAL;

        drv->drv.bus = &maple_bus_type;

        return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
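
/*
 * Illustrative use only - the driver and callback names below are
 * hypothetical, not part of this file. A client driver fills in the
 * function code it services and the embedded struct device_driver,
 * then registers in its init path:
 *
 *      static struct maple_driver example_driver = {
 *              .function = MAPLE_FUNC_CONTROLLER,
 *              .drv = {
 *                      .name   = "example_maple_driver",
 *                      .probe  = example_probe,
 *                      .remove = example_remove,
 *              },
 *      };
 *
 *      return maple_driver_register(&example_driver);
 */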

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
        driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
        ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
        /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
        ctrl_outl(1, MAPLE_TRIGTYPE);
        /*
         * Maple system register
         * bits 31 - 16  timeout in units of 20nsec
         * bit 12        hard trigger - set 0 to keep responding to VBLANK
         * bits 9 - 8    set 00 for 2 Mbps, 01 for 1 Mbps
         * bits 3 - 0    delay (in 1.3ms) between VBLANK and start of DMA
         * max delay is 11
         */
        ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
        ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
        ctrl_outl(1, MAPLE_ENABLE);
}
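
/*
 * For reference (an assumption about the mach headers, not verified
 * here): if MAPLE_TIMEOUT(x) places x in bits 31 - 16 and MAPLE_2MBPS
 * is the 00 speed encoding, the MAPLE_SPEED value written above is
 * 0xFFFF0000 - maximum timeout, 2 Mbps, zero VBLANK-to-DMA delay.
 */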

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
                        void (*callback) (struct mapleq *mq),
                        unsigned long interval, unsigned long function)
{
        dev->callback = callback;
        dev->interval = interval;
        dev->function = cpu_to_be32(function);
        dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
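
/*
 * Illustrative use (the callback name is hypothetical): an input
 * driver could ask the bus to issue GETCOND roughly twenty times a
 * second and hand each reply to its handler:
 *
 *      maple_getcond_callback(mdev, example_input_callback, HZ / 20,
 *                             MAPLE_FUNC_CONTROLLER);
 */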

static int maple_dma_done(void)
{
        return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
        struct maple_device *mdev;
        struct mapleq *mq;

        mdev = to_maple_dev(dev);
        mq = mdev->mq;
        kmem_cache_free(maple_queue_cache, mq->recvbuf);
        kfree(mq);
        kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
        size_t length, void *data)
{
        int ret = 0;
        void *sendbuf = NULL;

        if (length) {
                sendbuf = kzalloc(length * 4, GFP_KERNEL);
                if (!sendbuf) {
                        ret = -ENOMEM;
                        goto out;
                }
                ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
        }

        mdev->mq->command = command;
        mdev->mq->length = length;
        if (length > 1)
                memcpy(sendbuf + 4, data, (length - 1) * 4);
        mdev->mq->sendbuf = sendbuf;

        mutex_lock(&maple_wlist_lock);
        list_add_tail(&mdev->mq->list, &maple_waitq);
        mutex_unlock(&maple_wlist_lock);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
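
/*
 * Illustrative use, mirroring setup_maple_commands() below: queue a
 * one-word GETCOND request for the device's advertised function. A
 * return of 0 means the packet is now on maple_waitq awaiting the
 * next DMA run; -ENOMEM means the send buffer allocation failed:
 *
 *      if (maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *                           MAPLE_COMMAND_GETCOND, 1, NULL))
 *              return -ENOMEM;
 */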

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
        struct mapleq *mq;

        mq = kzalloc(sizeof(*mq), GFP_KERNEL);
        if (!mq)
                goto failed_nomem;

        INIT_LIST_HEAD(&mq->list);
        mq->dev = mdev;
        mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
        if (!mq->recvbuf)
                goto failed_p2;
        mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

        return mq;

failed_p2:
        kfree(mq);
failed_nomem:
        dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
                mdev->port, mdev->unit);
        return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
        struct maple_device *mdev;

        /*
         * Zero this out to avoid the kobj subsystem
         * thinking it has already been registered.
         */
        mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return NULL;

        mdev->port = port;
        mdev->unit = unit;

        mdev->mq = maple_allocq(mdev);

        if (!mdev->mq) {
                kfree(mdev);
                return NULL;
        }
        mdev->dev.bus = &maple_bus_type;
        mdev->dev.parent = &maple_bus;
        init_waitqueue_head(&mdev->maple_wait);
        return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
        kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
        kfree(mdev->mq);
        kfree(mdev);
}

/*
 * Process the command queue into a maple command block;
 * the terminating command has bit 31 of the first long set to 0.
 */
static void maple_build_block(struct mapleq *mq)
{
        int port, unit, from, to, len;
        unsigned long *lsendbuf = mq->sendbuf;

        port = mq->dev->port & 3;
        unit = mq->dev->unit;
        len = mq->length;
        from = port << 6;
        to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

        *maple_lastptr &= 0x7fffffff;
        maple_lastptr = maple_sendptr;

        *maple_sendptr++ = (port << 16) | len | 0x80000000;
        *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
        *maple_sendptr++ =
            mq->command | (to << 8) | (from << 16) | (len << 24);
        while (len-- > 0)
                *maple_sendptr++ = *lsendbuf++;
}
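
/*
 * Worked example of the three header words above (illustrative): a
 * GETCOND command (protocol value 0x09) to port 1, unit 1 with a
 * one-word payload gives from = 0x40, to = 0x41 and produces:
 *
 *      0x80010001              port 1, length 1, bit 31 set (last block)
 *      PHYSADDR(recvbuf)       physical address of the receive buffer
 *      0x01404109              len 1 | from 0x40 | to 0x41 | command 0x09
 *
 * Bit 31 of the first word is cleared again by the next call through
 * *maple_lastptr, so only the final block in the buffer keeps it set.
 */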

/* build up command queue */
static void maple_send(void)
{
        int i, maple_packets = 0;
        struct mapleq *mq, *nmq;

        if (!maple_dma_done())
                return;

        /* disable DMA */
        ctrl_outl(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        mutex_lock(&maple_wlist_lock);
        if (list_empty(&maple_waitq)) {
                mutex_unlock(&maple_wlist_lock);
                goto finish;
        }

        maple_lastptr = maple_sendbuf;
        maple_sendptr = maple_sendbuf;

        list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
                maple_build_block(mq);
                list_del_init(&mq->list);
                list_add_tail(&mq->list, &maple_sentq);
                if (maple_packets++ > MAPLE_MAXPACKETS)
                        break;
        }
        mutex_unlock(&maple_wlist_lock);
        if (maple_packets > 0) {
                for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
                        dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

finish:
        maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
                                        void *devptr)
{
        struct maple_driver *maple_drv;
        struct maple_device *mdev;

        mdev = devptr;
        maple_drv = to_maple_driver(driver);
        if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
        device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
        char *p, *recvbuf;
        unsigned long function;
        int matched, error;

        recvbuf = mdev->mq->recvbuf->buf;
        /*
         * Copy the data as individual elements in
         * case of memory optimisation.
         */
        memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
        memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
        memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
        memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
        memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
        memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
        memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
        memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
        memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
        mdev->product_name[30] = '\0';
        memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
        mdev->product_licence[60] = '\0';

        /* strip trailing spaces from the name and licence strings */
        for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;
        for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
                if (*p == ' ')
                        *p = '\0';
                else
                        break;

        function = be32_to_cpu(mdev->devinfo.function);

        dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
                mdev->product_name, function, mdev->port, mdev->unit);

        if (function > 0x200) {
                /* Do this silently - as not a real device */
                function = 0;
                mdev->driver = &maple_unsupported_device;
                dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
        } else {
                matched =
                        bus_for_each_drv(&maple_bus_type, NULL, mdev,
                                maple_check_matching_driver);

                if (matched == 0) {
                        /* Driver does not exist yet */
                        dev_info(&mdev->dev, "no driver found\n");
                        mdev->driver = &maple_unsupported_device;
                }
                dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
                             mdev->unit, function);
        }

        mdev->function = function;
        mdev->dev.release = &maple_release_device;

        atomic_set(&mdev->busy, 0);
        error = device_register(&mdev->dev);
        if (error) {
                dev_warn(&mdev->dev, "could not register device at"
                        " (%d, %d), with error 0x%X\n", mdev->port,
                        mdev->unit, error);
                maple_free_dev(mdev);
                mdev = NULL;
                return;
        }
}

/*
 * If a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached.
 */
static int check_maple_device(struct device *device, void *portptr)
{
        struct maple_device_specify *ds;
        struct maple_device *mdev;

        ds = portptr;
        mdev = to_maple_dev(device);
        if (mdev->port == ds->port && mdev->unit == ds->unit)
                return 1;
        return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
        int add;
        struct maple_device *mdev = to_maple_dev(device);

        if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
                time_after(jiffies, mdev->when)) {
                /* bounce if we cannot add */
                add = maple_add_packet(mdev,
                        be32_to_cpu(mdev->devinfo.function),
                        MAPLE_COMMAND_GETCOND, 1, NULL);
                if (!add)
                        mdev->when = jiffies + mdev->interval;
        } else {
                if (time_after(jiffies, maple_pnp_time))
                        /*
                         * Ensure we don't have block reads and devinfo
                         * calls interfering with one another - so flag
                         * the device as busy.
                         */
                        if (atomic_read(&mdev->busy) == 0) {
                                atomic_set(&mdev->busy, 1);
                                maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                        }
        }
        return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
        int x, locking;
        struct maple_device *mdev;

        if (!maple_dma_done())
                return;

        ctrl_outl(0, MAPLE_ENABLE);

        if (!list_empty(&maple_sentq))
                goto finish;

        /*
         * Set up essential commands - to fetch data and
         * check devices are still present.
         */
        bus_for_each_dev(&maple_bus_type, NULL, NULL,
                setup_maple_commands);

        if (time_after(jiffies, maple_pnp_time)) {
                /*
                 * Scan the empty ports - the bus is flaky and may have
                 * mis-reported emptiness.
                 */
                for (x = 0; x < MAPLE_PORTS; x++) {
                        if (checked[x] && empty[x]) {
                                mdev = baseunits[x];
                                if (!mdev)
                                        break;
                                atomic_set(&mdev->busy, 1);
                                locking = maple_add_packet(mdev, 0,
                                        MAPLE_COMMAND_DEVINFO, 0, NULL);
                                if (!locking)
                                        break;
                        }
                }

                maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
        }

finish:
        maple_send();
}

/* handle devices added via hotplug - placing them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
        int retval, k, devcheck;
        struct maple_device *mdev_add;
        struct maple_device_specify ds;

        ds.port = mdev->port;
        for (k = 0; k < 5; k++) {
                ds.unit = k + 1;
                retval =
                    bus_for_each_dev(&maple_bus_type, NULL, &ds,
                                     check_maple_device);
                if (retval) {
                        submask = submask >> 1;
                        continue;
                }
                devcheck = submask & 0x01;
                if (devcheck) {
                        mdev_add = maple_alloc_dev(mdev->port, k + 1);
                        if (!mdev_add)
                                return;
                        atomic_set(&mdev_add->busy, 1);
                        maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                        /* mark that we are checking sub devices */
                        scanning = 1;
                }
                submask = submask >> 1;
        }
}
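
/*
 * For reference (illustrative): submask is the low five bits of the
 * port's DEVINFO reply, one bit per subunit socket (see
 * maple_response_devinfo() below). A submask of 0x01 means a single
 * peripheral in socket 1 (unit 1) - say a memory card plugged into a
 * controller - so only bit 0 triggers maple_alloc_dev() in the loop
 * above; 0x03 would allocate units 1 and 2.
 */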

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
        int killbit;

        killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
        killbit = ~killbit;
        killbit &= 0xFF;
        subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
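
/*
 * Worked example (illustrative): for unit 2, killbit starts as
 * (1 << 1) & 0x1f = 0x02, is inverted and masked to 0xFD, and so
 * clears bit 1 of subdevice_map for the port. Unit 0 (the port
 * itself) maps to 0x20, clearing bit 5 instead.
 */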

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
        maple_clean_submap(mdev);

        if (likely(mdev->unit != 0)) {
                /*
                 * Block devices play up
                 * and give the impression they have
                 * been removed even when still in place or
                 * trip the mtd layer when they have
                 * really gone - this code traps that eventuality
                 * and ensures we aren't overloaded with useless
                 * error messages.
                 */
                if (mdev->can_unload) {
                        if (!mdev->can_unload(mdev)) {
                                atomic_set(&mdev->busy, 2);
                                wake_up(&mdev->maple_wait);
                                return;
                        }
                }

                dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
                        mdev->port, mdev->unit);
                maple_detach_driver(mdev);
                return;
        } else {
                if (!started || !fullscan) {
                        if (checked[mdev->port] == false) {
                                checked[mdev->port] = true;
                                empty[mdev->port] = true;
                                dev_info(&mdev->dev, "no devices"
                                        " attached to port %d\n",
                                        mdev->port);
                        }
                        return;
                }
        }
        /* Some hardware devices generate false detach messages on unit 0 */
        atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
                                   char *recvbuf)
{
        char submask;

        if (!started || (scanning == 2) || !fullscan) {
                if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
                        checked[mdev->port] = true;
                        maple_attach_driver(mdev);
                } else {
                        if (mdev->unit != 0)
                                maple_attach_driver(mdev);
                        if (mdev->unit == 0) {
                                empty[mdev->port] = false;
                                maple_attach_driver(mdev);
                        }
                }
        }
        if (mdev->unit == 0) {
                submask = recvbuf[2] & 0x1F;
                if (submask ^ subdevice_map[mdev->port]) {
                        maple_map_subunits(mdev, submask);
                        subdevice_map[mdev->port] = submask;
                }
        }
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
        if (mdev->fileerr_handler) {
                mdev->fileerr_handler(mdev, recvbuf);
                return;
        }

        dev_warn(&mdev->dev, "device at (%d, %d) reports"
                " file error 0x%X\n", mdev->port, mdev->unit,
                ((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
        int i;
        struct maple_device *mdev;

        fullscan = 1;
        for (i = 0; i < MAPLE_PORTS; i++) {
                if (checked[i] == false) {
                        fullscan = 0;
                        mdev = baseunits[i];
                        maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
                                0, NULL);
                }
        }
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
        struct mapleq *mq, *nmq;
        struct maple_device *mdev;
        char *recvbuf;
        enum maple_code code;

        if (!maple_dma_done())
                return;
        ctrl_outl(0, MAPLE_ENABLE);
        if (!list_empty(&maple_sentq)) {
                list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                        mdev = mq->dev;
                        recvbuf = mq->recvbuf->buf;
                        dma_cache_sync(&mdev->dev, recvbuf, 0x400,
                                DMA_FROM_DEVICE);
                        code = recvbuf[0];
                        kfree(mq->sendbuf);
                        list_del_init(&mq->list);
                        switch (code) {
                        case MAPLE_RESPONSE_NONE:
                                maple_response_none(mdev);
                                break;

                        case MAPLE_RESPONSE_DEVINFO:
                                maple_response_devinfo(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_DATATRF:
                                if (mdev->callback)
                                        mdev->callback(mq);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_FILEERR:
                                maple_response_fileerr(mdev, recvbuf);
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        case MAPLE_RESPONSE_AGAIN:
                        case MAPLE_RESPONSE_BADCMD:
                        case MAPLE_RESPONSE_BADFUNC:
                                dev_warn(&mdev->dev, "non-fatal error"
                                        " 0x%X at (%d, %d)\n", code,
                                        mdev->port, mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_ALLINFO:
                                dev_notice(&mdev->dev, "extended"
                                        " device information request for"
                                        " (%d, %d) but call is not"
                                        " supported\n", mdev->port,
                                        mdev->unit);
                                atomic_set(&mdev->busy, 0);
                                break;

                        case MAPLE_RESPONSE_OK:
                                atomic_set(&mdev->busy, 0);
                                wake_up(&mdev->maple_wait);
                                break;

                        default:
                                break;
                        }
                }
                /* if scanning is 1 then we have subdevices to check */
                if (scanning == 1) {
                        maple_send();
                        scanning = 2;
                } else
                        scanning = 0;
                /* check if we have actually tested all ports yet */
                if (!fullscan)
                        maple_port_rescan();
                /* mark that we have been through the first scan */
                started = 1;
        }
        maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
        /* Load everything into the bottom half */
        schedule_work(&maple_dma_process);
        return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
        schedule_work(&maple_vblank_process);
        return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
        return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
                IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
        return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
                IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
        maple_sendbuf =
            (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      MAPLE_DMA_PAGES);
        if (!maple_sendbuf)
                return -ENOMEM;
        return 0;
}

static int maple_match_bus_driver(struct device *devptr,
                                  struct device_driver *drvptr)
{
        struct maple_driver *maple_drv = to_maple_driver(drvptr);
        struct maple_device *maple_dev = to_maple_dev(devptr);

        /* Trap empty port case */
        if (maple_dev->devinfo.function == 0xFFFFFFFF)
                return 0;
        else if (maple_dev->devinfo.function &
                 cpu_to_be32(maple_drv->function))
                return 1;
        return 0;
}
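
/*
 * For reference (illustrative): devinfo.function is a big-endian
 * bitmap of the function codes a device implements, while a driver's
 * function field names the codes it services. A controller reporting
 * function bit 0x001 therefore matches a driver whose .function is
 * MAPLE_FUNC_CONTROLLER, and one driver can match several functions
 * by OR-ing codes together.
 */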

static int maple_bus_uevent(struct device *dev,
                            struct kobj_uevent_env *env)
{
        return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
        .drv = {
                .name = "maple_unsupported_device",
                .bus = &maple_bus_type,
        },
};

/*
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
        .name = "maple",
        .match = maple_match_bus_driver,
        .uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
        .init_name = "maple",
        .release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
        int retval, i;
        struct maple_device *mdev[MAPLE_PORTS];

        ctrl_outl(0, MAPLE_ENABLE);

        retval = device_register(&maple_bus);
        if (retval)
                goto cleanup;

        retval = bus_register(&maple_bus_type);
        if (retval)
                goto cleanup_device;

        retval = driver_register(&maple_unsupported_device.drv);
        if (retval)
                goto cleanup_bus;

        /* allocate memory for maple bus dma */
        retval = maple_get_dma_buffer();
        if (retval) {
                dev_err(&maple_bus, "failed to allocate DMA buffers\n");
                goto cleanup_basic;
        }

        /* set up DMA interrupt handler */
        retval = maple_set_dma_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab maple DMA IRQ\n");
                goto cleanup_dma;
        }

        /* set up VBLANK interrupt handler */
        retval = maple_set_vblank_interrupt_handler();
        if (retval) {
                dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
                goto cleanup_irq;
        }

        maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
        if (!maple_queue_cache) {
                retval = -ENOMEM;
                goto cleanup_bothirqs;
        }

        INIT_LIST_HEAD(&maple_waitq);
        INIT_LIST_HEAD(&maple_sentq);

        /* setup maple ports */
        for (i = 0; i < MAPLE_PORTS; i++) {
                checked[i] = false;
                empty[i] = false;
                mdev[i] = maple_alloc_dev(i, 0);
                if (!mdev[i]) {
                        while (i-- > 0)
                                maple_free_dev(mdev[i]);
                        retval = -ENOMEM;
                        goto cleanup_cache;
                }
                baseunits[i] = mdev[i];
                atomic_set(&mdev[i]->busy, 1);
                maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
                subdevice_map[i] = 0;
        }

        maple_pnp_time = jiffies + HZ;
        /* prepare initial queue */
        maple_send();
        dev_info(&maple_bus, "bus core now registered\n");

        return 0;

cleanup_cache:
        kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
        free_irq(HW_EVENT_VSYNC, &maple_unsupported_device);

cleanup_irq:
        free_irq(HW_EVENT_MAPLE_DMA, &maple_unsupported_device);

cleanup_dma:
        free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
        driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
        bus_unregister(&maple_bus_type);

cleanup_device:
        device_unregister(&maple_bus);

cleanup:
        printk(KERN_ERR "Maple bus registration failed\n");
        return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);