linux/drivers/gpu/drm/mga/mga_dma.c
   1/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
   2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
   3 *
   4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
   5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
   6 * All Rights Reserved.
   7 *
   8 * Permission is hereby granted, free of charge, to any person obtaining a
   9 * copy of this software and associated documentation files (the "Software"),
  10 * to deal in the Software without restriction, including without limitation
  11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12 * and/or sell copies of the Software, and to permit persons to whom the
  13 * Software is furnished to do so, subject to the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the next
  16 * paragraph) shall be included in all copies or substantial portions of the
  17 * Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  25 * DEALINGS IN THE SOFTWARE.
  26 */
  27
  28/**
  29 * \file mga_dma.c
  30 * DMA support for MGA G200 / G400.
  31 *
  32 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  33 * \author Jeff Hartmann <jhartmann@valinux.com>
  34 * \author Keith Whitwell <keith@tungstengraphics.com>
  35 * \author Gareth Hughes <gareth@valinux.com>
  36 */
  37
  38#include "drmP.h"
  39#include "drm.h"
  40#include "drm_sarea.h"
  41#include "mga_drm.h"
  42#include "mga_drv.h"
  43
  44#define MGA_DEFAULT_USEC_TIMEOUT        10000
  45#define MGA_FREELIST_DEBUG              0
  46
  47#define MINIMAL_CLEANUP 0
  48#define FULL_CLEANUP 1
  49static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
  50
  51/* ================================================================
  52 * Engine control
  53 */
  54
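     /**
      * Poll the STATUS register until the engine-idle bits report end of
      * primary DMA (MGA_ENDPRDMASTS), or give up with -EBUSY after
      * usec_timeout polls.  The byte write to MGA_CRTC_INDEX on success
      * appears to be a carried-over hardware workaround.
      */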
  55int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
  56{
  57        u32 status = 0;
  58        int i;
  59        DRM_DEBUG("\n");
  60
  61        for (i = 0; i < dev_priv->usec_timeout; i++) {
  62                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
  63                if (status == MGA_ENDPRDMASTS) {
  64                        MGA_WRITE8(MGA_CRTC_INDEX, 0);
  65                        return 0;
  66                }
  67                DRM_UDELAY(1);
  68        }
  69
  70#if MGA_DMA_DEBUG
  71        DRM_ERROR("failed!\n");
  72        DRM_INFO("   status=0x%08x\n", status);
  73#endif
  74        return -EBUSY;
  75}
  76
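     /**
      * Reset the software view of the primary DMA stream: tail, free space
      * and last-flush bookkeeping go back to their post-init values and the
      * SAREA wrap counter is cleared.  The hardware is not touched here.
      */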
  77static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
  78{
  79        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
  80        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
  81
  82        DRM_DEBUG("\n");
  83
  84        /* The primary DMA stream should look like new right about now.
  85         */
  86        primary->tail = 0;
  87        primary->space = primary->size;
  88        primary->last_flush = 0;
  89
  90        sarea_priv->last_wrap = 0;
  91
  92        /* FIXME: Reset counters, buffer ages etc...
  93         */
  94
  95        /* FIXME: What else do we need to reinitialize?  WARP stuff?
  96         */
  97
  98        return 0;
  99}
 100
 101/* ================================================================
 102 * Primary DMA stream
 103 */
 104
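     /**
      * Flush the primary DMA stream out to the hardware.
      *
      * Waits (bounded by usec_timeout) for the engine to report end of
      * primary DMA, emits a block of DMAPAD commands so the next flush does
      * not begin on a partially-read command, recomputes the free space from
      * the hardware read pointer (PRIMADDRESS) and finally kicks the engine
      * by writing the new tail to PRIMEND.
      */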
 105void mga_do_dma_flush(drm_mga_private_t * dev_priv)
 106{
 107        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
 108        u32 head, tail;
 109        u32 status = 0;
 110        int i;
 111        DMA_LOCALS;
 112        DRM_DEBUG("\n");
 113
  114        /* We need to wait so that we can do a safe flush */
 115        for (i = 0; i < dev_priv->usec_timeout; i++) {
 116                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
 117                if (status == MGA_ENDPRDMASTS)
 118                        break;
 119                DRM_UDELAY(1);
 120        }
 121
 122        if (primary->tail == primary->last_flush) {
 123                DRM_DEBUG("   bailing out...\n");
 124                return;
 125        }
 126
 127        tail = primary->tail + dev_priv->primary->offset;
 128
 129        /* We need to pad the stream between flushes, as the card
 130         * actually (partially?) reads the first of these commands.
 131         * See page 4-16 in the G400 manual, middle of the page or so.
 132         */
 133        BEGIN_DMA(1);
 134
 135        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
 136                  MGA_DMAPAD, 0x00000000,
 137                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
 138
 139        ADVANCE_DMA();
 140
 141        primary->last_flush = primary->tail;
 142
 143        head = MGA_READ(MGA_PRIMADDRESS);
 144
 145        if (head <= tail) {
 146                primary->space = primary->size - primary->tail;
 147        } else {
 148                primary->space = head - tail;
 149        }
 150
 151        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
 152        DRM_DEBUG("   tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
 153        DRM_DEBUG("  space = 0x%06x\n", primary->space);
 154
 155        mga_flush_write_combine();
 156        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
 157
 158        DRM_DEBUG("done.\n");
 159}
 160
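     /**
      * Begin wrapping the primary DMA stream back to its start: pad the end
      * of the buffer, reset the software tail to zero, bump the local wrap
      * count, recompute the free space from the hardware read pointer and
      * kick the engine up to the old tail.  The wrap is completed later by
      * mga_do_dma_wrap_end() once the read pointer can safely be moved.
      */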
 161void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
 162{
 163        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
 164        u32 head, tail;
 165        DMA_LOCALS;
 166        DRM_DEBUG("\n");
 167
 168        BEGIN_DMA_WRAP();
 169
 170        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
 171                  MGA_DMAPAD, 0x00000000,
 172                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
 173
 174        ADVANCE_DMA();
 175
 176        tail = primary->tail + dev_priv->primary->offset;
 177
 178        primary->tail = 0;
 179        primary->last_flush = 0;
 180        primary->last_wrap++;
 181
 182        head = MGA_READ(MGA_PRIMADDRESS);
 183
 184        if (head == dev_priv->primary->offset) {
 185                primary->space = primary->size;
 186        } else {
 187                primary->space = head - dev_priv->primary->offset;
 188        }
 189
 190        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
 191        DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
 192        DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
 193        DRM_DEBUG("  space = 0x%06x\n", primary->space);
 194
 195        mga_flush_write_combine();
 196        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
 197
 198        set_bit(0, &primary->wrapped);
 199        DRM_DEBUG("done.\n");
 200}
 201
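     /**
      * Complete a primary DMA wrap: point the hardware read pointer back at
      * the start of the buffer, publish the new wrap count in the SAREA and
      * clear the "wrapped" flag set by mga_do_dma_wrap_start().
      */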
 202void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
 203{
 204        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
 205        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 206        u32 head = dev_priv->primary->offset;
 207        DRM_DEBUG("\n");
 208
 209        sarea_priv->last_wrap++;
 210        DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
 211
 212        mga_flush_write_combine();
 213        MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
 214
 215        clear_bit(0, &primary->wrapped);
 216        DRM_DEBUG("done.\n");
 217}
 218
 219/* ================================================================
 220 * Freelist management
 221 */
 222
 223#define MGA_BUFFER_USED         ~0
 224#define MGA_BUFFER_FREE         0
 225
 226#if MGA_FREELIST_DEBUG
 227static void mga_freelist_print(struct drm_device * dev)
 228{
 229        drm_mga_private_t *dev_priv = dev->dev_private;
 230        drm_mga_freelist_t *entry;
 231
 232        DRM_INFO("\n");
 233        DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
 234                 dev_priv->sarea_priv->last_dispatch,
 235                 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
 236                                dev_priv->primary->offset));
 237        DRM_INFO("current freelist:\n");
 238
 239        for (entry = dev_priv->head->next; entry; entry = entry->next) {
 240                DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
 241                         entry, entry->buf->idx, entry->age.head,
 242                         (unsigned long)(entry->age.head - dev_priv->primary->offset));
 243        }
 244        DRM_INFO("\n");
 245}
 246#endif
 247
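     /**
      * Build the buffer freelist: a doubly-linked list with a dummy head
      * node and one entry per DMA buffer, each tagged MGA_BUFFER_FREE.
      * dev_priv->tail ends up at the first entry added, which is where
      * mga_freelist_get() takes buffers from.
      */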
 248static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
 249{
 250        struct drm_device_dma *dma = dev->dma;
 251        struct drm_buf *buf;
 252        drm_mga_buf_priv_t *buf_priv;
 253        drm_mga_freelist_t *entry;
 254        int i;
 255        DRM_DEBUG("count=%d\n", dma->buf_count);
 256
 257        dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
 258        if (dev_priv->head == NULL)
 259                return -ENOMEM;
 260
 261        SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
 262
 263        for (i = 0; i < dma->buf_count; i++) {
 264                buf = dma->buflist[i];
 265                buf_priv = buf->dev_private;
 266
 267                entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
 268                if (entry == NULL)
 269                        return -ENOMEM;
 270
 271                entry->next = dev_priv->head->next;
 272                entry->prev = dev_priv->head;
 273                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
 274                entry->buf = buf;
 275
 276                if (dev_priv->head->next != NULL)
 277                        dev_priv->head->next->prev = entry;
 278                if (entry->next == NULL)
 279                        dev_priv->tail = entry;
 280
 281                buf_priv->list_entry = entry;
 282                buf_priv->discard = 0;
 283                buf_priv->dispatched = 0;
 284
 285                dev_priv->head->next = entry;
 286        }
 287
 288        return 0;
 289}
 290
 291static void mga_freelist_cleanup(struct drm_device * dev)
 292{
 293        drm_mga_private_t *dev_priv = dev->dev_private;
 294        drm_mga_freelist_t *entry;
 295        drm_mga_freelist_t *next;
 296        DRM_DEBUG("\n");
 297
 298        entry = dev_priv->head;
 299        while (entry) {
 300                next = entry->next;
 301                kfree(entry);
 302                entry = next;
 303        }
 304
 305        dev_priv->head = dev_priv->tail = NULL;
 306}
 307
 308#if 0
 309/* FIXME: Still needed?
 310 */
 311static void mga_freelist_reset(struct drm_device * dev)
 312{
 313        struct drm_device_dma *dma = dev->dma;
 314        struct drm_buf *buf;
 315        drm_mga_buf_priv_t *buf_priv;
 316        int i;
 317
 318        for (i = 0; i < dma->buf_count; i++) {
 319                buf = dma->buflist[i];
 320                buf_priv = buf->dev_private;
 321                SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
 322        }
 323}
 324#endif
 325
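     /**
      * Take a buffer from the tail of the freelist.  The entry is only
      * handed out once TEST_AGE() shows the hardware read pointer and wrap
      * count have passed the position recorded when the buffer was last
      * dispatched, i.e. the card should be done with it; otherwise NULL is
      * returned and the caller must retry later.
      */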
 326static struct drm_buf *mga_freelist_get(struct drm_device * dev)
 327{
 328        drm_mga_private_t *dev_priv = dev->dev_private;
 329        drm_mga_freelist_t *next;
 330        drm_mga_freelist_t *prev;
 331        drm_mga_freelist_t *tail = dev_priv->tail;
 332        u32 head, wrap;
 333        DRM_DEBUG("\n");
 334
 335        head = MGA_READ(MGA_PRIMADDRESS);
 336        wrap = dev_priv->sarea_priv->last_wrap;
 337
 338        DRM_DEBUG("   tail=0x%06lx %d\n",
 339                  tail->age.head ?
 340                  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
 341                  tail->age.wrap);
 342        DRM_DEBUG("   head=0x%06lx %d\n",
 343                  (unsigned long)(head - dev_priv->primary->offset), wrap);
 344
 345        if (TEST_AGE(&tail->age, head, wrap)) {
 346                prev = dev_priv->tail->prev;
 347                next = dev_priv->tail;
 348                prev->next = NULL;
 349                next->prev = next->next = NULL;
 350                dev_priv->tail = prev;
 351                SET_AGE(&next->age, MGA_BUFFER_USED, 0);
 352                return next->buf;
 353        }
 354
 355        DRM_DEBUG("returning NULL!\n");
 356        return NULL;
 357}
 358
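     /**
      * Return a buffer to the freelist.  A buffer whose age is still
      * MGA_BUFFER_USED was never dispatched, so it is marked free and
      * appended at the tail for immediate reuse; a dispatched buffer keeps
      * its age and is inserted just after the head, giving the hardware the
      * longest possible time to finish with it.
      */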
 359int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 360{
 361        drm_mga_private_t *dev_priv = dev->dev_private;
 362        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
 363        drm_mga_freelist_t *head, *entry, *prev;
 364
 365        DRM_DEBUG("age=0x%06lx wrap=%d\n",
 366                  (unsigned long)(buf_priv->list_entry->age.head -
 367                                  dev_priv->primary->offset),
 368                  buf_priv->list_entry->age.wrap);
 369
 370        entry = buf_priv->list_entry;
 371        head = dev_priv->head;
 372
 373        if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
 374                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
 375                prev = dev_priv->tail;
 376                prev->next = entry;
 377                entry->prev = prev;
 378                entry->next = NULL;
 379        } else {
 380                prev = head->next;
 381                head->next = entry;
 382                prev->prev = entry;
 383                entry->prev = head;
 384                entry->next = prev;
 385        }
 386
 387        return 0;
 388}
 389
 390/* ================================================================
 391 * DMA initialization, cleanup
 392 */
 393
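     /**
      * Called at load time: allocate the driver-private structure, record
      * the chipset type passed in via flags and the MMIO base/size from PCI
      * resource 1, register three extra statistics counters and initialize
      * vblank support.
      */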
 394int mga_driver_load(struct drm_device * dev, unsigned long flags)
 395{
 396        drm_mga_private_t *dev_priv;
 397        int ret;
 398
 399        dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
 400        if (!dev_priv)
 401                return -ENOMEM;
 402
 403        dev->dev_private = (void *)dev_priv;
 404
 405        dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
 406        dev_priv->chipset = flags;
 407
 408        dev_priv->mmio_base = drm_get_resource_start(dev, 1);
 409        dev_priv->mmio_size = drm_get_resource_len(dev, 1);
 410
 411        dev->counters += 3;
 412        dev->types[6] = _DRM_STAT_IRQ;
 413        dev->types[7] = _DRM_STAT_PRIMARY;
 414        dev->types[8] = _DRM_STAT_SECONDARY;
 415
 416        ret = drm_vblank_init(dev, 1);
 417
 418        if (ret) {
 419                (void) mga_driver_unload(dev);
 420                return ret;
 421        }
 422
 423        return 0;
 424}
 425
 426#if __OS_HAS_AGP
 427/**
 428 * Bootstrap the driver for AGP DMA.
 429 *
 430 * \todo
  431 * Investigate whether there is any benefit to storing the WARP microcode in
 432 * AGP memory.  If not, the microcode may as well always be put in PCI
 433 * memory.
 434 *
 435 * \todo
 436 * This routine needs to set dma_bs->agp_mode to the mode actually configured
 437 * in the hardware.  Looking just at the Linux AGP driver code, I don't see
 438 * an easy way to determine this.
 439 *
 440 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
 441 */
 442static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
 443                                    drm_mga_dma_bootstrap_t * dma_bs)
 444{
 445        drm_mga_private_t *const dev_priv =
 446            (drm_mga_private_t *) dev->dev_private;
 447        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
 448        int err;
 449        unsigned offset;
 450        const unsigned secondary_size = dma_bs->secondary_bin_count
 451            * dma_bs->secondary_bin_size;
 452        const unsigned agp_size = (dma_bs->agp_size << 20);
 453        struct drm_buf_desc req;
 454        struct drm_agp_mode mode;
 455        struct drm_agp_info info;
 456        struct drm_agp_buffer agp_req;
 457        struct drm_agp_binding bind_req;
 458
 459        /* Acquire AGP. */
 460        err = drm_agp_acquire(dev);
 461        if (err) {
 462                DRM_ERROR("Unable to acquire AGP: %d\n", err);
 463                return err;
 464        }
 465
 466        err = drm_agp_info(dev, &info);
 467        if (err) {
 468                DRM_ERROR("Unable to get AGP info: %d\n", err);
 469                return err;
 470        }
 471
 472        mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
 473        err = drm_agp_enable(dev, mode);
 474        if (err) {
 475                DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
 476                return err;
 477        }
 478
 479        /* In addition to the usual AGP mode configuration, the G200 AGP cards
 480         * need to have the AGP mode "manually" set.
 481         */
 482
 483        if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
 484                if (mode.mode & 0x02) {
 485                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
 486                } else {
 487                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
 488                }
 489        }
 490
 491        /* Allocate and bind AGP memory. */
 492        agp_req.size = agp_size;
 493        agp_req.type = 0;
 494        err = drm_agp_alloc(dev, &agp_req);
 495        if (err) {
 496                dev_priv->agp_size = 0;
 497                DRM_ERROR("Unable to allocate %uMB AGP memory\n",
 498                          dma_bs->agp_size);
 499                return err;
 500        }
 501
 502        dev_priv->agp_size = agp_size;
 503        dev_priv->agp_handle = agp_req.handle;
 504
 505        bind_req.handle = agp_req.handle;
 506        bind_req.offset = 0;
 507        err = drm_agp_bind(dev, &bind_req);
 508        if (err) {
 509                DRM_ERROR("Unable to bind AGP memory: %d\n", err);
 510                return err;
 511        }
 512
 513        /* Make drm_addbufs happy by not trying to create a mapping for less
 514         * than a page.
 515         */
 516        if (warp_size < PAGE_SIZE)
 517                warp_size = PAGE_SIZE;
 518
 519        offset = 0;
 520        err = drm_addmap(dev, offset, warp_size,
 521                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
 522        if (err) {
 523                DRM_ERROR("Unable to map WARP microcode: %d\n", err);
 524                return err;
 525        }
 526
 527        offset += warp_size;
 528        err = drm_addmap(dev, offset, dma_bs->primary_size,
 529                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
 530        if (err) {
 531                DRM_ERROR("Unable to map primary DMA region: %d\n", err);
 532                return err;
 533        }
 534
 535        offset += dma_bs->primary_size;
 536        err = drm_addmap(dev, offset, secondary_size,
 537                         _DRM_AGP, 0, &dev->agp_buffer_map);
 538        if (err) {
 539                DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
 540                return err;
 541        }
 542
 543        (void)memset(&req, 0, sizeof(req));
 544        req.count = dma_bs->secondary_bin_count;
 545        req.size = dma_bs->secondary_bin_size;
 546        req.flags = _DRM_AGP_BUFFER;
 547        req.agp_start = offset;
 548
 549        err = drm_addbufs_agp(dev, &req);
 550        if (err) {
 551                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
 552                return err;
 553        }
 554
 555        {
 556                struct drm_map_list *_entry;
 557                unsigned long agp_token = 0;
 558
 559                list_for_each_entry(_entry, &dev->maplist, head) {
 560                        if (_entry->map == dev->agp_buffer_map)
 561                                agp_token = _entry->user_token;
 562                }
 563                if (!agp_token)
 564                        return -EFAULT;
 565
 566                dev->agp_buffer_token = agp_token;
 567        }
 568
 569        offset += secondary_size;
 570        err = drm_addmap(dev, offset, agp_size - offset,
 571                         _DRM_AGP, 0, &dev_priv->agp_textures);
 572        if (err) {
  573                DRM_ERROR("Unable to map AGP texture region: %d\n", err);
 574                return err;
 575        }
 576
 577        drm_core_ioremap(dev_priv->warp, dev);
 578        drm_core_ioremap(dev_priv->primary, dev);
 579        drm_core_ioremap(dev->agp_buffer_map, dev);
 580
 581        if (!dev_priv->warp->handle ||
 582            !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
 583                DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
 584                          dev_priv->warp->handle, dev_priv->primary->handle,
 585                          dev->agp_buffer_map->handle);
 586                return -ENOMEM;
 587        }
 588
 589        dev_priv->dma_access = MGA_PAGPXFER;
 590        dev_priv->wagp_enable = MGA_WAGP_ENABLE;
 591
 592        DRM_INFO("Initialized card for AGP DMA.\n");
 593        return 0;
 594}
 595#else
 596static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
 597                                    drm_mga_dma_bootstrap_t * dma_bs)
 598{
 599        return -EINVAL;
 600}
 601#endif
 602
 603/**
 604 * Bootstrap the driver for PCI DMA.
 605 *
 606 * \todo
  607 * The algorithm for decreasing the size of the primary DMA buffer could be
  608 * better: round the size up to the nearest page, then decrease the request
  609 * size by a single page each pass through the loop.
 610 *
 611 * \todo
 612 * Determine whether the maximum address passed to drm_pci_alloc is correct.
 613 * The same goes for drm_addbufs_pci.
 614 *
 615 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 616 */
 617static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
 618                                    drm_mga_dma_bootstrap_t * dma_bs)
 619{
 620        drm_mga_private_t *const dev_priv =
 621            (drm_mga_private_t *) dev->dev_private;
 622        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
 623        unsigned int primary_size;
 624        unsigned int bin_count;
 625        int err;
 626        struct drm_buf_desc req;
 627
 628        if (dev->dma == NULL) {
 629                DRM_ERROR("dev->dma is NULL\n");
 630                return -EFAULT;
 631        }
 632
 633        /* Make drm_addbufs happy by not trying to create a mapping for less
 634         * than a page.
 635         */
 636        if (warp_size < PAGE_SIZE)
 637                warp_size = PAGE_SIZE;
 638
 639        /* The proper alignment is 0x100 for this mapping */
 640        err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
 641                         _DRM_READ_ONLY, &dev_priv->warp);
 642        if (err != 0) {
 643                DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
 644                          err);
 645                return err;
 646        }
 647
 648        /* Other than the bottom two bits being used to encode other
 649         * information, there don't appear to be any restrictions on the
 650         * alignment of the primary or secondary DMA buffers.
 651         */
 652
 653        for (primary_size = dma_bs->primary_size; primary_size != 0;
 654             primary_size >>= 1) {
 655                /* The proper alignment for this mapping is 0x04 */
 656                err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
 657                                 _DRM_READ_ONLY, &dev_priv->primary);
 658                if (!err)
 659                        break;
 660        }
 661
 662        if (err != 0) {
 663                DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
 664                return -ENOMEM;
 665        }
 666
 667        if (dev_priv->primary->size != dma_bs->primary_size) {
 668                DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
 669                         dma_bs->primary_size,
 670                         (unsigned)dev_priv->primary->size);
 671                dma_bs->primary_size = dev_priv->primary->size;
 672        }
 673
 674        for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
 675             bin_count--) {
 676                (void)memset(&req, 0, sizeof(req));
 677                req.count = bin_count;
 678                req.size = dma_bs->secondary_bin_size;
 679
 680                err = drm_addbufs_pci(dev, &req);
 681                if (!err) {
 682                        break;
 683                }
 684        }
 685
 686        if (bin_count == 0) {
 687                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
 688                return err;
 689        }
 690
 691        if (bin_count != dma_bs->secondary_bin_count) {
 692                DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
 693                         "to %u.\n", dma_bs->secondary_bin_count, bin_count);
 694
 695                dma_bs->secondary_bin_count = bin_count;
 696        }
 697
 698        dev_priv->dma_access = 0;
 699        dev_priv->wagp_enable = 0;
 700
 701        dma_bs->agp_mode = 0;
 702
 703        DRM_INFO("Initialized card for PCI DMA.\n");
 704        return 0;
 705}
 706
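     /**
      * Common part of the DMA bootstrap: map the MMIO and status regions,
      * then hand off to the AGP or PCI specific bootstrap.  An AGP card
      * whose AGP bootstrap fails is cleaned up and retried as a PCI card.
      */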
 707static int mga_do_dma_bootstrap(struct drm_device * dev,
 708                                drm_mga_dma_bootstrap_t * dma_bs)
 709{
 710        const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
  711        int err = 0;    /* stays 0 if the AGP bootstrap path is skipped below */
 712        drm_mga_private_t *const dev_priv =
 713            (drm_mga_private_t *) dev->dev_private;
 714
 715        dev_priv->used_new_dma_init = 1;
 716
 717        /* The first steps are the same for both PCI and AGP based DMA.  Map
   718         * the card's MMIO registers and map a status page.
 719         */
 720        err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
 721                         _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
 722        if (err) {
 723                DRM_ERROR("Unable to map MMIO region: %d\n", err);
 724                return err;
 725        }
 726
 727        err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 728                         _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
 729                         &dev_priv->status);
 730        if (err) {
 731                DRM_ERROR("Unable to map status region: %d\n", err);
 732                return err;
 733        }
 734
 735        /* The DMA initialization procedure is slightly different for PCI and
 736         * AGP cards.  AGP cards just allocate a large block of AGP memory and
 737         * carve off portions of it for internal uses.  The remaining memory
 738         * is returned to user-mode to be used for AGP textures.
 739         */
 740        if (is_agp) {
 741                err = mga_do_agp_dma_bootstrap(dev, dma_bs);
 742        }
 743
 744        /* If we attempted to initialize the card for AGP DMA but failed,
   745         * clean up any mess that may have been created.
 746         */
 747
 748        if (err) {
 749                mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
 750        }
 751
  752        /* Not only do we want to try to initialize PCI cards for PCI DMA,
  753         * but we also try to initialize AGP cards that could not be
 754         * initialized for AGP DMA.  This covers the case where we have an AGP
 755         * card in a system with an unsupported AGP chipset.  In that case the
 756         * card will be detected as AGP, but we won't be able to allocate any
 757         * AGP memory, etc.
 758         */
 759
 760        if (!is_agp || err) {
 761                err = mga_do_pci_dma_bootstrap(dev, dma_bs);
 762        }
 763
 764        return err;
 765}
 766
 767int mga_dma_bootstrap(struct drm_device *dev, void *data,
 768                      struct drm_file *file_priv)
 769{
 770        drm_mga_dma_bootstrap_t *bootstrap = data;
 771        int err;
 772        static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
 773        const drm_mga_private_t *const dev_priv =
 774                (drm_mga_private_t *) dev->dev_private;
 775
 776        err = mga_do_dma_bootstrap(dev, bootstrap);
 777        if (err) {
 778                mga_do_cleanup_dma(dev, FULL_CLEANUP);
 779                return err;
 780        }
 781
 782        if (dev_priv->agp_textures != NULL) {
 783                bootstrap->texture_handle = dev_priv->agp_textures->offset;
 784                bootstrap->texture_size = dev_priv->agp_textures->size;
 785        } else {
 786                bootstrap->texture_handle = 0;
 787                bootstrap->texture_size = 0;
 788        }
 789
 790        bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
 791
 792        return err;
 793}
 794
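     /**
      * Finish DMA initialization from the parameters supplied by user space:
      * record the framebuffer, depth and texture layout, look up the maps
      * created by the old or new init path, install and start the WARP
      * microcode, program PRIMADDRESS and reset the software state of the
      * primary stream and the buffer freelist.
      */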
 795static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
 796{
 797        drm_mga_private_t *dev_priv;
 798        int ret;
 799        DRM_DEBUG("\n");
 800
 801        dev_priv = dev->dev_private;
 802
 803        if (init->sgram) {
 804                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
 805        } else {
 806                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
 807        }
 808        dev_priv->maccess = init->maccess;
 809
 810        dev_priv->fb_cpp = init->fb_cpp;
 811        dev_priv->front_offset = init->front_offset;
 812        dev_priv->front_pitch = init->front_pitch;
 813        dev_priv->back_offset = init->back_offset;
 814        dev_priv->back_pitch = init->back_pitch;
 815
 816        dev_priv->depth_cpp = init->depth_cpp;
 817        dev_priv->depth_offset = init->depth_offset;
 818        dev_priv->depth_pitch = init->depth_pitch;
 819
 820        /* FIXME: Need to support AGP textures...
 821         */
 822        dev_priv->texture_offset = init->texture_offset[0];
 823        dev_priv->texture_size = init->texture_size[0];
 824
 825        dev_priv->sarea = drm_getsarea(dev);
 826        if (!dev_priv->sarea) {
 827                DRM_ERROR("failed to find sarea!\n");
 828                return -EINVAL;
 829        }
 830
 831        if (!dev_priv->used_new_dma_init) {
 832
 833                dev_priv->dma_access = MGA_PAGPXFER;
 834                dev_priv->wagp_enable = MGA_WAGP_ENABLE;
 835
 836                dev_priv->status = drm_core_findmap(dev, init->status_offset);
 837                if (!dev_priv->status) {
 838                        DRM_ERROR("failed to find status page!\n");
 839                        return -EINVAL;
 840                }
 841                dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
 842                if (!dev_priv->mmio) {
 843                        DRM_ERROR("failed to find mmio region!\n");
 844                        return -EINVAL;
 845                }
 846                dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
 847                if (!dev_priv->warp) {
 848                        DRM_ERROR("failed to find warp microcode region!\n");
 849                        return -EINVAL;
 850                }
 851                dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
 852                if (!dev_priv->primary) {
 853                        DRM_ERROR("failed to find primary dma region!\n");
 854                        return -EINVAL;
 855                }
 856                dev->agp_buffer_token = init->buffers_offset;
 857                dev->agp_buffer_map =
 858                    drm_core_findmap(dev, init->buffers_offset);
 859                if (!dev->agp_buffer_map) {
 860                        DRM_ERROR("failed to find dma buffer region!\n");
 861                        return -EINVAL;
 862                }
 863
 864                drm_core_ioremap(dev_priv->warp, dev);
 865                drm_core_ioremap(dev_priv->primary, dev);
 866                drm_core_ioremap(dev->agp_buffer_map, dev);
 867        }
 868
 869        dev_priv->sarea_priv =
 870            (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
 871                                 init->sarea_priv_offset);
 872
 873        if (!dev_priv->warp->handle ||
 874            !dev_priv->primary->handle ||
 875            ((dev_priv->dma_access != 0) &&
 876             ((dev->agp_buffer_map == NULL) ||
 877              (dev->agp_buffer_map->handle == NULL)))) {
 878                DRM_ERROR("failed to ioremap agp regions!\n");
 879                return -ENOMEM;
 880        }
 881
 882        ret = mga_warp_install_microcode(dev_priv);
 883        if (ret < 0) {
 884                DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
 885                return ret;
 886        }
 887
 888        ret = mga_warp_init(dev_priv);
 889        if (ret < 0) {
 890                DRM_ERROR("failed to init WARP engine!: %d\n", ret);
 891                return ret;
 892        }
 893
 894        dev_priv->prim.status = (u32 *) dev_priv->status->handle;
 895
 896        mga_do_wait_for_idle(dev_priv);
 897
 898        /* Init the primary DMA registers.
 899         */
 900        MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
 901#if 0
 902        MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |    /* Soft trap, SECEND, SETUPEND */
 903                  MGA_PRIMPTREN1);      /* DWGSYNC */
 904#endif
 905
 906        dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
 907        dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
 908                              + dev_priv->primary->size);
 909        dev_priv->prim.size = dev_priv->primary->size;
 910
 911        dev_priv->prim.tail = 0;
 912        dev_priv->prim.space = dev_priv->prim.size;
 913        dev_priv->prim.wrapped = 0;
 914
 915        dev_priv->prim.last_flush = 0;
 916        dev_priv->prim.last_wrap = 0;
 917
 918        dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
 919
 920        dev_priv->prim.status[0] = dev_priv->primary->offset;
 921        dev_priv->prim.status[1] = 0;
 922
 923        dev_priv->sarea_priv->last_wrap = 0;
 924        dev_priv->sarea_priv->last_frame.head = 0;
 925        dev_priv->sarea_priv->last_frame.wrap = 0;
 926
 927        if (mga_freelist_init(dev, dev_priv) < 0) {
 928                DRM_ERROR("could not initialize freelist\n");
 929                return -ENOMEM;
 930        }
 931
 932        return 0;
 933}
 934
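     /**
      * Tear down DMA state.  FULL_CLEANUP also drops the MMIO and status
      * maps and the used_new_dma_init flag; MINIMAL_CLEANUP keeps them so a
      * failed AGP bootstrap can still fall back to the PCI path.
      */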
 935static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
 936{
 937        int err = 0;
 938        DRM_DEBUG("\n");
 939
  940        /* Make sure interrupts are disabled here, because the uninstall ioctl
  941         * may not have been called from userspace; once dev_private is freed
  942         * it is too late.
 943         */
 944        if (dev->irq_enabled)
 945                drm_irq_uninstall(dev);
 946
 947        if (dev->dev_private) {
 948                drm_mga_private_t *dev_priv = dev->dev_private;
 949
 950                if ((dev_priv->warp != NULL)
 951                    && (dev_priv->warp->type != _DRM_CONSISTENT))
 952                        drm_core_ioremapfree(dev_priv->warp, dev);
 953
 954                if ((dev_priv->primary != NULL)
 955                    && (dev_priv->primary->type != _DRM_CONSISTENT))
 956                        drm_core_ioremapfree(dev_priv->primary, dev);
 957
 958                if (dev->agp_buffer_map != NULL)
 959                        drm_core_ioremapfree(dev->agp_buffer_map, dev);
 960
 961                if (dev_priv->used_new_dma_init) {
 962#if __OS_HAS_AGP
 963                        if (dev_priv->agp_handle != 0) {
 964                                struct drm_agp_binding unbind_req;
 965                                struct drm_agp_buffer free_req;
 966
 967                                unbind_req.handle = dev_priv->agp_handle;
 968                                drm_agp_unbind(dev, &unbind_req);
 969
 970                                free_req.handle = dev_priv->agp_handle;
 971                                drm_agp_free(dev, &free_req);
 972
 973                                dev_priv->agp_textures = NULL;
 974                                dev_priv->agp_size = 0;
 975                                dev_priv->agp_handle = 0;
 976                        }
 977
 978                        if ((dev->agp != NULL) && dev->agp->acquired) {
 979                                err = drm_agp_release(dev);
 980                        }
 981#endif
 982                }
 983
 984                dev_priv->warp = NULL;
 985                dev_priv->primary = NULL;
 986                dev_priv->sarea = NULL;
 987                dev_priv->sarea_priv = NULL;
 988                dev->agp_buffer_map = NULL;
 989
 990                if (full_cleanup) {
 991                        dev_priv->mmio = NULL;
 992                        dev_priv->status = NULL;
 993                        dev_priv->used_new_dma_init = 0;
 994                }
 995
 996                memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
 997                dev_priv->warp_pipe = 0;
 998                memset(dev_priv->warp_pipe_phys, 0,
 999                       sizeof(dev_priv->warp_pipe_phys));
1000
1001                if (dev_priv->head != NULL) {
1002                        mga_freelist_cleanup(dev);
1003                }
1004        }
1005
1006        return err;
1007}
1008
1009int mga_dma_init(struct drm_device *dev, void *data,
1010                 struct drm_file *file_priv)
1011{
1012        drm_mga_init_t *init = data;
1013        int err;
1014
1015        LOCK_TEST_WITH_RETURN(dev, file_priv);
1016
1017        switch (init->func) {
1018        case MGA_INIT_DMA:
1019                err = mga_do_init_dma(dev, init);
1020                if (err) {
1021                        (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
1022                }
1023                return err;
1024        case MGA_CLEANUP_DMA:
1025                return mga_do_cleanup_dma(dev, FULL_CLEANUP);
1026        }
1027
1028        return -EINVAL;
1029}
1030
1031/* ================================================================
1032 * Primary DMA stream management
1033 */
1034
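     /**
      * Ioctl handler mirroring the lock flags: flush the primary stream when
      * a flush is requested and wait for the engine to idle when quiescence
      * is requested.
      */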
1035int mga_dma_flush(struct drm_device *dev, void *data,
1036                  struct drm_file *file_priv)
1037{
1038        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1039        struct drm_lock *lock = data;
1040
1041        LOCK_TEST_WITH_RETURN(dev, file_priv);
1042
1043        DRM_DEBUG("%s%s%s\n",
1044                  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
1045                  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
1046                  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
1047
1048        WRAP_WAIT_WITH_RETURN(dev_priv);
1049
1050        if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
1051                mga_do_dma_flush(dev_priv);
1052        }
1053
1054        if (lock->flags & _DRM_LOCK_QUIESCENT) {
1055#if MGA_DMA_DEBUG
1056                int ret = mga_do_wait_for_idle(dev_priv);
1057                if (ret < 0)
1058                        DRM_INFO("-EBUSY\n");
1059                return ret;
1060#else
1061                return mga_do_wait_for_idle(dev_priv);
1062#endif
1063        } else {
1064                return 0;
1065        }
1066}
1067
1068int mga_dma_reset(struct drm_device *dev, void *data,
1069                  struct drm_file *file_priv)
1070{
1071        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1072
1073        LOCK_TEST_WITH_RETURN(dev, file_priv);
1074
1075        return mga_do_dma_reset(dev_priv);
1076}
1077
1078/* ================================================================
1079 * DMA buffer management
1080 */
1081
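     /**
      * Pull buffers off the freelist and copy their indices and sizes back
      * into the caller's request arrays, stopping with -EAGAIN as soon as
      * the freelist runs dry.
      */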
1082static int mga_dma_get_buffers(struct drm_device * dev,
1083                               struct drm_file *file_priv, struct drm_dma * d)
1084{
1085        struct drm_buf *buf;
1086        int i;
1087
1088        for (i = d->granted_count; i < d->request_count; i++) {
1089                buf = mga_freelist_get(dev);
1090                if (!buf)
1091                        return -EAGAIN;
1092
1093                buf->file_priv = file_priv;
1094
1095                if (DRM_COPY_TO_USER(&d->request_indices[i],
1096                                     &buf->idx, sizeof(buf->idx)))
1097                        return -EFAULT;
1098                if (DRM_COPY_TO_USER(&d->request_sizes[i],
1099                                     &buf->total, sizeof(buf->total)))
1100                        return -EFAULT;
1101
1102                d->granted_count++;
1103        }
1104        return 0;
1105}
1106
1107int mga_dma_buffers(struct drm_device *dev, void *data,
1108                    struct drm_file *file_priv)
1109{
1110        struct drm_device_dma *dma = dev->dma;
1111        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1112        struct drm_dma *d = data;
1113        int ret = 0;
1114
1115        LOCK_TEST_WITH_RETURN(dev, file_priv);
1116
1117        /* Please don't send us buffers.
1118         */
1119        if (d->send_count != 0) {
1120                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1121                          DRM_CURRENTPID, d->send_count);
1122                return -EINVAL;
1123        }
1124
1125        /* We'll send you buffers.
1126         */
1127        if (d->request_count < 0 || d->request_count > dma->buf_count) {
1128                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1129                          DRM_CURRENTPID, d->request_count, dma->buf_count);
1130                return -EINVAL;
1131        }
1132
1133        WRAP_TEST_WITH_RETURN(dev_priv);
1134
1135        d->granted_count = 0;
1136
1137        if (d->request_count) {
1138                ret = mga_dma_get_buffers(dev, file_priv, d);
1139        }
1140
1141        return ret;
1142}
1143
1144/**
1145 * Called just before the module is unloaded.
1146 */
1147int mga_driver_unload(struct drm_device * dev)
1148{
1149        kfree(dev->dev_private);
1150        dev->dev_private = NULL;
1151
1152        return 0;
1153}
1154
1155/**
1156 * Called when the last opener of the device is closed.
1157 */
1158void mga_driver_lastclose(struct drm_device * dev)
1159{
1160        mga_do_cleanup_dma(dev, FULL_CLEANUP);
1161}
1162
1163int mga_driver_dma_quiescent(struct drm_device * dev)
1164{
1165        drm_mga_private_t *dev_priv = dev->dev_private;
1166        return mga_do_wait_for_idle(dev_priv);
1167}
1168