linux/drivers/gpu/drm/mga/mga_dma.c
/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * \file mga_dma.c
 * DMA support for MGA G200 / G400.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Jeff Hartmann <jhartmann@valinux.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include <linux/delay.h>

#include "mga_drv.h"

#define MGA_DEFAULT_USEC_TIMEOUT        10000
#define MGA_FREELIST_DEBUG              0

#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);

/* ================================================================
 * Engine control
 */

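/* Busy-wait for the primary DMA engine to go idle: poll MGA_STATUS for
 * MGA_ENDPRDMASTS for up to dev_priv->usec_timeout microseconds.
 * Returns 0 once the engine is idle, -EBUSY on timeout.
 */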
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
        u32 status = 0;
        int i;
        DRM_DEBUG("\n");

        for (i = 0; i < dev_priv->usec_timeout; i++) {
                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
                if (status == MGA_ENDPRDMASTS) {
                        MGA_WRITE8(MGA_CRTC_INDEX, 0);
                        return 0;
                }
                udelay(1);
        }

#if MGA_DMA_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("   status=0x%08x\n", status);
#endif
        return -EBUSY;
}

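/* Reset the software view of the primary DMA stream: rewind the tail,
 * mark the whole buffer as free and clear the wrap count shared with
 * user space.  The hardware itself is not touched here (see the FIXMEs
 * below).
 */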
static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;

        DRM_DEBUG("\n");

        /* The primary DMA stream should look like new right about now.
         */
        primary->tail = 0;
        primary->space = primary->size;
        primary->last_flush = 0;

        sarea_priv->last_wrap = 0;

        /* FIXME: Reset counters, buffer ages etc...
         */

        /* FIXME: What else do we need to reinitialize?  WARP stuff?
         */

        return 0;
}

/* ================================================================
 * Primary DMA stream
 */

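/* Kick the primary DMA stream at the current tail: wait for the engine
 * to drain, emit one block of DMA padding (see the comment below about
 * the card partially reading the padding), recompute the free-space
 * estimate from the hardware head in MGA_PRIMADDRESS, and finally write
 * MGA_PRIMEND so the engine runs up to the new tail.
 */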
void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        u32 status = 0;
        int i;
        DMA_LOCALS;
        DRM_DEBUG("\n");

        /* We need to wait so that we can do a safe flush */
        for (i = 0; i < dev_priv->usec_timeout; i++) {
                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
                if (status == MGA_ENDPRDMASTS)
                        break;
                udelay(1);
        }

        if (primary->tail == primary->last_flush) {
                DRM_DEBUG("   bailing out...\n");
                return;
        }

        tail = primary->tail + dev_priv->primary->offset;

        /* We need to pad the stream between flushes, as the card
         * actually (partially?) reads the first of these commands.
         * See page 4-16 in the G400 manual, middle of the page or so.
         */
        BEGIN_DMA(1);

        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

        ADVANCE_DMA();

        primary->last_flush = primary->tail;

        head = MGA_READ(MGA_PRIMADDRESS);

        if (head <= tail)
                primary->space = primary->size - primary->tail;
        else
                primary->space = head - tail;

        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
        DRM_DEBUG("   tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
        DRM_DEBUG("  space = 0x%06x\n", primary->space);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        DRM_DEBUG("done.\n");
}

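/* Begin wrapping the primary DMA stream: emit a padding block at the
 * current tail, rewind the software tail to the start of the buffer and
 * bump the local wrap count, then kick the engine up to the old tail.
 * The wrapped bit stays set until mga_do_dma_wrap_end() points the
 * hardware back at the beginning of the buffer.
 */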
void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        DMA_LOCALS;
        DRM_DEBUG("\n");

        BEGIN_DMA_WRAP();

        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

        ADVANCE_DMA();

        tail = primary->tail + dev_priv->primary->offset;

        primary->tail = 0;
        primary->last_flush = 0;
        primary->last_wrap++;

        head = MGA_READ(MGA_PRIMADDRESS);

        if (head == dev_priv->primary->offset)
                primary->space = primary->size;
        else
                primary->space = head - dev_priv->primary->offset;

        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
        DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
        DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
        DRM_DEBUG("  space = 0x%06x\n", primary->space);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        set_bit(0, &primary->wrapped);
        DRM_DEBUG("done.\n");
}

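/* Finish a wrap: point the hardware head (MGA_PRIMADDRESS) back at the
 * start of the primary buffer, bump the wrap count shared through the
 * SAREA and clear the wrapped flag set by mga_do_dma_wrap_start().
 */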
void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 head = dev_priv->primary->offset;
        DRM_DEBUG("\n");

        sarea_priv->last_wrap++;
        DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);

        clear_bit(0, &primary->wrapped);
        DRM_DEBUG("done.\n");
}

/* ================================================================
 * Freelist management
 */

#define MGA_BUFFER_USED         (~0)
#define MGA_BUFFER_FREE         0

#if MGA_FREELIST_DEBUG
static void mga_freelist_print(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *entry;

        DRM_INFO("\n");
        DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
                 dev_priv->sarea_priv->last_dispatch,
                 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
                                dev_priv->primary->offset));
        DRM_INFO("current freelist:\n");

        for (entry = dev_priv->head->next; entry; entry = entry->next) {
                DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
                         entry, entry->buf->idx, entry->age.head,
                         (unsigned long)(entry->age.head - dev_priv->primary->offset));
        }
        DRM_INFO("\n");
}
#endif

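/* Build the buffer freelist: one drm_mga_freelist_t node per DMA buffer,
 * linked behind a dummy head node.  New entries are inserted at the
 * head, so dev_priv->tail ends up pointing at the entry for the first
 * buffer.  Returns -ENOMEM on allocation failure; partially built lists
 * are reclaimed later by mga_do_cleanup_dma().
 */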
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_mga_buf_priv_t *buf_priv;
        drm_mga_freelist_t *entry;
        int i;
        DRM_DEBUG("count=%d\n", dma->buf_count);

        dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
        if (dev_priv->head == NULL)
                return -ENOMEM;

        SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;

                entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
                if (entry == NULL)
                        return -ENOMEM;

                entry->next = dev_priv->head->next;
                entry->prev = dev_priv->head;
                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
                entry->buf = buf;

                if (dev_priv->head->next != NULL)
                        dev_priv->head->next->prev = entry;
                if (entry->next == NULL)
                        dev_priv->tail = entry;

                buf_priv->list_entry = entry;
                buf_priv->discard = 0;
                buf_priv->dispatched = 0;

                dev_priv->head->next = entry;
        }

        return 0;
}

static void mga_freelist_cleanup(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *entry;
        drm_mga_freelist_t *next;
        DRM_DEBUG("\n");

        entry = dev_priv->head;
        while (entry) {
                next = entry->next;
                kfree(entry);
                entry = next;
        }

        dev_priv->head = dev_priv->tail = NULL;
}

#if 0
/* FIXME: Still needed?
 */
static void mga_freelist_reset(struct drm_device *dev)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_mga_buf_priv_t *buf_priv;
        int i;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;
                SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
        }
}
#endif

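/* Take a buffer from the tail of the freelist, but only if TEST_AGE()
 * shows the hardware head/wrap have advanced past the age recorded when
 * that buffer was last dispatched.  Returns NULL when no buffer has been
 * retired yet.
 */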
static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *next;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *tail = dev_priv->tail;
        u32 head, wrap;
        DRM_DEBUG("\n");

        head = MGA_READ(MGA_PRIMADDRESS);
        wrap = dev_priv->sarea_priv->last_wrap;

        DRM_DEBUG("   tail=0x%06lx %d\n",
                  tail->age.head ?
                  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
                  tail->age.wrap);
        DRM_DEBUG("   head=0x%06lx %d\n",
                  (unsigned long)(head - dev_priv->primary->offset), wrap);

        if (TEST_AGE(&tail->age, head, wrap)) {
                prev = dev_priv->tail->prev;
                next = dev_priv->tail;
                prev->next = NULL;
                next->prev = next->next = NULL;
                dev_priv->tail = prev;
                SET_AGE(&next->age, MGA_BUFFER_USED, 0);
                return next->buf;
        }

        DRM_DEBUG("returning NULL!\n");
        return NULL;
}

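/* Return a buffer to the freelist.  A buffer that was never dispatched
 * (age still MGA_BUFFER_USED) is appended at the tail as immediately
 * reusable; a dispatched buffer keeps its age and is queued just behind
 * the head so it is only handed out again once the hardware has passed
 * it.
 */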
int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        drm_mga_freelist_t *head, *entry, *prev;

        DRM_DEBUG("age=0x%06lx wrap=%d\n",
                  (unsigned long)(buf_priv->list_entry->age.head -
                                  dev_priv->primary->offset),
                  buf_priv->list_entry->age.wrap);

        entry = buf_priv->list_entry;
        head = dev_priv->head;

        if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
                prev = dev_priv->tail;
                prev->next = entry;
                entry->prev = prev;
                entry->next = NULL;
        } else {
                prev = head->next;
                head->next = entry;
                prev->prev = entry;
                entry->prev = head;
                entry->next = prev;
        }

        return 0;
}

/* ================================================================
 * DMA initialization, cleanup
 */

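/* First-time driver setup: allocate the per-device private structure,
 * record the MMIO BAR (PCI resource 1), enable bus mastering and set up
 * a single vblank counter.  PCI G450 boards sitting behind a HINT bridge
 * have their unusable AGP state discarded here (see the comment below).
 */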
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        drm_mga_private_t *dev_priv;
        int ret;

        /* There are PCI versions of the G450.  These cards have the
         * same PCI ID as the AGP G450, but sit behind an additional
         * PCI-to-PCI bridge chip.  We detect these cards, which are not
         * currently supported by this driver, by looking at the vendor
         * and device ID of the bridge upstream of the "card".  If the
         * vendor is 0x3388 (Hint Corp) and the device is 0x0021 (HB6
         * Universal PCI-PCI bridge), we reject the device.
         */
        if ((pdev->device == 0x0525) && pdev->bus->self
            && (pdev->bus->self->vendor == 0x3388)
            && (pdev->bus->self->device == 0x0021)
            && dev->agp) {
                /* FIXME: This should be quirked in the PCI core, but oh
                 * well, the hardware probably no longer exists. */
                arch_phys_wc_del(dev->agp->agp_mtrr);
                kfree(dev->agp);
                dev->agp = NULL;
        }
        dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
        if (!dev_priv)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
        dev_priv->chipset = flags;

        pci_set_master(pdev);

        dev_priv->mmio_base = pci_resource_start(pdev, 1);
        dev_priv->mmio_size = pci_resource_len(pdev, 1);

        ret = drm_vblank_init(dev, 1);

        if (ret) {
                (void) mga_driver_unload(dev);
                return ret;
        }

        return 0;
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Bootstrap the driver for AGP DMA.
 *
 * \todo
 * Investigate whether there is any benefit to storing the WARP microcode in
 * AGP memory.  If not, the microcode may as well always be put in PCI
 * memory.
 *
 * \todo
 * This routine needs to set dma_bs->agp_mode to the mode actually configured
 * in the hardware.  Looking just at the Linux AGP driver code, I don't see
 * an easy way to determine this.
 *
 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
 */
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;
        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
        int err;
        unsigned offset;
        const unsigned secondary_size = dma_bs->secondary_bin_count
            * dma_bs->secondary_bin_size;
        const unsigned agp_size = (dma_bs->agp_size << 20);
        struct drm_buf_desc req;
        struct drm_agp_mode mode;
        struct drm_agp_info info;
        struct drm_agp_buffer agp_req;
        struct drm_agp_binding bind_req;

        /* Acquire AGP. */
        err = drm_legacy_agp_acquire(dev);
        if (err) {
                DRM_ERROR("Unable to acquire AGP: %d\n", err);
                return err;
        }

        err = drm_legacy_agp_info(dev, &info);
        if (err) {
                DRM_ERROR("Unable to get AGP info: %d\n", err);
                return err;
        }

        mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
        err = drm_legacy_agp_enable(dev, mode);
        if (err) {
                DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
                return err;
        }

        /* In addition to the usual AGP mode configuration, the G200 AGP cards
         * need to have the AGP mode "manually" set.
         */

        if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
                if (mode.mode & 0x02)
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
                else
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
        }

        /* Allocate and bind AGP memory. */
        agp_req.size = agp_size;
        agp_req.type = 0;
        err = drm_legacy_agp_alloc(dev, &agp_req);
        if (err) {
                dev_priv->agp_size = 0;
                DRM_ERROR("Unable to allocate %uMB AGP memory\n",
                          dma_bs->agp_size);
                return err;
        }

        dev_priv->agp_size = agp_size;
        dev_priv->agp_handle = agp_req.handle;

        bind_req.handle = agp_req.handle;
        bind_req.offset = 0;
        err = drm_legacy_agp_bind(dev, &bind_req);
        if (err) {
                DRM_ERROR("Unable to bind AGP memory: %d\n", err);
                return err;
        }

        /* Make drm_legacy_addbufs happy by not trying to create a mapping for
         * less than a page.
         */
        if (warp_size < PAGE_SIZE)
                warp_size = PAGE_SIZE;

        offset = 0;
        err = drm_legacy_addmap(dev, offset, warp_size,
                                _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
        if (err) {
                DRM_ERROR("Unable to map WARP microcode: %d\n", err);
                return err;
        }

        offset += warp_size;
        err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
                                _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
        if (err) {
                DRM_ERROR("Unable to map primary DMA region: %d\n", err);
                return err;
        }

        offset += dma_bs->primary_size;
        err = drm_legacy_addmap(dev, offset, secondary_size,
                                _DRM_AGP, 0, &dev->agp_buffer_map);
        if (err) {
                DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
                return err;
        }

        (void)memset(&req, 0, sizeof(req));
        req.count = dma_bs->secondary_bin_count;
        req.size = dma_bs->secondary_bin_size;
        req.flags = _DRM_AGP_BUFFER;
        req.agp_start = offset;

        err = drm_legacy_addbufs_agp(dev, &req);
        if (err) {
                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }

        {
                struct drm_map_list *_entry;
                unsigned long agp_token = 0;

                list_for_each_entry(_entry, &dev->maplist, head) {
                        if (_entry->map == dev->agp_buffer_map)
                                agp_token = _entry->user_token;
                }
                if (!agp_token)
                        return -EFAULT;

                dev->agp_buffer_token = agp_token;
        }

        offset += secondary_size;
        err = drm_legacy_addmap(dev, offset, agp_size - offset,
                                _DRM_AGP, 0, &dev_priv->agp_textures);
        if (err) {
                DRM_ERROR("Unable to map AGP texture region %d\n", err);
                return err;
        }

        drm_legacy_ioremap(dev_priv->warp, dev);
        drm_legacy_ioremap(dev_priv->primary, dev);
        drm_legacy_ioremap(dev->agp_buffer_map, dev);

        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
                DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
                          dev_priv->warp->handle, dev_priv->primary->handle,
                          dev->agp_buffer_map->handle);
                return -ENOMEM;
        }

        dev_priv->dma_access = MGA_PAGPXFER;
        dev_priv->wagp_enable = MGA_WAGP_ENABLE;

        DRM_INFO("Initialized card for AGP DMA.\n");
        return 0;
}
#else
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        return -EINVAL;
}
#endif

/*
 * Bootstrap the driver for PCI DMA.
 *
 * \todo
 * The algorithm for decreasing the size of the primary DMA buffer could be
 * better.  The size should be rounded up to the nearest page size, then
 * decrease the request size by a single page each pass through the loop.
 *
 * \todo
 * Determine whether the maximum address passed to drm_pci_alloc is correct.
 * The same goes for drm_legacy_addbufs_pci.
 *
 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 */
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;
        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
        unsigned int primary_size;
        unsigned int bin_count;
        int err;
        struct drm_buf_desc req;

        if (dev->dma == NULL) {
                DRM_ERROR("dev->dma is NULL\n");
                return -EFAULT;
        }

        /* Make drm_legacy_addbufs happy by not trying to create a mapping for
         * less than a page.
         */
        if (warp_size < PAGE_SIZE)
                warp_size = PAGE_SIZE;

        /* The proper alignment is 0x100 for this mapping */
        err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
                                _DRM_READ_ONLY, &dev_priv->warp);
        if (err != 0) {
                DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
                          err);
                return err;
        }

        /* Other than the bottom two bits being used to encode other
         * information, there don't appear to be any restrictions on the
         * alignment of the primary or secondary DMA buffers.
         */

        for (primary_size = dma_bs->primary_size; primary_size != 0;
             primary_size >>= 1) {
                /* The proper alignment for this mapping is 0x04 */
                err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
                                        _DRM_READ_ONLY, &dev_priv->primary);
                if (!err)
                        break;
        }

        if (err != 0) {
                DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
                return -ENOMEM;
        }

        if (dev_priv->primary->size != dma_bs->primary_size) {
                DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
                         dma_bs->primary_size,
                         (unsigned)dev_priv->primary->size);
                dma_bs->primary_size = dev_priv->primary->size;
        }

        for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
             bin_count--) {
                (void)memset(&req, 0, sizeof(req));
                req.count = bin_count;
                req.size = dma_bs->secondary_bin_size;

                err = drm_legacy_addbufs_pci(dev, &req);
                if (!err)
                        break;
        }

        if (bin_count == 0) {
                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }

        if (bin_count != dma_bs->secondary_bin_count) {
                DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
                         "to %u.\n", dma_bs->secondary_bin_count, bin_count);

                dma_bs->secondary_bin_count = bin_count;
        }

        dev_priv->dma_access = 0;
        dev_priv->wagp_enable = 0;

        dma_bs->agp_mode = 0;

        DRM_INFO("Initialized card for PCI DMA.\n");
        return 0;
}

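/* Common bootstrap path: map the MMIO and status regions, then try the
 * AGP route when an AGP mode was requested and an AGP aperture exists,
 * falling back to PCI DMA (after a minimal cleanup) if AGP setup fails.
 */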
static int mga_do_dma_bootstrap(struct drm_device *dev,
                                drm_mga_dma_bootstrap_t *dma_bs)
{
        const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
        int err;
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;

        dev_priv->used_new_dma_init = 1;

        /* The first steps are the same for both PCI and AGP based DMA.  Map
         * the card's MMIO registers and map a status page.
         */
        err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
                                _DRM_REGISTERS, _DRM_READ_ONLY,
                                &dev_priv->mmio);
        if (err) {
                DRM_ERROR("Unable to map MMIO region: %d\n", err);
                return err;
        }

        err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
                                _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
                                &dev_priv->status);
        if (err) {
                DRM_ERROR("Unable to map status region: %d\n", err);
                return err;
        }

        /* The DMA initialization procedure is slightly different for PCI and
         * AGP cards.  AGP cards just allocate a large block of AGP memory and
         * carve off portions of it for internal uses.  The remaining memory
         * is returned to user-mode to be used for AGP textures.
         */
        if (is_agp)
                err = mga_do_agp_dma_bootstrap(dev, dma_bs);

        /* If we attempted to initialize the card for AGP DMA but failed,
         * clean up any mess that may have been created.
         */

        if (err)
                mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);

        /* Not only do we want to try to initialize PCI cards for PCI DMA,
         * but we also try to initialize AGP cards that could not be
         * initialized for AGP DMA.  This covers the case where we have an AGP
         * card in a system with an unsupported AGP chipset.  In that case the
         * card will be detected as AGP, but we won't be able to allocate any
         * AGP memory, etc.
         */

        if (!is_agp || err)
                err = mga_do_pci_dma_bootstrap(dev, dma_bs);

        return err;
}

int mga_dma_bootstrap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        drm_mga_dma_bootstrap_t *bootstrap = data;
        int err;
        static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
        const drm_mga_private_t *const dev_priv =
                (drm_mga_private_t *) dev->dev_private;

        err = mga_do_dma_bootstrap(dev, bootstrap);
        if (err) {
                mga_do_cleanup_dma(dev, FULL_CLEANUP);
                return err;
        }

        if (dev_priv->agp_textures != NULL) {
                bootstrap->texture_handle = dev_priv->agp_textures->offset;
                bootstrap->texture_size = dev_priv->agp_textures->size;
        } else {
                bootstrap->texture_handle = 0;
                bootstrap->texture_size = 0;
        }

        bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];

        return err;
}

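/* Second-stage initialization from the MGA_INIT_DMA ioctl: record the
 * framebuffer/depth/texture layout from the init request, look up (and,
 * on the old init path, ioremap) the required maps, load the WARP
 * microcode, program the primary DMA registers and build the buffer
 * freelist.
 */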
static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
        drm_mga_private_t *dev_priv;
        int ret;
        DRM_DEBUG("\n");

        dev_priv = dev->dev_private;

        if (init->sgram)
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
        else
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
        dev_priv->maccess = init->maccess;

        dev_priv->fb_cpp = init->fb_cpp;
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;

        dev_priv->depth_cpp = init->depth_cpp;
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;

        /* FIXME: Need to support AGP textures...
         */
        dev_priv->texture_offset = init->texture_offset[0];
        dev_priv->texture_size = init->texture_size[0];

        dev_priv->sarea = drm_legacy_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("failed to find sarea!\n");
                return -EINVAL;
        }

        if (!dev_priv->used_new_dma_init) {

                dev_priv->dma_access = MGA_PAGPXFER;
                dev_priv->wagp_enable = MGA_WAGP_ENABLE;

                dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
                if (!dev_priv->status) {
                        DRM_ERROR("failed to find status page!\n");
                        return -EINVAL;
                }
                dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
                if (!dev_priv->mmio) {
                        DRM_ERROR("failed to find mmio region!\n");
                        return -EINVAL;
                }
                dev_priv->warp = drm_legacy_findmap(dev, init->warp_offset);
                if (!dev_priv->warp) {
                        DRM_ERROR("failed to find warp microcode region!\n");
                        return -EINVAL;
                }
                dev_priv->primary = drm_legacy_findmap(dev, init->primary_offset);
                if (!dev_priv->primary) {
                        DRM_ERROR("failed to find primary dma region!\n");
                        return -EINVAL;
                }
                dev->agp_buffer_token = init->buffers_offset;
                dev->agp_buffer_map =
                    drm_legacy_findmap(dev, init->buffers_offset);
                if (!dev->agp_buffer_map) {
                        DRM_ERROR("failed to find dma buffer region!\n");
                        return -EINVAL;
                }

                drm_legacy_ioremap(dev_priv->warp, dev);
                drm_legacy_ioremap(dev_priv->primary, dev);
                drm_legacy_ioremap(dev->agp_buffer_map, dev);
        }

        dev_priv->sarea_priv =
            (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
                                 init->sarea_priv_offset);

        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle ||
            ((dev_priv->dma_access != 0) &&
             ((dev->agp_buffer_map == NULL) ||
              (dev->agp_buffer_map->handle == NULL)))) {
                DRM_ERROR("failed to ioremap agp regions!\n");
                return -ENOMEM;
        }

        ret = mga_warp_install_microcode(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
                return ret;
        }

        ret = mga_warp_init(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to init WARP engine!: %d\n", ret);
                return ret;
        }

        dev_priv->prim.status = (u32 *) dev_priv->status->handle;

        mga_do_wait_for_idle(dev_priv);

        /* Init the primary DMA registers.
         */
        MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
        MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |    /* Soft trap, SECEND, SETUPEND */
                  MGA_PRIMPTREN1);      /* DWGSYNC */
#endif

        dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
        dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
                              + dev_priv->primary->size);
        dev_priv->prim.size = dev_priv->primary->size;

        dev_priv->prim.tail = 0;
        dev_priv->prim.space = dev_priv->prim.size;
        dev_priv->prim.wrapped = 0;

        dev_priv->prim.last_flush = 0;
        dev_priv->prim.last_wrap = 0;

        dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

        dev_priv->prim.status[0] = dev_priv->primary->offset;
        dev_priv->prim.status[1] = 0;

        dev_priv->sarea_priv->last_wrap = 0;
        dev_priv->sarea_priv->last_frame.head = 0;
        dev_priv->sarea_priv->last_frame.wrap = 0;

        if (mga_freelist_init(dev, dev_priv) < 0) {
                DRM_ERROR("could not initialize freelist\n");
                return -ENOMEM;
        }

        return 0;
}

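/* Tear down DMA state.  With MINIMAL_CLEANUP only the resources set up
 * by the bootstrap stage (AGP memory, ioremaps, map pointers) are
 * released; FULL_CLEANUP additionally forgets the MMIO/status maps and
 * the new-style init flag so the next bootstrap starts from scratch.
 */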
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
        int err = 0;
        DRM_DEBUG("\n");

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace; once dev_private has been
         * freed, it is too late.
         */
        if (dev->irq_enabled)
                drm_legacy_irq_uninstall(dev);

        if (dev->dev_private) {
                drm_mga_private_t *dev_priv = dev->dev_private;

                if ((dev_priv->warp != NULL)
                    && (dev_priv->warp->type != _DRM_CONSISTENT))
                        drm_legacy_ioremapfree(dev_priv->warp, dev);

                if ((dev_priv->primary != NULL)
                    && (dev_priv->primary->type != _DRM_CONSISTENT))
                        drm_legacy_ioremapfree(dev_priv->primary, dev);

                if (dev->agp_buffer_map != NULL)
                        drm_legacy_ioremapfree(dev->agp_buffer_map, dev);

                if (dev_priv->used_new_dma_init) {
#if IS_ENABLED(CONFIG_AGP)
                        if (dev_priv->agp_handle != 0) {
                                struct drm_agp_binding unbind_req;
                                struct drm_agp_buffer free_req;

                                unbind_req.handle = dev_priv->agp_handle;
                                drm_legacy_agp_unbind(dev, &unbind_req);

                                free_req.handle = dev_priv->agp_handle;
                                drm_legacy_agp_free(dev, &free_req);

                                dev_priv->agp_textures = NULL;
                                dev_priv->agp_size = 0;
                                dev_priv->agp_handle = 0;
                        }

                        if ((dev->agp != NULL) && dev->agp->acquired)
                                err = drm_legacy_agp_release(dev);
#endif
                }

                dev_priv->warp = NULL;
                dev_priv->primary = NULL;
                dev_priv->sarea = NULL;
                dev_priv->sarea_priv = NULL;
                dev->agp_buffer_map = NULL;

                if (full_cleanup) {
                        dev_priv->mmio = NULL;
                        dev_priv->status = NULL;
                        dev_priv->used_new_dma_init = 0;
                }

                memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
                dev_priv->warp_pipe = 0;
                memset(dev_priv->warp_pipe_phys, 0,
                       sizeof(dev_priv->warp_pipe_phys));

                if (dev_priv->head != NULL)
                        mga_freelist_cleanup(dev);
        }

        return err;
}

int mga_dma_init(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        drm_mga_init_t *init = data;
        int err;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        switch (init->func) {
        case MGA_INIT_DMA:
                err = mga_do_init_dma(dev, init);
                if (err)
                        (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
                return err;
        case MGA_CLEANUP_DMA:
                return mga_do_cleanup_dma(dev, FULL_CLEANUP);
        }

        return -EINVAL;
}

/* ================================================================
 * Primary DMA stream management
 */

int mga_dma_flush(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
        struct drm_lock *lock = data;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        DRM_DEBUG("%s%s%s\n",
                  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
                  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
                  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");

        WRAP_WAIT_WITH_RETURN(dev_priv);

        if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
                mga_do_dma_flush(dev_priv);

        if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
                int ret = mga_do_wait_for_idle(dev_priv);
                if (ret < 0)
                        DRM_INFO("-EBUSY\n");
                return ret;
#else
                return mga_do_wait_for_idle(dev_priv);
#endif
        } else {
                return 0;
        }
}

int mga_dma_reset(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return mga_do_dma_reset(dev_priv);
}

/* ================================================================
 * DMA buffer management
 */

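/* Hand out up to d->request_count free buffers to the caller, copying
 * each buffer index and size back to the user-space request arrays.
 * Stops with -EAGAIN as soon as the freelist runs dry.
 */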
static int mga_dma_get_buffers(struct drm_device *dev,
                               struct drm_file *file_priv, struct drm_dma *d)
{
        struct drm_buf *buf;
        int i;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = mga_freelist_get(dev);
                if (!buf)
                        return -EAGAIN;

                buf->file_priv = file_priv;

                if (copy_to_user(&d->request_indices[i],
                                 &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
                if (copy_to_user(&d->request_sizes[i],
                                 &buf->total, sizeof(buf->total)))
                        return -EFAULT;

                d->granted_count++;
        }
        return 0;
}

int mga_dma_buffers(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
        struct drm_dma *d = data;
        int ret = 0;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* Please don't send us buffers.
         */
        if (d->send_count != 0) {
                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
                          task_pid_nr(current), d->send_count);
                return -EINVAL;
        }

        /* We'll send you buffers.
         */
        if (d->request_count < 0 || d->request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          task_pid_nr(current), d->request_count,
                          dma->buf_count);
                return -EINVAL;
        }

        WRAP_TEST_WITH_RETURN(dev_priv);

        d->granted_count = 0;

        if (d->request_count)
                ret = mga_dma_get_buffers(dev, file_priv, d);

        return ret;
}

/*
 * Called just before the module is unloaded.
 */
void mga_driver_unload(struct drm_device *dev)
{
        kfree(dev->dev_private);
        dev->dev_private = NULL;
}

/*
 * Called when the last opener of the device is closed.
 */
void mga_driver_lastclose(struct drm_device *dev)
{
        mga_do_cleanup_dma(dev, FULL_CLEANUP);
}

int mga_driver_dma_quiescent(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        return mga_do_wait_for_idle(dev_priv);
}