linux/drivers/gpu/drm/via/via_dmablit.c
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>
#include <linux/slab.h>

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
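
/*
 * Rough picture of the descriptor chain this driver builds (a sketch based on
 * via_map_blit_for_device() and via_fire_dmablit() below; the EC bit is
 * presumably the hardware's end-of-chain marker):
 *
 *	DPR register -> chain_start (descriptor mapped last)
 *	                  .next ----> previously mapped descriptor
 *	                                .next ----> ...
 *	                                              .next = VIA_DMA_DPR_EC
 *
 * Each descriptor carries one page-contiguous chunk of a scanline:
 * mem_addr (DMA address of the user page chunk), dev_addr (frame-buffer
 * address), size (chunk length in bytes) and next (DMA address of the
 * descriptor to process after this one).
 */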

/*
 * Unmap a DMA mapping.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and also build and map the descriptors.
 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
 * 'next' field without syncing calls when the descriptor is already mapped.
 */
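/*
 * Call sequence, as used by via_build_sg_info() further down: first call with
 * mode == 0 to count the descriptors, then allocate the descriptor pages, then
 * call again with mode == 1 to actually build and map the chain.
 */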

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Free up all resources for a blit. It is usable even if the
 * blit info has only been partially built, as long as the state enum is consistent
 * with the actual state of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. This step
 * fails if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);

	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return -ENOMEM;
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}

/*
 * The dmablit part of the IRQ handler. Try to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}


/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));
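
	/*
	 * For illustration: with done_blit_handle == 5 and cur_blit_handle == 7,
	 * handles 6 and 7 are still active (5 - 6 wraps to a value > 2^23 and
	 * 7 - 6 <= 2^23), handle 5 is already done, and handle 8 has not been
	 * queued yet.
	 */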

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

/*
 * Sync. Wait for up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}


/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * denial-of-service security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait up to one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}
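/*
 * For reference, a rough userspace sketch of how these two IOCTLs might be
 * driven through libdrm (assumed: drmCommandWriteRead()/drmCommandWrite() and
 * the DRM_VIA_DMA_BLIT / DRM_VIA_BLIT_SYNC command indices from via_drm.h;
 * src, pitch, fb_offset, fb_pitch, width_bytes, height and fd are placeholders
 * and error handling is omitted -- not the canonical usage):
 *
 *	drm_via_dmablit_t xfer;
 *	int ret;
 *
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.to_fb = 1;                  // copy from system memory to the frame buffer
 *	xfer.mem_addr = src;             // user pointer, locked by via_lock_all_dma_pages()
 *	xfer.mem_stride = pitch;         // bytes between the starts of two lines in memory
 *	xfer.fb_addr = fb_offset;        // destination offset into the frame buffer
 *	xfer.fb_stride = fb_pitch;
 *	xfer.line_length = width_bytes;
 *	xfer.num_lines = height;
 *
 *	// Queue the blit; on success the kernel fills in xfer.sync.
 *	drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT, &xfer, sizeof(xfer));
 *
 *	// Wait for completion, reissuing when a signal interrupts the wait.
 *	do {
 *		ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
 *				      &xfer.sync, sizeof(xfer.sync));
 *	} while (ret == -EAGAIN);
 */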