linux/drivers/gpu/drm/drm_vm.c
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

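/*
 * Overview: each legacy map type (AGP, shared memory, DMA buffers,
 * scatter-gather) gets its own fault handler and vm_operations_struct below;
 * drm_mmap() is the mmap entry point that picks the right one and, where
 * possible, remaps the pages up front.
 */
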
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

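/*
 * drm_io_prot() and drm_dma_prot() only choose per-architecture caching
 * attributes; the callers in drm_mmap_locked() below assign the result to
 * vma->vm_page_prot.  On x86, for example, register and framebuffer maps get
 * _PAGE_PCD set (caching disabled) while AGP maps keep the default attributes.
 */
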
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS if the fault cannot be handled.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it in \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

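/*
 * Illustrative arithmetic for the AGP fault path above (numbers are made up):
 * with map->offset = 0xd0000000 and a fault at vma->vm_start + 0x5000, the
 * bus address is baddr = 0xd0005000.  If an agpmem entry is bound at
 * 0xd0000000 for 16 pages, baddr falls inside it, and the backing struct page
 * is agpmem->memory->pages[(0xd0005000 - 0xd0000000) >> PAGE_SHIFT], i.e.
 * index 5 with 4 KiB pages.
 */
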
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS if nothing is allocated.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

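/*
 * _DRM_SHM maps are backed by vmalloc() memory (see the _DRM_SHM case in
 * drm_vm_shm_close() below), so vmalloc_to_page() is what resolves the kernel
 * virtual address map->handle + offset to its backing struct page.
 */
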
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_GEM:
                                DRM_ERROR("tried to rmmap GEM object\n");
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

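/*
 * The teardown above frees the backing store only when this was the last VMA
 * referencing the map (found_maps == 1 in dev->vmalist), the map is flagged
 * _DRM_REMOVABLE, and it has already been dropped from dev->maplist; each map
 * type is released the same way it was allocated (iounmap, vfree, or
 * __drm_pci_free).
 */
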
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS if nothing is allocated.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

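/*
 * Illustrative arithmetic for the DMA fault path above (numbers are made up):
 * a fault at vma->vm_start + 0x3400 with 4 KiB pages gives offset = 0x3400,
 * page_nr = 3, and the returned page is the one backing the kernel virtual
 * address dma->pagelist[3] + 0x400.
 */
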
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS if nothing is allocated.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

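/*
 * For scatter-gather maps the index into entry->pagelist is the sum of two
 * page counts: the page offset of the fault within the VMA and the page
 * offset of this map within the whole scatter-gather region (map->offset
 * relative to dev->sg->virtual).
 */
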
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

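/*
 * All four operation tables share drm_vm_open()/drm_vm_close(); only shared
 * memory maps use drm_vm_shm_close() instead, because that is the one case
 * where the last munmap() may also have to free a removable map.
 */
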
/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

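/*
 * drm_vm_open_locked() and drm_vm_close_locked() expect the caller to hold
 * dev->struct_mutex; the drm_vm_open()/drm_vm_close() wrappers below take the
 * mutex themselves and are what the vm_operations tables point at.
 */
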
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

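/*
 * Note that drm_mmap_dma() performs no remapping itself: pages are filled in
 * lazily by drm_do_vm_dma_fault().  The mapping length must cover exactly
 * dma->page_count pages, and unprivileged clients get a read-only mapping
 * when the DMA buffers were allocated with _DRM_DMA_USE_PCI_RO.
 */
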
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

/**
 * mmap a DRM device map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise looks the map up in
 * drm_device::map_hash, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access the AGP
                         * aperture through its bus address, so for memory of
                         * type _DRM_AGP we sort out the real physical pages
                         * and mappings in fault().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
#else
                if (remap_pfn_range(vma, vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
                        return -EAGAIN;
#endif

                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

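/*
 * Summary of the map-type handling above: _DRM_AGP maps on platforms where
 * the CPU cannot use the aperture are populated lazily by drm_do_vm_fault();
 * registers and framebuffers are remapped up front with io_remap_pfn_range()
 * (plain remap_pfn_range() on ARM); _DRM_CONSISTENT memory is remapped
 * directly and then falls through to the _DRM_SHM setup; shared and
 * scatter-gather maps rely on their fault handlers.
 */
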
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);

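/*
 * Illustrative userspace usage (a sketch, not part of this file): a legacy
 * DRM client passes the offset of a map as the mmap(2) offset on the device
 * fd, e.g.
 *
 *     void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, map_offset);
 *
 * where drm_fd, map_size and map_offset are hypothetical names for the open
 * DRM file descriptor and the size/offset describing one of the maps handled
 * above.  An offset of 0 (on non-Apple hardware) selects the DMA buffer
 * mapping handled by drm_mmap_dma().
 */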