linux/drivers/gpu/drm/drm_vm.c
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

/*
 * Compute a pgprot_t with caching disabled as appropriate for register and
 * framebuffer I/O mappings on each supported architecture.
 */
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

/*
 * Compute a pgprot_t for mappings of DMA memory; only non-cache-coherent
 * PowerPC needs an uncached mapping here.
 */
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
        return tmp;
}
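
/*
 * Both helpers only massage protection bits; installing the translation is
 * left to the mmap path below.  A minimal sketch of the intended pairing,
 * mirroring drm_mmap_locked() and assuming "map" and "vma" as used there:
 *
 *      vma->vm_page_prot = drm_io_prot(map->type, vma);
 *      if (io_remap_pfn_range(vma, vma->vm_start,
 *                             map->offset >> PAGE_SHIFT,
 *                             vma->vm_end - vma->vm_start,
 *                             vma->vm_page_prot))
 *              return -EAGAIN;
 */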

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map; if it's AGP memory, find the real physical page that
 * backs the faulting address, take a reference on it and hand it back in
 * \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                /*
                 * list_for_each_entry() never leaves the cursor NULL; on a
                 * miss it points back at the list head, so test for that.
                 */
                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */
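
/*
 * A worked example of the translation above, with made-up numbers: if an
 * agpmem chunk is bound at bus address 0x10000000 for 256 pages and the
 * fault hits baddr = 0x10003000, then with 4 KiB pages
 * (baddr - agpmem->bound) >> PAGE_SHIFT = 3, so the handler returns a
 * reference to agpmem->memory->pages[3].
 */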

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page backing the faulting offset,
 * take a reference on it and hand it back in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* Ours was the only VMA still referencing this map */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_GEM:
                                DRM_ERROR("tried to rmmap GEM object\n");
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}
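
/*
 * The pagelist holds kernel virtual addresses of the DMA pages, so a small
 * worked example (made-up numbers): for a fault 0x5234 bytes into the VMA
 * with 4 KiB pages, page_nr = 5 and virt_to_page() is applied to
 * dma->pagelist[5] + 0x234; the sub-page offset is harmless because
 * virt_to_page() resolves any address within a page to the same struct page.
 */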

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
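
/*
 * Index arithmetic above, with made-up numbers: if the map starts two pages
 * into the scatter-gather area (map_offset = 0x2000) and the fault is one
 * page into the VMA (offset = 0x1000), then with 4 KiB pages
 * page_offset = 1 + 2 = 3 and entry->pagelist[3] backs the fault.
 */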

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure tracking \p vma and add it to
 * drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}
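
/*
 * The exact-length rule above means a userspace mapping of the DMA area has
 * to cover every page.  A minimal sketch, assuming 4 KiB pages and a
 * hypothetical page_count learned from the buffer-info ioctls; offset 0 is
 * what routes an mmap() to drm_mmap_dma() (see drm_mmap_locked() below):
 *
 *      void *dma = mmap(NULL, page_count << 12, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 */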

resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise looks the map up in drm_device::map_hash,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access the AGP
                         * aperture's bus addresses directly, so for memory of
                         * type _DRM_AGP we sort out the real physical pages
                         * and mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
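
/*
 * A hedged userspace sketch of how drm_mmap() is typically driven: a map is
 * created with DRM_IOCTL_ADD_MAP, the token returned in map.handle becomes
 * the mmap offset (which drm_mmap_locked() resolves via dev->map_hash), and
 * mmap() then installs the mapping.  Error handling is omitted; the device
 * path, physical base and size are assumptions for illustration.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <drm/drm.h>
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      struct drm_map map = {
 *              .offset = 0xfd000000,
 *              .size = 0x1000,
 *              .type = _DRM_REGISTERS,
 *      };
 *      ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *      void *regs = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, (off_t)(unsigned long)map.handle);
 */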