linux/drivers/gpu/drm/drm_vm.c
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"

struct drm_vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
        pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
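
/*
 * Summary of the decision above (a restatement, not additional code): on
 * x86 and powerpc the attribute depends only on the map type and flags:
 *
 *      _DRM_REGISTERS without _DRM_WRITE_COMBINING  ->  pgprot_noncached()
 *      everything else (e.g. _DRM_FRAME_BUFFER)     ->  pgprot_writecombine()
 *
 * On ia64 the choice instead follows the EFI memory map, and on
 * sparc/arm/mips I/O maps are always uncached.
 */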

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and store it in \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map; this fault path is only used when the AGP
         * aperture can't be accessed directly by the CPU.
         */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */
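
/*
 * Worked example of the AGP lookup above (all numbers hypothetical): with
 * an agpmem entry bound at bus address 0x10000 covering 16 pages, a fault
 * 0x3000 bytes into the VMA of a map at offset 0x10000 gives
 *
 *      baddr  = 0x10000 + 0x3000 = 0x13000
 *      offset = (0x13000 - 0x10000) >> PAGE_SHIFT = 3
 *
 * so pages[3] of that AGP memory block is what gets inserted into the VMA.
 */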

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * store it in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}
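
/*
 * The SHM handle is a vmalloc() area, so faulting addresses map linearly
 * onto it; e.g. (hypothetical) a fault two pages into the VMA resolves to
 *
 *      vmalloc_to_page((void *)((unsigned long)map->handle + 2 * PAGE_SIZE));
 *
 * which is exactly the arithmetic performed above.
 */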

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes the map information if this was the last mapping of it and the
 * map is not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* This vma was the only remaining mapping of this map */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_legacy_pci_free(dev, &dmah);
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}
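
/*
 * Sketch of the lookup above (values hypothetical): dma->pagelist holds
 * the kernel virtual address of each DMA buffer page, so a fault at
 * vma->vm_start + 0x5000 gives page_nr = 5 and
 *
 *      page = virt_to_page((void *)dma->pagelist[5]);
 *
 * Since the DMA map is always mapped at page offset 0, page_nr equals
 * vmf->pgoff, as the comment in the code notes.
 */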

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
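
/*
 * Worked example (hypothetical numbers): the SG area starts at kernel
 * virtual address dev->sg->virtual; if this map begins 8 pages into it,
 * then map_offset >> PAGE_SHIFT == 8, and a fault 2 pages into the VMA
 * indexes entry->pagelist[2 + 8], i.e. the tenth page of the SG backing
 * store.
 */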

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};
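
/*
 * Summary (derived from drm_mmap_locked() below) of which table serves
 * which map type:
 *
 *      _DRM_AGP with cant_use_aperture           -> drm_vm_ops (per-page fault)
 *      _DRM_AGP/_DRM_FRAME_BUFFER/_DRM_REGISTERS -> drm_vm_ops (pre-mapped)
 *      _DRM_SHM and _DRM_CONSISTENT              -> drm_vm_shm_ops
 *      _DRM_SCATTER_GATHER                       -> drm_vm_sg_ops
 *      DMA buffers (vm_pgoff == 0)               -> drm_vm_dma_ops
 */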

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
                struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
                struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}
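
/*
 * Note: the VM layer calls these open/close hooks not only at mmap() and
 * munmap() time but also whenever a VMA is duplicated, e.g. across fork(),
 * so vmalist ends up holding one drm_vma_entry per live mapping rather
 * than one per mmap() call.
 */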

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Checks that the mapped length covers exactly the DMA buffer pages, sets
 * the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping via drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}
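
/*
 * Userspace counterpart (a sketch, not taken from any driver): the DMA
 * buffer pool is selected by mapping page offset 0, and the length must
 * equal dma->page_count pages exactly or the -EINVAL above triggers:
 *
 *      size_t len = (size_t)page_count << PAGE_SHIFT;  // page_count as
 *                                                      // reported by the
 *                                                      // legacy buffer ioctls
 *      void *bufs = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, drm_fd, 0);
 */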

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the map type and the pages are remapped.
 * Finally the mapping is registered via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access the AGP
                         * aperture's bus addresses directly, so for _DRM_AGP
                         * maps we sort out the real physical pages and
                         * mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
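
/*
 * Userspace view (a sketch, values hypothetical): for non-DMA maps the
 * offset passed to mmap() is the map's token as registered in
 * dev->map_hash, typically obtained from the legacy DRM_IOCTL_ADD_MAP or
 * DRM_IOCTL_GET_MAP ioctls:
 *
 *      struct drm_map m = { .size = 4096, .type = _DRM_REGISTERS };
 *      ioctl(drm_fd, DRM_IOCTL_ADD_MAP, &m);   // kernel fills in m.offset
 *      void *regs = mmap(NULL, m.size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, drm_fd, m.offset);
 */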

void drm_legacy_vma_flush(struct drm_device *dev)
{
        struct drm_vma_entry *vma, *vma_temp;

        /* Clear vma list (only needed for legacy drivers) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                kfree(vma);
        }
}

int drm_vma_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_vma_entry *pt;
        struct vm_area_struct *vma;
        unsigned long vma_count = 0;
#if defined(__i386__)
        unsigned int pgprot;
#endif

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(pt, &dev->vmalist, head)
                vma_count++;

        seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
                   vma_count, high_memory,
                   (void *)(unsigned long)virt_to_phys(high_memory));

        list_for_each_entry(pt, &dev->vmalist, head) {
                vma = pt->vma;
                if (!vma)
                        continue;
                seq_printf(m,
                           "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
                           pt->pid,
                           (void *)vma->vm_start, (void *)vma->vm_end,
                           vma->vm_flags & VM_READ ? 'r' : '-',
                           vma->vm_flags & VM_WRITE ? 'w' : '-',
                           vma->vm_flags & VM_EXEC ? 'x' : '-',
                           vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
                           vma->vm_flags & VM_LOCKED ? 'l' : '-',
                           vma->vm_flags & VM_IO ? 'i' : '-',
                           vma->vm_pgoff);

#if defined(__i386__)
                pgprot = pgprot_val(vma->vm_page_prot);
                seq_printf(m, " %c%c%c%c%c%c%c%c%c",
                           pgprot & _PAGE_PRESENT ? 'p' : '-',
                           pgprot & _PAGE_RW ? 'w' : 'r',
                           pgprot & _PAGE_USER ? 'u' : 's',
                           pgprot & _PAGE_PWT ? 't' : 'b',
                           pgprot & _PAGE_PCD ? 'u' : 'c',
                           pgprot & _PAGE_ACCESSED ? 'a' : '-',
                           pgprot & _PAGE_DIRTY ? 'd' : '-',
                           pgprot & _PAGE_PSE ? 'm' : 'k',
                           pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
                seq_printf(m, "\n");
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
}
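
/*
 * Sample output of the vma info file above (all values hypothetical; %pK
 * may hash or zero the pointers depending on kptr_restrict):
 *
 *      vma use count: 1, high_memory = 00000000c0000000, 0x0000000040000000
 *
 *       1234 0x00007f12a4000000-0x00007f12a4100000 rw-s-i 0x000fd000000
 *
 * One line per tracked mapping: pid, address range, read/write/execute/
 * shared/locked/IO flags, then vm_pgoff with "000" appended to render it
 * as a byte offset.
 */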