linux/drivers/gpu/drm/drm_bufs.c
/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
                                                  struct drm_local_map *map)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                /*
                 * Because the kernel-userspace ABI is fixed at a 32-bit offset
                 * while PCI resources may live above that, we only compare the
                 * lower 32 bits of the map offset for maps of type
                 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
                 * It is assumed that if a driver has more than one resource
                 * of each type, the lower 32 bits are different.
                 */
                if (!entry->map ||
                    map->type != entry->map->type ||
                    entry->master != dev->master)
                        continue;
                switch (map->type) {
                case _DRM_SHM:
                        if (map->flags != _DRM_CONTAINS_LOCK)
                                break;
                        return entry;
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if ((entry->map->offset & 0xffffffff) ==
                            (map->offset & 0xffffffff))
                                return entry;
                default: /* Make gcc happy */
                        ;
                }
                if (entry->map->offset == map->offset)
                        return entry;
        }

        return NULL;
}

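/*
 * Worked example of the truncated comparison above (the addresses are
 * illustrative, not taken from any particular device): on a system whose
 * PCI window places a register BAR at 0x1c0000000, a map requested at
 * offset 0xc0000000 still matches, because only the low 32 bits take
 * part in the comparison:
 *
 *	(0x1c0000000 & 0xffffffff) == (0xc0000000 & 0xffffffff)
 *
 * This is exactly the ambiguity the comment warns about; drivers are
 * expected to keep the low 32 bits of same-typed resources distinct.
 */
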
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
                          unsigned long user_token, int hashed_handle, int shm)
{
        int use_hashed_handle, shift;
        unsigned long add;

#if (BITS_PER_LONG == 64)
        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
        use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

        if (!use_hashed_handle) {
                int ret;

                hash->key = user_token >> PAGE_SHIFT;
                ret = drm_ht_insert_item(&dev->map_hash, hash);
                if (ret != -EINVAL)
                        return ret;
        }

        shift = 0;
        add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
        if (shm && (SHMLBA > PAGE_SIZE)) {
                int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

                /* For shared memory, we have to preserve the SHMLBA
                 * bits of the eventual vma->vm_pgoff value during
                 * mmap().  Otherwise we run into cache aliasing problems
                 * on some platforms.  On these platforms, the pgoff of
                 * a mmap() request is used to pick a suitable virtual
                 * address for the mmap() region such that it will not
                 * cause cache aliasing problems.
                 *
                 * Therefore, make sure the SHMLBA relevant bits of the
                 * hash value we use are equal to those in the original
                 * kernel virtual address.
                 */
                shift = bits;
                add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
        }

        return drm_ht_just_insert_please(&dev->map_hash, hash,
                                         user_token, 32 - PAGE_SHIFT - 3,
                                         shift, add);
}

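/*
 * Worked example of the SHMLBA handling above, assuming (hypothetically)
 * PAGE_SIZE = 4 KiB (PAGE_SHIFT = 12) and SHMLBA = 16 KiB, as on some
 * VIPT-cache ARM parts:
 *
 *	bits  = ilog2(16384 >> 12) + 1 = ilog2(4) + 1 = 3
 *	shift = 3
 *	add  |= (user_token >> PAGE_SHIFT) & 0x7
 *
 * so the three low page-offset bits of the returned handle match those
 * of the kernel virtual address, and a later mmap() of the handle lands
 * on the same cache color as the kernel mapping.
 */
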
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
                           unsigned int size, enum drm_map_type type,
                           enum drm_map_flags flags,
                           struct drm_map_list **maplist)
{
        struct drm_local_map *map;
        struct drm_map_list *list;
        unsigned long user_token;
        int ret;

        map = kmalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                kfree(map);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
                  (unsigned long long)map->offset, map->size, map->type);

        /* page-align _DRM_SHM maps. They are allocated here so there is no security
         * hole created by that and it works around various broken drivers that use
         * a non-aligned quantity to map the SAREA. --BenH
         */
        if (map->type == _DRM_SHM)
                map->size = PAGE_ALIGN(map->size);

        if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                kfree(map);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        kfree(map);
                        return -EINVAL;
                }
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        kfree(map);
                        *maplist = list;
                        return 0;
                }

                if (map->type == _DRM_FRAME_BUFFER ||
                    (map->flags & _DRM_WRITE_COMBINING)) {
                        map->mtrr =
                                arch_phys_wc_add(map->offset, map->size);
                }
                if (map->type == _DRM_REGISTERS) {
                        if (map->flags & _DRM_WRITE_COMBINING)
                                map->handle = ioremap_wc(map->offset,
                                                         map->size);
                        else
                                map->handle = ioremap(map->offset, map->size);
                        if (!map->handle) {
                                kfree(map);
                                return -ENOMEM;
                        }
                }

                break;
        case _DRM_SHM:
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size, list->map->size);
                                list->map->size = map->size;
                        }

                        kfree(map);
                        *maplist = list;
                        return 0;
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, order_base_2(map->size), map->handle);
                if (!map->handle) {
                        kfree(map);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->master->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                kfree(map);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
                struct drm_agp_mem *entry;
                int valid = 0;

                if (!dev->agp) {
                        kfree(map);
                        return -EINVAL;
                }
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr = dev->agp->agp_mtrr; /* for getmap */

                /* This assumes the DRM is in total control of AGP space.
                 * That's not always the case, as AGP can be in the control
                 * of user space (e.g. the i810 driver). In that case the
                 * list below is empty, the loop is skipped, and we only
                 * return -EPERM when dev->agp->memory is actually populated
                 * yet no entry covers the requested range.
                 */
                list_for_each_entry(entry, &dev->agp->memory, head) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!list_empty(&dev->agp->memory) && !valid) {
                        kfree(map);
                        return -EPERM;
                }
                DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
                          (unsigned long long)map->offset, map->size);

                break;
        }
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        kfree(map);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * Since we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                map->handle = dma_alloc_coherent(&dev->pdev->dev,
                                                 map->size,
                                                 &map->offset,
                                                 GFP_KERNEL);
                if (!map->handle) {
                        kfree(map);
                        return -ENOMEM;
                }
                break;
        default:
                kfree(map);
                return -EINVAL;
        }

        list = kzalloc(sizeof(*list), GFP_KERNEL);
        if (!list) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                kfree(map);
                return -ENOMEM;
        }
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist);

        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
                map->offset;
        ret = drm_map_handle(dev, &list->hash, user_token, 0,
                             (map->type == _DRM_SHM));
        if (ret) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                kfree(map);
                kfree(list);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        list->user_token = list->hash.key << PAGE_SHIFT;
        mutex_unlock(&dev->struct_mutex);

        if (!(map->flags & _DRM_DRIVER))
                list->master = dev->master;
        *maplist = list;
        return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
        struct drm_map_list *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

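/*
 * Hypothetical driver-side usage sketch (not taken from any in-tree
 * driver): a legacy driver would typically register its register BAR at
 * load time so clients can mmap() it later.  The BAR index and flags are
 * illustrative only:
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 *
 * On success, regs->handle holds the ioremap()'d kernel mapping of the
 * registers.
 */
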
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
                                         unsigned int token)
{
        struct drm_map_list *_entry;

        list_for_each_entry(_entry, &dev->maplist, head)
                if (_entry->user_token == token)
                        return _entry->map;
        return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

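/*
 * Illustrative lookup sketch (variable names are hypothetical): resolving
 * the user token handed back by the ADD_MAP ioctl into the kernel-side
 * map:
 *
 *	struct drm_local_map *map = drm_legacy_findmap(dev, token);
 *
 *	if (!map)
 *		return -EINVAL;
 */
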
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_map *map = data;
        struct drm_map_list *maplist;
        int err;

        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
                return -EPERM;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        err = drm_addmap_core(dev, map->offset, map->size, map->type,
                              map->flags, &maplist);

        if (err)
                return err;

        /* Avoid a warning on 64-bit: the cast isn't pretty, but the ioctl
         * ABI was fixed long ago, so it's too late to change it now.
         */
        map->handle = (void *)(unsigned long)maplist->user_token;

        /*
         * It appears that there are no users of this value whatsoever --
         * drmAddMap just discards it.  Let's not encourage its use.
         * (Keeping drm_addmap_core's returned mtrr value would be wrong --
         *  it's not a real mtrr index anymore.)
         */
        map->mtrr = -1;

        return 0;
}

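/*
 * Illustrative userspace counterpart of the ioctl above (the size is an
 * example): creating a lock-bearing SAREA, as an X server would:
 *
 *	struct drm_map req = {
 *		.size  = 0x2000,
 *		.type  = _DRM_SHM,
 *		.flags = _DRM_CONTAINS_LOCK,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 *
 * On success, req.handle carries the user token that is later passed as
 * the offset argument of mmap() on the DRM device node.
 */
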
/*
 * Get information about a mapping.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_map *map = data;
        struct drm_map_list *r_list = NULL;
        struct list_head *list;
        int idx;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        idx = map->offset;
        if (idx < 0)
                return -EINVAL;

        i = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist) {
                if (i == idx) {
                        r_list = list_entry(list, struct drm_map_list, head);
                        break;
                }
                i++;
        }
        if (!r_list || !r_list->map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        map->offset = r_list->map->offset;
        map->size = r_list->map->size;
        map->type = r_list->map->type;
        map->flags = r_list->map->flags;
        map->handle = (void *)(unsigned long) r_list->user_token;
        map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources
 * (such as MTRRs) if it isn't.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
        struct drm_map_list *r_list = NULL, *list_t;
        int found = 0;
        struct drm_master *master;

        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        master = r_list->master;
                        list_del(&r_list->head);
                        drm_ht_remove_key(&dev->map_hash,
                                          r_list->user_token >> PAGE_SHIFT);
                        kfree(r_list);
                        found = 1;
                        break;
                }
        }

        if (!found)
                return -EINVAL;

        switch (map->type) {
        case _DRM_REGISTERS:
                iounmap(map->handle);
                fallthrough;
        case _DRM_FRAME_BUFFER:
                arch_phys_wc_del(map->mtrr);
                break;
        case _DRM_SHM:
                vfree(map->handle);
                if (master) {
                        if (dev->sigdata.lock == master->lock.hw_lock)
                                dev->sigdata.lock = NULL;
                        master->lock.hw_lock = NULL;   /* SHM removed */
                        master->lock.file_priv = NULL;
                        wake_up_interruptible_all(&master->lock.lock_queue);
                }
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dma_free_coherent(&dev->pdev->dev,
                                  map->size,
                                  map->handle,
                                  map->offset);
                break;
        }
        kfree(map);

        return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;

        mutex_lock(&dev->struct_mutex);
        drm_legacy_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
        struct drm_map_list *r_list, *list_temp;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_legacy_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
        struct drm_map_list *r_list, *list_temp;

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_map *request = data;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
                    r_list->user_token == (unsigned long)request->handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* The list has wrapped around to the head pointer, or it's empty
         * and we didn't find anything.
         */
        if (list_empty(&dev->maplist) || !map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_legacy_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
                                  struct drm_buf_entry *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                kfree(entry->seglist);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        kfree(entry->buflist[i].dev_private);
                }
                kfree(entry->buflist);

                entry->buf_count = 0;
        }
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
                           struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_entry *entry;
        struct drm_agp_mem *agp_entry;
        struct drm_buf *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i, valid;
        struct drm_buf **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        /* Make sure buffers are located in AGP memory that we own */
        valid = 0;
        list_for_each_entry(agp_entry, &dev->agp->memory, head) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!list_empty(&dev->agp->memory) && !valid) {
                DRM_DEBUG("zone invalid\n");
                return -EINVAL;
        }
        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

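/*
 * Worked example of the order/size arithmetic shared by the addbufs
 * variants, assuming 4 KiB pages (PAGE_SHIFT = 12); the request size is
 * illustrative:
 *
 *	request->size = 16384
 *	order      = order_base_2(16384) = 14
 *	size       = 1 << 14             = 16384
 *	page_order = 14 - 12             = 2
 *	total      = PAGE_SIZE << 2      = 16384
 *
 * A non-power-of-two request such as 10000 is rounded up the same way:
 * order_base_2(10000) = 14, so each buffer still occupies a 16 KiB slot.
 */
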
int drm_legacy_addbufs_pci(struct drm_device *dev,
                           struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        struct drm_buf_entry *entry;
        drm_dma_handle_t *dmah;
        struct drm_buf *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        struct drm_buf **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
                  request->count, request->size, size, order);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
        if (!entry->seglist) {
                kfree(entry->buflist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
                                      sizeof(*dma->pagelist),
                                      GFP_KERNEL);
        if (!temp_pagelist) {
                kfree(entry->buflist);
                kfree(entry->seglist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {

                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        kfree(temp_pagelist);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                                = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = kzalloc(buf->dev_priv_size,
                                                GFP_KERNEL);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                kfree(temp_pagelist);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                kfree(temp_pagelist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                kfree(dma->pagelist);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        if (request->flags & _DRM_PCI_BUFFER_RO)
                dma->flags = _DRM_DMA_USE_PCI_RO;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

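/*
 * Illustrative userspace view of the PCI path above (the values are
 * examples only): asking the device for 32 page-aligned DMA buffers of
 * 4 KiB each.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 4096,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return, desc.count and desc.size report what was actually allocated,
 * which may differ from the request (see the assignments near the end of
 * drm_legacy_addbufs_pci()).
 */
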
static int drm_legacy_addbufs_sg(struct drm_device *dev,
                                 struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_entry *entry;
        struct drm_buf *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        struct drm_buf **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_buf_desc *request = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
        if (request->flags & _DRM_AGP_BUFFER)
                ret = drm_legacy_addbufs_agp(dev, request);
        else
#endif
        if (request->flags & _DRM_SG_BUFFER)
                ret = drm_legacy_addbufs_sg(dev, request);
        else if (request->flags & _DRM_FB_BUFFER)
                ret = -EINVAL;
        else
                ret = drm_legacy_addbufs_pci(dev, request);

        return ret;
}

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
                        void *data, int *p,
                        int (*f)(void *, int, struct drm_buf_entry *))
{
        struct drm_device_dma *dma = dev->dma;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->buf_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (*p >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        struct drm_buf_entry *from = &dma->bufs[i];

                        if (from->buf_count) {
                                if (f(data, count, from) < 0)
                                        return -EFAULT;
                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].low_mark,
                                          dma->bufs[i].high_mark);
                                ++count;
                        }
                }
        }
        *p = count;

        return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
        struct drm_buf_info *request = data;
        struct drm_buf_desc __user *to = &request->list[count];
        struct drm_buf_desc v = {.count = from->buf_count,
                                 .size = from->buf_size,
                                 .low_mark = from->low_mark,
                                 .high_mark = from->high_mark};

        if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
                return -EFAULT;
        return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_buf_info *request = data;

        return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/*
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_desc *request = data;
        int order;
        struct drm_buf_entry *entry;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);
        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
                return -EINVAL;
        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
                return -EINVAL;

        entry->low_mark = request->low_mark;
        entry->high_mark = request->high_mark;

        return 0;
}

/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_free *request = data;
        int i;
        int idx;
        struct drm_buf *buf;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        DRM_DEBUG("%d\n", request->count);
        for (i = 0; i < request->count; i++) {
                if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                idx = array_index_nospec(idx, dma->buf_count);
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                  task_pid_nr(current));
                        return -EINVAL;
                }
                drm_legacy_free_buffer(dev, buf);
        }

        return 0;
}

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as a request for the PCI
 * buffers and dispatches to drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
                         void __user **v,
                         int (*f)(void *, int, unsigned long,
                                 struct drm_buf *),
                                 struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        int retcode = 0;
        unsigned long virtual;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->buf_lock);

        if (*p >= dma->buf_count) {
                if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))) {
                        struct drm_local_map *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }
                        virtual = vm_mmap(file_priv->filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED,
                                          token);
                } else {
                        virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, 0);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                *v = (void __user *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (f(data, i, virtual, dma->buflist[i]) < 0) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
done:
        *p = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

        return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
                        struct drm_buf *buf)
{
        struct drm_buf_map *request = data;
        /* client-virtual address of this buffer */
        unsigned long address = virtual + buf->offset;

        if (copy_to_user(&request->list[idx].idx, &buf->idx,
                         sizeof(request->list[0].idx)))
                return -EFAULT;
        if (copy_to_user(&request->list[idx].total, &buf->total,
                         sizeof(request->list[0].total)))
                return -EFAULT;
        if (clear_user(&request->list[idx].used, sizeof(int)))
                return -EFAULT;
        if (copy_to_user(&request->list[idx].address, &address,
                         sizeof(address)))
                return -EFAULT;
        return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_buf_map *request = data;

        return __drm_legacy_mapbufs(dev, data, &request->count,
                                    &request->virtual, map_one_buf,
                                    file_priv);
}

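/*
 * Illustrative userspace counterpart of the ioctl above (the buffer count
 * is an example): mapping every DMA buffer and receiving one drm_buf_pub
 * record per buffer.
 *
 *	struct drm_buf_pub list[32];
 *	struct drm_buf_map map = {
 *		.count = 32,
 *		.list  = list,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);
 *
 * Afterwards map.virtual points at the start of the mapping and each
 * list[i].address is the client-virtual address of buffer i.
 */
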
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (dev->driver->dma_ioctl)
                return dev->driver->dma_ioctl(dev, data, file_priv);
        else
                return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
                        return entry->map;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);