linux/drivers/gpu/drm/drm_bufs.c
   1/*
   2 * Legacy: Generic DRM Buffer Management
   3 *
   4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
   5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
   6 * All Rights Reserved.
   7 *
   8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
   9 * Author: Gareth Hughes <gareth@valinux.com>
  10 *
  11 * Permission is hereby granted, free of charge, to any person obtaining a
  12 * copy of this software and associated documentation files (the "Software"),
  13 * to deal in the Software without restriction, including without limitation
  14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  15 * and/or sell copies of the Software, and to permit persons to whom the
  16 * Software is furnished to do so, subject to the following conditions:
  17 *
  18 * The above copyright notice and this permission notice (including the next
  19 * paragraph) shall be included in all copies or substantial portions of the
  20 * Software.
  21 *
  22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  28 * OTHER DEALINGS IN THE SOFTWARE.
  29 */
  30
  31#include <linux/export.h>
  32#include <linux/log2.h>
  33#include <linux/mm.h>
  34#include <linux/mman.h>
  35#include <linux/nospec.h>
  36#include <linux/pci.h>
  37#include <linux/slab.h>
  38#include <linux/uaccess.h>
  39#include <linux/vmalloc.h>
  40
  41#include <asm/shmparam.h>
  42
  43#include <drm/drm_agpsupport.h>
  44#include <drm/drm_device.h>
  45#include <drm/drm_drv.h>
  46#include <drm/drm_file.h>
  47#include <drm/drm_print.h>
  48
  49#include "drm_legacy.h"
  50
  51
  52static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
  53                                                  struct drm_local_map *map)
  54{
  55        struct drm_map_list *entry;
  56
  57        list_for_each_entry(entry, &dev->maplist, head) {
  58                /*
  59                 * Because the kernel-userspace ABI is fixed at a 32-bit offset
  60                 * while PCI resources may live above that, we only compare the
  61                 * lower 32 bits of the map offset for maps of type
   62                 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
   63                 * It is assumed that if a driver has more than one resource
   64                 * of each type, the lower 32 bits are different.
  65                 */
  66                if (!entry->map ||
  67                    map->type != entry->map->type ||
  68                    entry->master != dev->master)
  69                        continue;
  70                switch (map->type) {
  71                case _DRM_SHM:
  72                        if (map->flags != _DRM_CONTAINS_LOCK)
  73                                break;
  74                        return entry;
  75                case _DRM_REGISTERS:
  76                case _DRM_FRAME_BUFFER:
  77                        if ((entry->map->offset & 0xffffffff) ==
  78                            (map->offset & 0xffffffff))
  79                                return entry;
  80                        break;
  81                default: /* Make gcc happy */
  82                        ;
  83                }
  84                if (entry->map->offset == map->offset)
  85                        return entry;
  86        }
  87
  88        return NULL;
  89}
  90
  91static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
  92                          unsigned long user_token, int hashed_handle, int shm)
  93{
  94        int use_hashed_handle, shift;
  95        unsigned long add;
  96
  97#if (BITS_PER_LONG == 64)
  98        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
  99#elif (BITS_PER_LONG == 32)
 100        use_hashed_handle = hashed_handle;
 101#else
 102#error Unsupported long size. Neither 64 nor 32 bits.
 103#endif
 104
 105        if (!use_hashed_handle) {
 106                int ret;
 107
 108                hash->key = user_token >> PAGE_SHIFT;
 109                ret = drm_ht_insert_item(&dev->map_hash, hash);
 110                if (ret != -EINVAL)
 111                        return ret;
 112        }
 113
 114        shift = 0;
 115        add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
 116        if (shm && (SHMLBA > PAGE_SIZE)) {
 117                int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
 118
 119                /* For shared memory, we have to preserve the SHMLBA
 120                 * bits of the eventual vma->vm_pgoff value during
 121                 * mmap().  Otherwise we run into cache aliasing problems
 122                 * on some platforms.  On these platforms, the pgoff of
 123                 * a mmap() request is used to pick a suitable virtual
 124                 * address for the mmap() region such that it will not
 125                 * cause cache aliasing problems.
 126                 *
 127                 * Therefore, make sure the SHMLBA relevant bits of the
 128                 * hash value we use are equal to those in the original
 129                 * kernel virtual address.
 130                 */
 131                shift = bits;
 132                add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
 133        }
 134
 135        return drm_ht_just_insert_please(&dev->map_hash, hash,
 136                                         user_token, 32 - PAGE_SHIFT - 3,
 137                                         shift, add);
 138}
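
/*
 * Worked example of the SHMLBA handling above (illustrative assumption:
 * a platform where SHMLBA == 4 * PAGE_SIZE):
 *
 *	bits  = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3
 *	shift = 3
 *	add  |= (user_token >> PAGE_SHIFT) & 0x7
 *
 * so the low three bits of the kernel address's page number are carried
 * into the hash key, keeping the eventual vma->vm_pgoff congruent with
 * the kernel virtual address modulo SHMLBA and avoiding cache aliasing.
 */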
 139
 140/*
 141 * Core function to create a range of memory available for mapping by a
 142 * non-root process.
 143 *
 144 * Adjusts the memory offset to its absolute value according to the mapping
  145 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
  146 * applicable and supported by the kernel.
 147 */
 148static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
 149                           unsigned int size, enum drm_map_type type,
 150                           enum drm_map_flags flags,
 151                           struct drm_map_list **maplist)
 152{
 153        struct drm_local_map *map;
 154        struct drm_map_list *list;
 155        unsigned long user_token;
 156        int ret;
 157
 158        map = kmalloc(sizeof(*map), GFP_KERNEL);
 159        if (!map)
 160                return -ENOMEM;
 161
 162        map->offset = offset;
 163        map->size = size;
 164        map->flags = flags;
 165        map->type = type;
 166
 167        /* Only allow shared memory to be removable since we only keep enough
  168         * bookkeeping information about shared memory to allow for removal
 169         * when processes fork.
 170         */
 171        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
 172                kfree(map);
 173                return -EINVAL;
 174        }
 175        DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 176                  (unsigned long long)map->offset, map->size, map->type);
 177
  178        /* Page-align _DRM_SHM maps. They are allocated here, so this creates no
  179         * security hole, and it also works around various broken drivers that use
  180         * a non-aligned quantity to map the SAREA. --BenH
  181         */
 182        if (map->type == _DRM_SHM)
 183                map->size = PAGE_ALIGN(map->size);
 184
 185        if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 186                kfree(map);
 187                return -EINVAL;
 188        }
 189        map->mtrr = -1;
 190        map->handle = NULL;
 191
 192        switch (map->type) {
 193        case _DRM_REGISTERS:
 194        case _DRM_FRAME_BUFFER:
 195#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
 196                if (map->offset + (map->size-1) < map->offset ||
 197                    map->offset < virt_to_phys(high_memory)) {
 198                        kfree(map);
 199                        return -EINVAL;
 200                }
 201#endif
 202                /* Some drivers preinitialize some maps, without the X Server
 203                 * needing to be aware of it.  Therefore, we just return success
 204                 * when the server tries to create a duplicate map.
 205                 */
 206                list = drm_find_matching_map(dev, map);
 207                if (list != NULL) {
 208                        if (list->map->size != map->size) {
 209                                DRM_DEBUG("Matching maps of type %d with "
 210                                          "mismatched sizes, (%ld vs %ld)\n",
 211                                          map->type, map->size,
 212                                          list->map->size);
 213                                list->map->size = map->size;
 214                        }
 215
 216                        kfree(map);
 217                        *maplist = list;
 218                        return 0;
 219                }
 220
 221                if (map->type == _DRM_FRAME_BUFFER ||
 222                    (map->flags & _DRM_WRITE_COMBINING)) {
 223                        map->mtrr =
 224                                arch_phys_wc_add(map->offset, map->size);
 225                }
 226                if (map->type == _DRM_REGISTERS) {
 227                        if (map->flags & _DRM_WRITE_COMBINING)
 228                                map->handle = ioremap_wc(map->offset,
 229                                                         map->size);
 230                        else
 231                                map->handle = ioremap(map->offset, map->size);
 232                        if (!map->handle) {
 233                                kfree(map);
 234                                return -ENOMEM;
 235                        }
 236                }
 237
 238                break;
 239        case _DRM_SHM:
 240                list = drm_find_matching_map(dev, map);
 241                if (list != NULL) {
 242                        if (list->map->size != map->size) {
 243                                DRM_DEBUG("Matching maps of type %d with "
 244                                          "mismatched sizes, (%ld vs %ld)\n",
 245                                          map->type, map->size, list->map->size);
 246                                list->map->size = map->size;
 247                        }
 248
 249                        kfree(map);
 250                        *maplist = list;
 251                        return 0;
 252                }
 253                map->handle = vmalloc_user(map->size);
 254                DRM_DEBUG("%lu %d %p\n",
 255                          map->size, order_base_2(map->size), map->handle);
 256                if (!map->handle) {
 257                        kfree(map);
 258                        return -ENOMEM;
 259                }
 260                map->offset = (unsigned long)map->handle;
 261                if (map->flags & _DRM_CONTAINS_LOCK) {
 262                        /* Prevent a 2nd X Server from creating a 2nd lock */
 263                        if (dev->master->lock.hw_lock != NULL) {
 264                                vfree(map->handle);
 265                                kfree(map);
 266                                return -EBUSY;
 267                        }
 268                        dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;    /* Pointer to lock */
 269                }
 270                break;
 271        case _DRM_AGP: {
 272                struct drm_agp_mem *entry;
 273                int valid = 0;
 274
 275                if (!dev->agp) {
 276                        kfree(map);
 277                        return -EINVAL;
 278                }
 279#ifdef __alpha__
 280                map->offset += dev->hose->mem_space->start;
 281#endif
 282                /* In some cases (i810 driver), user space may have already
 283                 * added the AGP base itself, because dev->agp->base previously
 284                 * only got set during AGP enable.  So, only add the base
 285                 * address if the map's offset isn't already within the
 286                 * aperture.
 287                 */
 288                if (map->offset < dev->agp->base ||
 289                    map->offset > dev->agp->base +
 290                    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
 291                        map->offset += dev->agp->base;
 292                }
 293                map->mtrr = dev->agp->agp_mtrr; /* for getmap */
 294
  295                /* This assumes the DRM is in total control of AGP space.
  296                 * That's not always the case, as AGP can be under the control
  297                 * of user space (e.g. the i810 driver).  In that case the loop
  298                 * below is skipped, and we double-check that dev->agp->memory
  299                 * is actually populated and the range is invalid before EPERM'ing.
  300                 */
  301                list_for_each_entry(entry, &dev->agp->memory, head) {
 302                        if ((map->offset >= entry->bound) &&
 303                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 304                                valid = 1;
 305                                break;
 306                        }
 307                }
 308                if (!list_empty(&dev->agp->memory) && !valid) {
 309                        kfree(map);
 310                        return -EPERM;
 311                }
 312                DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
 313                          (unsigned long long)map->offset, map->size);
 314
 315                break;
 316        }
 317        case _DRM_SCATTER_GATHER:
 318                if (!dev->sg) {
 319                        kfree(map);
 320                        return -EINVAL;
 321                }
 322                map->offset += (unsigned long)dev->sg->virtual;
 323                break;
 324        case _DRM_CONSISTENT:
  325                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
  326                 * As we're limiting the address to 2^32-1 (or less),
  327                 * casting it down to 32 bits is no problem, but we
  328                 * need to point to a 64-bit variable first. */
 329                map->handle = dma_alloc_coherent(dev->dev,
 330                                                 map->size,
 331                                                 &map->offset,
 332                                                 GFP_KERNEL);
 333                if (!map->handle) {
 334                        kfree(map);
 335                        return -ENOMEM;
 336                }
 337                break;
 338        default:
 339                kfree(map);
 340                return -EINVAL;
 341        }
 342
 343        list = kzalloc(sizeof(*list), GFP_KERNEL);
 344        if (!list) {
 345                if (map->type == _DRM_REGISTERS)
 346                        iounmap(map->handle);
 347                kfree(map);
  348                return -ENOMEM;
 349        }
 350        list->map = map;
 351
 352        mutex_lock(&dev->struct_mutex);
 353        list_add(&list->head, &dev->maplist);
 354
 355        /* Assign a 32-bit handle */
 356        /* We do it here so that dev->struct_mutex protects the increment */
 357        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
 358                map->offset;
 359        ret = drm_map_handle(dev, &list->hash, user_token, 0,
 360                             (map->type == _DRM_SHM));
 361        if (ret) {
 362                if (map->type == _DRM_REGISTERS)
 363                        iounmap(map->handle);
 364                kfree(map);
 365                kfree(list);
 366                mutex_unlock(&dev->struct_mutex);
 367                return ret;
 368        }
 369
 370        list->user_token = list->hash.key << PAGE_SHIFT;
 371        mutex_unlock(&dev->struct_mutex);
 372
 373        if (!(map->flags & _DRM_DRIVER))
 374                list->master = dev->master;
 375        *maplist = list;
 376        return 0;
 377}
 378
 379int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
 380                      unsigned int size, enum drm_map_type type,
 381                      enum drm_map_flags flags, struct drm_local_map **map_ptr)
 382{
 383        struct drm_map_list *list;
 384        int rc;
 385
 386        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
 387        if (!rc)
 388                *map_ptr = list->map;
 389        return rc;
 390}
 391EXPORT_SYMBOL(drm_legacy_addmap);
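
/*
 * Usage sketch (not part of this file; the foo_priv name and the choice
 * of PCI BAR 0 are illustrative assumptions): a legacy driver typically
 * creates its register mapping from its ->load hook, roughly like this:
 *
 *	struct drm_local_map *mmio;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_DRIVER, &mmio);
 *	if (ret)
 *		return ret;
 *	foo_priv->mmio = mmio;
 */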
 392
 393struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
 394                                         unsigned int token)
 395{
 396        struct drm_map_list *_entry;
 397
 398        list_for_each_entry(_entry, &dev->maplist, head)
 399                if (_entry->user_token == token)
 400                        return _entry->map;
 401        return NULL;
 402}
 403EXPORT_SYMBOL(drm_legacy_findmap);
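
/*
 * Usage sketch (illustrative; "init" stands for a hypothetical driver
 * ioctl argument carrying the 32-bit token userspace got back from the
 * addmap ioctl):
 *
 *	struct drm_local_map *sarea;
 *
 *	sarea = drm_legacy_findmap(dev, init->sarea_handle);
 *	if (!sarea)
 *		return -EINVAL;
 */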
 404
 405/*
 406 * Ioctl to specify a range of memory that is available for mapping by a
 407 * non-root process.
 408 *
 409 * \param inode device inode.
 410 * \param file_priv DRM file private.
 411 * \param cmd command.
 412 * \param arg pointer to a drm_map structure.
 413 * \return zero on success or a negative value on error.
 414 *
 415 */
 416int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
 417                            struct drm_file *file_priv)
 418{
 419        struct drm_map *map = data;
 420        struct drm_map_list *maplist;
 421        int err;
 422
 423        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 424                return -EPERM;
 425
 426        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 427            !drm_core_check_feature(dev, DRIVER_LEGACY))
 428                return -EOPNOTSUPP;
 429
 430        err = drm_addmap_core(dev, map->offset, map->size, map->type,
 431                              map->flags, &maplist);
 432
 433        if (err)
 434                return err;
 435
  436        /* Avoid a warning on 64-bit; this cast isn't very nice, but the API was set long ago, so it's too late to change it. */
 437        map->handle = (void *)(unsigned long)maplist->user_token;
 438
 439        /*
 440         * It appears that there are no users of this value whatsoever --
 441         * drmAddMap just discards it.  Let's not encourage its use.
 442         * (Keeping drm_addmap_core's returned mtrr value would be wrong --
 443         *  it's not a real mtrr index anymore.)
 444         */
 445        map->mtrr = -1;
 446
 447        return 0;
 448}
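
/*
 * Userspace-side sketch of this ioctl (assumptions: "fd" is an open DRM
 * device node and the caller is the DRM master or has CAP_SYS_ADMIN; the
 * 64 KiB size is arbitrary):
 *
 *	struct drm_map map = {
 *		.size  = 0x10000,
 *		.type  = _DRM_SHM,
 *		.flags = _DRM_CONTAINS_LOCK,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *
 * On success, map.handle holds the 32-bit token that is later passed as
 * the mmap() offset to map this region.
 */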
 449
 450/*
  451 * Get information about a mapping.
 452 *
 453 * \param inode device inode.
 454 * \param file_priv DRM file private.
 455 * \param cmd command.
 456 * \param arg user argument, pointing to a drm_map structure.
 457 *
 458 * \return zero on success or a negative number on failure.
 459 *
 460 * Searches for the mapping with the specified offset and copies its information
  461 * into userspace.
 462 */
 463int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
 464                            struct drm_file *file_priv)
 465{
 466        struct drm_map *map = data;
 467        struct drm_map_list *r_list = NULL;
 468        struct list_head *list;
 469        int idx;
 470        int i;
 471
 472        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 473            !drm_core_check_feature(dev, DRIVER_LEGACY))
 474                return -EOPNOTSUPP;
 475
 476        idx = map->offset;
 477        if (idx < 0)
 478                return -EINVAL;
 479
 480        i = 0;
 481        mutex_lock(&dev->struct_mutex);
 482        list_for_each(list, &dev->maplist) {
 483                if (i == idx) {
 484                        r_list = list_entry(list, struct drm_map_list, head);
 485                        break;
 486                }
 487                i++;
 488        }
 489        if (!r_list || !r_list->map) {
 490                mutex_unlock(&dev->struct_mutex);
 491                return -EINVAL;
 492        }
 493
 494        map->offset = r_list->map->offset;
 495        map->size = r_list->map->size;
 496        map->type = r_list->map->type;
 497        map->flags = r_list->map->flags;
 498        map->handle = (void *)(unsigned long) r_list->user_token;
 499        map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
 500
 501        mutex_unlock(&dev->struct_mutex);
 502
 503        return 0;
 504}
 505
 506/*
  507 * Remove a map private from the list and deallocate resources if the mapping
  508 * isn't in use.
  509 *
  510 * Searches for the map on drm_device::maplist, removes it from the list, checks
  511 * whether it's still being used, and frees any associated resources (such as
  512 * MTRRs) if it isn't.
 513 *
 514 * \sa drm_legacy_addmap
 515 */
 516int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 517{
 518        struct drm_map_list *r_list = NULL, *list_t;
 519        int found = 0;
 520        struct drm_master *master;
 521
 522        /* Find the list entry for the map and remove it */
 523        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 524                if (r_list->map == map) {
 525                        master = r_list->master;
 526                        list_del(&r_list->head);
 527                        drm_ht_remove_key(&dev->map_hash,
 528                                          r_list->user_token >> PAGE_SHIFT);
 529                        kfree(r_list);
 530                        found = 1;
 531                        break;
 532                }
 533        }
 534
 535        if (!found)
 536                return -EINVAL;
 537
 538        switch (map->type) {
 539        case _DRM_REGISTERS:
 540                iounmap(map->handle);
 541                fallthrough;
 542        case _DRM_FRAME_BUFFER:
 543                arch_phys_wc_del(map->mtrr);
 544                break;
 545        case _DRM_SHM:
 546                vfree(map->handle);
 547                if (master) {
 548                        if (dev->sigdata.lock == master->lock.hw_lock)
 549                                dev->sigdata.lock = NULL;
 550                        master->lock.hw_lock = NULL;   /* SHM removed */
 551                        master->lock.file_priv = NULL;
 552                        wake_up_interruptible_all(&master->lock.lock_queue);
 553                }
 554                break;
 555        case _DRM_AGP:
 556        case _DRM_SCATTER_GATHER:
 557                break;
 558        case _DRM_CONSISTENT:
 559                dma_free_coherent(dev->dev,
 560                                  map->size,
 561                                  map->handle,
 562                                  map->offset);
 563                break;
 564        }
 565        kfree(map);
 566
 567        return 0;
 568}
 569EXPORT_SYMBOL(drm_legacy_rmmap_locked);
 570
 571void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 572{
 573        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 574            !drm_core_check_feature(dev, DRIVER_LEGACY))
 575                return;
 576
 577        mutex_lock(&dev->struct_mutex);
 578        drm_legacy_rmmap_locked(dev, map);
 579        mutex_unlock(&dev->struct_mutex);
 580}
 581EXPORT_SYMBOL(drm_legacy_rmmap);
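
/*
 * Usage sketch (the foo_priv name is an illustrative assumption): a
 * driver that created a map in its ->load hook usually drops it again
 * from its ->unload hook:
 *
 *	if (foo_priv->mmio) {
 *		drm_legacy_rmmap(dev, foo_priv->mmio);
 *		foo_priv->mmio = NULL;
 *	}
 */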
 582
 583void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
 584{
 585        struct drm_map_list *r_list, *list_temp;
 586
 587        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 588                return;
 589
 590        mutex_lock(&dev->struct_mutex);
 591        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
 592                if (r_list->master == master) {
 593                        drm_legacy_rmmap_locked(dev, r_list->map);
 594                        r_list = NULL;
 595                }
 596        }
 597        mutex_unlock(&dev->struct_mutex);
 598}
 599
 600void drm_legacy_rmmaps(struct drm_device *dev)
 601{
 602        struct drm_map_list *r_list, *list_temp;
 603
 604        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
 605                drm_legacy_rmmap(dev, r_list->map);
 606}
 607
 608/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 609 * the last close of the device, and this is necessary for cleanup when things
 610 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 611 * like a pointless exercise since they're going away anyway.
 612 *
 613 * One use case might be after addmap is allowed for normal users for SHM and
 614 * gets used by drivers that the server doesn't need to care about.  This seems
 615 * unlikely.
 616 *
 617 * \param inode device inode.
 618 * \param file_priv DRM file private.
 619 * \param cmd command.
 620 * \param arg pointer to a struct drm_map structure.
 621 * \return zero on success or a negative value on error.
 622 */
 623int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
 624                           struct drm_file *file_priv)
 625{
 626        struct drm_map *request = data;
 627        struct drm_local_map *map = NULL;
 628        struct drm_map_list *r_list;
 629        int ret;
 630
 631        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 632            !drm_core_check_feature(dev, DRIVER_LEGACY))
 633                return -EOPNOTSUPP;
 634
 635        mutex_lock(&dev->struct_mutex);
 636        list_for_each_entry(r_list, &dev->maplist, head) {
 637                if (r_list->map &&
 638                    r_list->user_token == (unsigned long)request->handle &&
 639                    r_list->map->flags & _DRM_REMOVABLE) {
 640                        map = r_list->map;
 641                        break;
 642                }
 643        }
 644
  645        /* List has wrapped around to the head pointer, or it's empty and we
  646         * didn't find anything.
 647         */
 648        if (list_empty(&dev->maplist) || !map) {
 649                mutex_unlock(&dev->struct_mutex);
 650                return -EINVAL;
 651        }
 652
 653        /* Register and framebuffer maps are permanent */
 654        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
 655                mutex_unlock(&dev->struct_mutex);
 656                return 0;
 657        }
 658
 659        ret = drm_legacy_rmmap_locked(dev, map);
 660
 661        mutex_unlock(&dev->struct_mutex);
 662
 663        return ret;
 664}
 665
 666/*
 667 * Cleanup after an error on one of the addbufs() functions.
 668 *
 669 * \param dev DRM device.
 670 * \param entry buffer entry where the error occurred.
 671 *
 672 * Frees any pages and buffers associated with the given entry.
 673 */
 674static void drm_cleanup_buf_error(struct drm_device *dev,
 675                                  struct drm_buf_entry *entry)
 676{
 677        int i;
 678
 679        if (entry->seg_count) {
 680                for (i = 0; i < entry->seg_count; i++) {
 681                        if (entry->seglist[i]) {
 682                                drm_pci_free(dev, entry->seglist[i]);
 683                        }
 684                }
 685                kfree(entry->seglist);
 686
 687                entry->seg_count = 0;
 688        }
 689
 690        if (entry->buf_count) {
 691                for (i = 0; i < entry->buf_count; i++) {
 692                        kfree(entry->buflist[i].dev_private);
 693                }
 694                kfree(entry->buflist);
 695
 696                entry->buf_count = 0;
 697        }
 698}
 699
 700#if IS_ENABLED(CONFIG_AGP)
 701/*
 702 * Add AGP buffers for DMA transfers.
 703 *
 704 * \param dev struct drm_device to which the buffers are to be added.
 705 * \param request pointer to a struct drm_buf_desc describing the request.
 706 * \return zero on success or a negative number on failure.
 707 *
  708 * After some sanity checks, creates a drm_buf structure for each buffer and
 709 * reallocates the buffer list of the same size order to accommodate the new
 710 * buffers.
 711 */
 712int drm_legacy_addbufs_agp(struct drm_device *dev,
 713                           struct drm_buf_desc *request)
 714{
 715        struct drm_device_dma *dma = dev->dma;
 716        struct drm_buf_entry *entry;
 717        struct drm_agp_mem *agp_entry;
 718        struct drm_buf *buf;
 719        unsigned long offset;
 720        unsigned long agp_offset;
 721        int count;
 722        int order;
 723        int size;
 724        int alignment;
 725        int page_order;
 726        int total;
 727        int byte_count;
 728        int i, valid;
 729        struct drm_buf **temp_buflist;
 730
 731        if (!dma)
 732                return -EINVAL;
 733
 734        count = request->count;
 735        order = order_base_2(request->size);
 736        size = 1 << order;
 737
 738        alignment = (request->flags & _DRM_PAGE_ALIGN)
 739            ? PAGE_ALIGN(size) : size;
 740        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 741        total = PAGE_SIZE << page_order;
 742
 743        byte_count = 0;
 744        agp_offset = dev->agp->base + request->agp_start;
 745
 746        DRM_DEBUG("count:      %d\n", count);
 747        DRM_DEBUG("order:      %d\n", order);
 748        DRM_DEBUG("size:       %d\n", size);
 749        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
 750        DRM_DEBUG("alignment:  %d\n", alignment);
 751        DRM_DEBUG("page_order: %d\n", page_order);
 752        DRM_DEBUG("total:      %d\n", total);
 753
 754        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 755                return -EINVAL;
 756
 757        /* Make sure buffers are located in AGP memory that we own */
 758        valid = 0;
 759        list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 760                if ((agp_offset >= agp_entry->bound) &&
 761                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 762                        valid = 1;
 763                        break;
 764                }
 765        }
 766        if (!list_empty(&dev->agp->memory) && !valid) {
 767                DRM_DEBUG("zone invalid\n");
 768                return -EINVAL;
 769        }
 770        spin_lock(&dev->buf_lock);
 771        if (dev->buf_use) {
 772                spin_unlock(&dev->buf_lock);
 773                return -EBUSY;
 774        }
 775        atomic_inc(&dev->buf_alloc);
 776        spin_unlock(&dev->buf_lock);
 777
 778        mutex_lock(&dev->struct_mutex);
 779        entry = &dma->bufs[order];
 780        if (entry->buf_count) {
 781                mutex_unlock(&dev->struct_mutex);
 782                atomic_dec(&dev->buf_alloc);
 783                return -ENOMEM; /* May only call once for each order */
 784        }
 785
 786        if (count < 0 || count > 4096) {
 787                mutex_unlock(&dev->struct_mutex);
 788                atomic_dec(&dev->buf_alloc);
 789                return -EINVAL;
 790        }
 791
 792        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
 793        if (!entry->buflist) {
 794                mutex_unlock(&dev->struct_mutex);
 795                atomic_dec(&dev->buf_alloc);
 796                return -ENOMEM;
 797        }
 798
 799        entry->buf_size = size;
 800        entry->page_order = page_order;
 801
 802        offset = 0;
 803
 804        while (entry->buf_count < count) {
 805                buf = &entry->buflist[entry->buf_count];
 806                buf->idx = dma->buf_count + entry->buf_count;
 807                buf->total = alignment;
 808                buf->order = order;
 809                buf->used = 0;
 810
 811                buf->offset = (dma->byte_count + offset);
 812                buf->bus_address = agp_offset + offset;
 813                buf->address = (void *)(agp_offset + offset);
 814                buf->next = NULL;
 815                buf->waiting = 0;
 816                buf->pending = 0;
 817                buf->file_priv = NULL;
 818
 819                buf->dev_priv_size = dev->driver->dev_priv_size;
 820                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
 821                if (!buf->dev_private) {
 822                        /* Set count correctly so we free the proper amount. */
 823                        entry->buf_count = count;
 824                        drm_cleanup_buf_error(dev, entry);
 825                        mutex_unlock(&dev->struct_mutex);
 826                        atomic_dec(&dev->buf_alloc);
 827                        return -ENOMEM;
 828                }
 829
 830                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 831
 832                offset += alignment;
 833                entry->buf_count++;
 834                byte_count += PAGE_SIZE << page_order;
 835        }
 836
 837        DRM_DEBUG("byte_count: %d\n", byte_count);
 838
 839        temp_buflist = krealloc(dma->buflist,
 840                                (dma->buf_count + entry->buf_count) *
 841                                sizeof(*dma->buflist), GFP_KERNEL);
 842        if (!temp_buflist) {
 843                /* Free the entry because it isn't valid */
 844                drm_cleanup_buf_error(dev, entry);
 845                mutex_unlock(&dev->struct_mutex);
 846                atomic_dec(&dev->buf_alloc);
 847                return -ENOMEM;
 848        }
 849        dma->buflist = temp_buflist;
 850
 851        for (i = 0; i < entry->buf_count; i++) {
 852                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 853        }
 854
 855        dma->buf_count += entry->buf_count;
 856        dma->seg_count += entry->seg_count;
 857        dma->page_count += byte_count >> PAGE_SHIFT;
 858        dma->byte_count += byte_count;
 859
 860        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 861        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 862
 863        mutex_unlock(&dev->struct_mutex);
 864
 865        request->count = entry->buf_count;
 866        request->size = size;
 867
 868        dma->flags = _DRM_DMA_USE_AGP;
 869
 870        atomic_dec(&dev->buf_alloc);
 871        return 0;
 872}
 873EXPORT_SYMBOL(drm_legacy_addbufs_agp);
 874#endif /* CONFIG_AGP */
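
/*
 * Worked example of the size bookkeeping shared by the addbufs_*()
 * variants (assuming PAGE_SIZE == 4096, i.e. PAGE_SHIFT == 12):
 *
 *	request->size = 65536  =>  order      = order_base_2(65536)  = 16
 *	                           size       = 1 << order           = 65536
 *	                           page_order = order - PAGE_SHIFT   = 4
 *	                           total      = PAGE_SIZE << 4       = 65536
 *
 * With _DRM_PAGE_ALIGN set, alignment == PAGE_ALIGN(size) == 65536, so
 * successive buffers are placed at 64 KiB intervals within the region.
 */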
 875
 876int drm_legacy_addbufs_pci(struct drm_device *dev,
 877                           struct drm_buf_desc *request)
 878{
 879        struct drm_device_dma *dma = dev->dma;
 880        int count;
 881        int order;
 882        int size;
 883        int total;
 884        int page_order;
 885        struct drm_buf_entry *entry;
 886        drm_dma_handle_t *dmah;
 887        struct drm_buf *buf;
 888        int alignment;
 889        unsigned long offset;
 890        int i;
 891        int byte_count;
 892        int page_count;
 893        unsigned long *temp_pagelist;
 894        struct drm_buf **temp_buflist;
 895
 896        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
 897                return -EOPNOTSUPP;
 898
 899        if (!dma)
 900                return -EINVAL;
 901
 902        if (!capable(CAP_SYS_ADMIN))
 903                return -EPERM;
 904
 905        count = request->count;
 906        order = order_base_2(request->size);
 907        size = 1 << order;
 908
 909        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
 910                  request->count, request->size, size, order);
 911
 912        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 913                return -EINVAL;
 914
 915        alignment = (request->flags & _DRM_PAGE_ALIGN)
 916            ? PAGE_ALIGN(size) : size;
 917        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 918        total = PAGE_SIZE << page_order;
 919
 920        spin_lock(&dev->buf_lock);
 921        if (dev->buf_use) {
 922                spin_unlock(&dev->buf_lock);
 923                return -EBUSY;
 924        }
 925        atomic_inc(&dev->buf_alloc);
 926        spin_unlock(&dev->buf_lock);
 927
 928        mutex_lock(&dev->struct_mutex);
 929        entry = &dma->bufs[order];
 930        if (entry->buf_count) {
 931                mutex_unlock(&dev->struct_mutex);
 932                atomic_dec(&dev->buf_alloc);
 933                return -ENOMEM; /* May only call once for each order */
 934        }
 935
 936        if (count < 0 || count > 4096) {
 937                mutex_unlock(&dev->struct_mutex);
 938                atomic_dec(&dev->buf_alloc);
 939                return -EINVAL;
 940        }
 941
 942        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
 943        if (!entry->buflist) {
 944                mutex_unlock(&dev->struct_mutex);
 945                atomic_dec(&dev->buf_alloc);
 946                return -ENOMEM;
 947        }
 948
 949        entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
 950        if (!entry->seglist) {
 951                kfree(entry->buflist);
 952                mutex_unlock(&dev->struct_mutex);
 953                atomic_dec(&dev->buf_alloc);
 954                return -ENOMEM;
 955        }
 956
 957        /* Keep the original pagelist until we know all the allocations
 958         * have succeeded
 959         */
 960        temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
 961                                      sizeof(*dma->pagelist),
 962                                      GFP_KERNEL);
 963        if (!temp_pagelist) {
 964                kfree(entry->buflist);
 965                kfree(entry->seglist);
 966                mutex_unlock(&dev->struct_mutex);
 967                atomic_dec(&dev->buf_alloc);
 968                return -ENOMEM;
 969        }
 970        memcpy(temp_pagelist,
 971               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
 972        DRM_DEBUG("pagelist: %d entries\n",
 973                  dma->page_count + (count << page_order));
 974
 975        entry->buf_size = size;
 976        entry->page_order = page_order;
 977        byte_count = 0;
 978        page_count = 0;
 979
 980        while (entry->buf_count < count) {
 981
 982                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
 983
 984                if (!dmah) {
 985                        /* Set count correctly so we free the proper amount. */
 986                        entry->buf_count = count;
 987                        entry->seg_count = count;
 988                        drm_cleanup_buf_error(dev, entry);
 989                        kfree(temp_pagelist);
 990                        mutex_unlock(&dev->struct_mutex);
 991                        atomic_dec(&dev->buf_alloc);
 992                        return -ENOMEM;
 993                }
 994                entry->seglist[entry->seg_count++] = dmah;
 995                for (i = 0; i < (1 << page_order); i++) {
 996                        DRM_DEBUG("page %d @ 0x%08lx\n",
 997                                  dma->page_count + page_count,
 998                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
 999                        temp_pagelist[dma->page_count + page_count++]
1000                                = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
1001                }
1002                for (offset = 0;
1003                     offset + size <= total && entry->buf_count < count;
1004                     offset += alignment, ++entry->buf_count) {
1005                        buf = &entry->buflist[entry->buf_count];
1006                        buf->idx = dma->buf_count + entry->buf_count;
1007                        buf->total = alignment;
1008                        buf->order = order;
1009                        buf->used = 0;
1010                        buf->offset = (dma->byte_count + byte_count + offset);
1011                        buf->address = (void *)(dmah->vaddr + offset);
1012                        buf->bus_address = dmah->busaddr + offset;
1013                        buf->next = NULL;
1014                        buf->waiting = 0;
1015                        buf->pending = 0;
1016                        buf->file_priv = NULL;
1017
1018                        buf->dev_priv_size = dev->driver->dev_priv_size;
1019                        buf->dev_private = kzalloc(buf->dev_priv_size,
1020                                                GFP_KERNEL);
1021                        if (!buf->dev_private) {
1022                                /* Set count correctly so we free the proper amount. */
1023                                entry->buf_count = count;
1024                                entry->seg_count = count;
1025                                drm_cleanup_buf_error(dev, entry);
1026                                kfree(temp_pagelist);
1027                                mutex_unlock(&dev->struct_mutex);
1028                                atomic_dec(&dev->buf_alloc);
1029                                return -ENOMEM;
1030                        }
1031
1032                        DRM_DEBUG("buffer %d @ %p\n",
1033                                  entry->buf_count, buf->address);
1034                }
1035                byte_count += PAGE_SIZE << page_order;
1036        }
1037
1038        temp_buflist = krealloc(dma->buflist,
1039                                (dma->buf_count + entry->buf_count) *
1040                                sizeof(*dma->buflist), GFP_KERNEL);
1041        if (!temp_buflist) {
1042                /* Free the entry because it isn't valid */
1043                drm_cleanup_buf_error(dev, entry);
1044                kfree(temp_pagelist);
1045                mutex_unlock(&dev->struct_mutex);
1046                atomic_dec(&dev->buf_alloc);
1047                return -ENOMEM;
1048        }
1049        dma->buflist = temp_buflist;
1050
1051        for (i = 0; i < entry->buf_count; i++) {
1052                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1053        }
1054
1055        /* No allocations failed, so now we can replace the original pagelist
1056         * with the new one.
1057         */
1058        if (dma->page_count) {
1059                kfree(dma->pagelist);
1060        }
1061        dma->pagelist = temp_pagelist;
1062
1063        dma->buf_count += entry->buf_count;
1064        dma->seg_count += entry->seg_count;
1065        dma->page_count += entry->seg_count << page_order;
1066        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1067
1068        mutex_unlock(&dev->struct_mutex);
1069
1070        request->count = entry->buf_count;
1071        request->size = size;
1072
1073        if (request->flags & _DRM_PCI_BUFFER_RO)
1074                dma->flags = _DRM_DMA_USE_PCI_RO;
1075
1076        atomic_dec(&dev->buf_alloc);
1077        return 0;
1078
1079}
1080EXPORT_SYMBOL(drm_legacy_addbufs_pci);
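
/*
 * Sketch (illustrative, not taken from a real driver): a legacy driver
 * that needs a pool of DMA-coherent PCI buffers at init time can call
 * this helper directly instead of going through the ioctl; the count and
 * size below are arbitrary:
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size  = 4096,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *
 *	ret = drm_legacy_addbufs_pci(dev, &req);
 */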
1081
1082static int drm_legacy_addbufs_sg(struct drm_device *dev,
1083                                 struct drm_buf_desc *request)
1084{
1085        struct drm_device_dma *dma = dev->dma;
1086        struct drm_buf_entry *entry;
1087        struct drm_buf *buf;
1088        unsigned long offset;
1089        unsigned long agp_offset;
1090        int count;
1091        int order;
1092        int size;
1093        int alignment;
1094        int page_order;
1095        int total;
1096        int byte_count;
1097        int i;
1098        struct drm_buf **temp_buflist;
1099
1100        if (!drm_core_check_feature(dev, DRIVER_SG))
1101                return -EOPNOTSUPP;
1102
1103        if (!dma)
1104                return -EINVAL;
1105
1106        if (!capable(CAP_SYS_ADMIN))
1107                return -EPERM;
1108
1109        count = request->count;
1110        order = order_base_2(request->size);
1111        size = 1 << order;
1112
1113        alignment = (request->flags & _DRM_PAGE_ALIGN)
1114            ? PAGE_ALIGN(size) : size;
1115        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1116        total = PAGE_SIZE << page_order;
1117
1118        byte_count = 0;
1119        agp_offset = request->agp_start;
1120
1121        DRM_DEBUG("count:      %d\n", count);
1122        DRM_DEBUG("order:      %d\n", order);
1123        DRM_DEBUG("size:       %d\n", size);
1124        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1125        DRM_DEBUG("alignment:  %d\n", alignment);
1126        DRM_DEBUG("page_order: %d\n", page_order);
1127        DRM_DEBUG("total:      %d\n", total);
1128
1129        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1130                return -EINVAL;
1131
1132        spin_lock(&dev->buf_lock);
1133        if (dev->buf_use) {
1134                spin_unlock(&dev->buf_lock);
1135                return -EBUSY;
1136        }
1137        atomic_inc(&dev->buf_alloc);
1138        spin_unlock(&dev->buf_lock);
1139
1140        mutex_lock(&dev->struct_mutex);
1141        entry = &dma->bufs[order];
1142        if (entry->buf_count) {
1143                mutex_unlock(&dev->struct_mutex);
1144                atomic_dec(&dev->buf_alloc);
1145                return -ENOMEM; /* May only call once for each order */
1146        }
1147
1148        if (count < 0 || count > 4096) {
1149                mutex_unlock(&dev->struct_mutex);
1150                atomic_dec(&dev->buf_alloc);
1151                return -EINVAL;
1152        }
1153
1154        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
1155        if (!entry->buflist) {
1156                mutex_unlock(&dev->struct_mutex);
1157                atomic_dec(&dev->buf_alloc);
1158                return -ENOMEM;
1159        }
1160
1161        entry->buf_size = size;
1162        entry->page_order = page_order;
1163
1164        offset = 0;
1165
1166        while (entry->buf_count < count) {
1167                buf = &entry->buflist[entry->buf_count];
1168                buf->idx = dma->buf_count + entry->buf_count;
1169                buf->total = alignment;
1170                buf->order = order;
1171                buf->used = 0;
1172
1173                buf->offset = (dma->byte_count + offset);
1174                buf->bus_address = agp_offset + offset;
1175                buf->address = (void *)(agp_offset + offset
1176                                        + (unsigned long)dev->sg->virtual);
1177                buf->next = NULL;
1178                buf->waiting = 0;
1179                buf->pending = 0;
1180                buf->file_priv = NULL;
1181
1182                buf->dev_priv_size = dev->driver->dev_priv_size;
1183                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1184                if (!buf->dev_private) {
1185                        /* Set count correctly so we free the proper amount. */
1186                        entry->buf_count = count;
1187                        drm_cleanup_buf_error(dev, entry);
1188                        mutex_unlock(&dev->struct_mutex);
1189                        atomic_dec(&dev->buf_alloc);
1190                        return -ENOMEM;
1191                }
1192
1193                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1194
1195                offset += alignment;
1196                entry->buf_count++;
1197                byte_count += PAGE_SIZE << page_order;
1198        }
1199
1200        DRM_DEBUG("byte_count: %d\n", byte_count);
1201
1202        temp_buflist = krealloc(dma->buflist,
1203                                (dma->buf_count + entry->buf_count) *
1204                                sizeof(*dma->buflist), GFP_KERNEL);
1205        if (!temp_buflist) {
1206                /* Free the entry because it isn't valid */
1207                drm_cleanup_buf_error(dev, entry);
1208                mutex_unlock(&dev->struct_mutex);
1209                atomic_dec(&dev->buf_alloc);
1210                return -ENOMEM;
1211        }
1212        dma->buflist = temp_buflist;
1213
1214        for (i = 0; i < entry->buf_count; i++) {
1215                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1216        }
1217
1218        dma->buf_count += entry->buf_count;
1219        dma->seg_count += entry->seg_count;
1220        dma->page_count += byte_count >> PAGE_SHIFT;
1221        dma->byte_count += byte_count;
1222
1223        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1224        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1225
1226        mutex_unlock(&dev->struct_mutex);
1227
1228        request->count = entry->buf_count;
1229        request->size = size;
1230
1231        dma->flags = _DRM_DMA_USE_SG;
1232
1233        atomic_dec(&dev->buf_alloc);
1234        return 0;
1235}
1236
1237/*
1238 * Add buffers for DMA transfers (ioctl).
1239 *
1240 * \param inode device inode.
1241 * \param file_priv DRM file private.
1242 * \param cmd command.
1243 * \param arg pointer to a struct drm_buf_desc request.
1244 * \return zero on success or a negative number on failure.
1245 *
 1246 * Depending on the memory type specified in drm_buf_desc::flags and the
1247 * build options, it dispatches the call either to addbufs_agp(),
1248 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1249 * PCI memory respectively.
1250 */
1251int drm_legacy_addbufs(struct drm_device *dev, void *data,
1252                       struct drm_file *file_priv)
1253{
1254        struct drm_buf_desc *request = data;
1255        int ret;
1256
1257        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1258                return -EOPNOTSUPP;
1259
1260        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1261                return -EOPNOTSUPP;
1262
1263#if IS_ENABLED(CONFIG_AGP)
1264        if (request->flags & _DRM_AGP_BUFFER)
1265                ret = drm_legacy_addbufs_agp(dev, request);
1266        else
1267#endif
1268        if (request->flags & _DRM_SG_BUFFER)
1269                ret = drm_legacy_addbufs_sg(dev, request);
1270        else if (request->flags & _DRM_FB_BUFFER)
1271                ret = -EINVAL;
1272        else
1273                ret = drm_legacy_addbufs_pci(dev, request);
1274
1275        return ret;
1276}
1277
1278/*
1279 * Get information about the buffer mappings.
1280 *
 1281 * This was originally meant for debugging purposes, or for use by a sophisticated
 1282 * client library to determine how best to use the available buffers (e.g.,
1283 * large buffers can be used for image transfer).
1284 *
1285 * \param inode device inode.
1286 * \param file_priv DRM file private.
1287 * \param cmd command.
1288 * \param arg pointer to a drm_buf_info structure.
1289 * \return zero on success or a negative number on failure.
1290 *
1291 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 1292 * lock, preventing allocation of more buffers after this call. Information
1293 * about each requested buffer is then copied into user space.
1294 */
1295int __drm_legacy_infobufs(struct drm_device *dev,
1296                        void *data, int *p,
1297                        int (*f)(void *, int, struct drm_buf_entry *))
1298{
1299        struct drm_device_dma *dma = dev->dma;
1300        int i;
1301        int count;
1302
1303        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1304                return -EOPNOTSUPP;
1305
1306        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1307                return -EOPNOTSUPP;
1308
1309        if (!dma)
1310                return -EINVAL;
1311
1312        spin_lock(&dev->buf_lock);
1313        if (atomic_read(&dev->buf_alloc)) {
1314                spin_unlock(&dev->buf_lock);
1315                return -EBUSY;
1316        }
1317        ++dev->buf_use;         /* Can't allocate more after this call */
1318        spin_unlock(&dev->buf_lock);
1319
1320        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1321                if (dma->bufs[i].buf_count)
1322                        ++count;
1323        }
1324
1325        DRM_DEBUG("count = %d\n", count);
1326
1327        if (*p >= count) {
1328                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1329                        struct drm_buf_entry *from = &dma->bufs[i];
1330
1331                        if (from->buf_count) {
1332                                if (f(data, count, from) < 0)
1333                                        return -EFAULT;
1334                                DRM_DEBUG("%d %d %d %d %d\n",
1335                                          i,
1336                                          dma->bufs[i].buf_count,
1337                                          dma->bufs[i].buf_size,
1338                                          dma->bufs[i].low_mark,
1339                                          dma->bufs[i].high_mark);
1340                                ++count;
1341                        }
1342                }
1343        }
1344        *p = count;
1345
1346        return 0;
1347}
1348
1349static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
1350{
1351        struct drm_buf_info *request = data;
1352        struct drm_buf_desc __user *to = &request->list[count];
1353        struct drm_buf_desc v = {.count = from->buf_count,
1354                                 .size = from->buf_size,
1355                                 .low_mark = from->low_mark,
1356                                 .high_mark = from->high_mark};
1357
1358        if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
1359                return -EFAULT;
1360        return 0;
1361}
1362
1363int drm_legacy_infobufs(struct drm_device *dev, void *data,
1364                        struct drm_file *file_priv)
1365{
1366        struct drm_buf_info *request = data;
1367
1368        return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
1369}
1370
1371/*
1372 * Specifies a low and high water mark for buffer allocation
1373 *
1374 * \param inode device inode.
1375 * \param file_priv DRM file private.
1376 * \param cmd command.
1377 * \param arg a pointer to a drm_buf_desc structure.
1378 * \return zero on success or a negative number on failure.
1379 *
1380 * Verifies that the size order is bounded between the admissible orders and
 1381 * updates the low and high water marks of the respective drm_device_dma::bufs entry.
1382 *
1383 * \note This ioctl is deprecated and mostly never used.
1384 */
1385int drm_legacy_markbufs(struct drm_device *dev, void *data,
1386                        struct drm_file *file_priv)
1387{
1388        struct drm_device_dma *dma = dev->dma;
1389        struct drm_buf_desc *request = data;
1390        int order;
1391        struct drm_buf_entry *entry;
1392
1393        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1394                return -EOPNOTSUPP;
1395
1396        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1397                return -EOPNOTSUPP;
1398
1399        if (!dma)
1400                return -EINVAL;
1401
1402        DRM_DEBUG("%d, %d, %d\n",
1403                  request->size, request->low_mark, request->high_mark);
1404        order = order_base_2(request->size);
1405        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1406                return -EINVAL;
1407        entry = &dma->bufs[order];
1408
1409        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1410                return -EINVAL;
1411        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1412                return -EINVAL;
1413
1414        entry->low_mark = request->low_mark;
1415        entry->high_mark = request->high_mark;
1416
1417        return 0;
1418}
1419
1420/*
 1421 * Unreserve the buffers in the list, previously reserved using drmDMA.
1422 *
1423 * \param inode device inode.
1424 * \param file_priv DRM file private.
1425 * \param cmd command.
1426 * \param arg pointer to a drm_buf_free structure.
1427 * \return zero on success or a negative number on failure.
1428 *
1429 * Calls free_buffer() for each used buffer.
1430 * This function is primarily used for debugging.
1431 */
1432int drm_legacy_freebufs(struct drm_device *dev, void *data,
1433                        struct drm_file *file_priv)
1434{
1435        struct drm_device_dma *dma = dev->dma;
1436        struct drm_buf_free *request = data;
1437        int i;
1438        int idx;
1439        struct drm_buf *buf;
1440
1441        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1442                return -EOPNOTSUPP;
1443
1444        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1445                return -EOPNOTSUPP;
1446
1447        if (!dma)
1448                return -EINVAL;
1449
1450        DRM_DEBUG("%d\n", request->count);
1451        for (i = 0; i < request->count; i++) {
1452                if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1453                        return -EFAULT;
1454                if (idx < 0 || idx >= dma->buf_count) {
1455                        DRM_ERROR("Index %d (of %d max)\n",
1456                                  idx, dma->buf_count - 1);
1457                        return -EINVAL;
1458                }
1459                idx = array_index_nospec(idx, dma->buf_count);
1460                buf = dma->buflist[idx];
1461                if (buf->file_priv != file_priv) {
1462                        DRM_ERROR("Process %d freeing buffer not owned\n",
1463                                  task_pid_nr(current));
1464                        return -EINVAL;
1465                }
1466                drm_legacy_free_buffer(dev, buf);
1467        }
1468
1469        return 0;
1470}
1471
1472/*
1473 * Maps all of the DMA buffers into client-virtual space (ioctl).
1474 *
1475 * \param inode device inode.
1476 * \param file_priv DRM file private.
1477 * \param cmd command.
1478 * \param arg pointer to a drm_buf_map structure.
1479 * \return zero on success or a negative number on failure.
1480 *
1481 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1482 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 1483 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1484 * drm_mmap_dma().
1485 */
1486int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
1487                         void __user **v,
1488                         int (*f)(void *, int, unsigned long,
1489                                 struct drm_buf *),
1490                                 struct drm_file *file_priv)
1491{
1492        struct drm_device_dma *dma = dev->dma;
1493        int retcode = 0;
1494        unsigned long virtual;
1495        int i;
1496
1497        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1498                return -EOPNOTSUPP;
1499
1500        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1501                return -EOPNOTSUPP;
1502
1503        if (!dma)
1504                return -EINVAL;
1505
1506        spin_lock(&dev->buf_lock);
1507        if (atomic_read(&dev->buf_alloc)) {
1508                spin_unlock(&dev->buf_lock);
1509                return -EBUSY;
1510        }
1511        dev->buf_use++;         /* Can't allocate more after this call */
1512        spin_unlock(&dev->buf_lock);
1513
1514        if (*p >= dma->buf_count) {
1515                if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
1516                    || (drm_core_check_feature(dev, DRIVER_SG)
1517                        && (dma->flags & _DRM_DMA_USE_SG))) {
1518                        struct drm_local_map *map = dev->agp_buffer_map;
1519                        unsigned long token = dev->agp_buffer_token;
1520
1521                        if (!map) {
1522                                retcode = -EINVAL;
1523                                goto done;
1524                        }
1525                        virtual = vm_mmap(file_priv->filp, 0, map->size,
1526                                          PROT_READ | PROT_WRITE,
1527                                          MAP_SHARED,
1528                                          token);
1529                } else {
1530                        virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1531                                          PROT_READ | PROT_WRITE,
1532                                          MAP_SHARED, 0);
1533                }
1534                if (virtual > -1024UL) {
1535                        /* Real error */
1536                        retcode = (signed long)virtual;
1537                        goto done;
1538                }
1539                *v = (void __user *)virtual;
1540
1541                for (i = 0; i < dma->buf_count; i++) {
1542                        if (f(data, i, virtual, dma->buflist[i]) < 0) {
1543                                retcode = -EFAULT;
1544                                goto done;
1545                        }
1546                }
1547        }
1548      done:
1549        *p = dma->buf_count;
1550        DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
1551
1552        return retcode;
1553}
1554
1555static int map_one_buf(void *data, int idx, unsigned long virtual,
1556                        struct drm_buf *buf)
1557{
1558        struct drm_buf_map *request = data;
1559        unsigned long address = virtual + buf->offset;  /* *** */
1560
1561        if (copy_to_user(&request->list[idx].idx, &buf->idx,
1562                         sizeof(request->list[0].idx)))
1563                return -EFAULT;
1564        if (copy_to_user(&request->list[idx].total, &buf->total,
1565                         sizeof(request->list[0].total)))
1566                return -EFAULT;
1567        if (clear_user(&request->list[idx].used, sizeof(int)))
1568                return -EFAULT;
1569        if (copy_to_user(&request->list[idx].address, &address,
1570                         sizeof(address)))
1571                return -EFAULT;
1572        return 0;
1573}
1574
1575int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1576                       struct drm_file *file_priv)
1577{
1578        struct drm_buf_map *request = data;
1579
1580        return __drm_legacy_mapbufs(dev, data, &request->count,
1581                                    &request->virtual, map_one_buf,
1582                                    file_priv);
1583}
1584
1585int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1586                  struct drm_file *file_priv)
1587{
1588        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
1589                return -EOPNOTSUPP;
1590
1591        if (dev->driver->dma_ioctl)
1592                return dev->driver->dma_ioctl(dev, data, file_priv);
1593        else
1594                return -EINVAL;
1595}
1596
1597struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1598{
1599        struct drm_map_list *entry;
1600
1601        list_for_each_entry(entry, &dev->maplist, head) {
1602                if (entry->map && entry->map->type == _DRM_SHM &&
1603                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1604                        return entry->map;
1605                }
1606        }
1607        return NULL;
1608}
1609EXPORT_SYMBOL(drm_legacy_getsarea);
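
/*
 * Usage sketch (the foo_priv name is an illustrative assumption): drivers
 * that still rely on the SAREA typically look it up once and cache the
 * result:
 *
 *	foo_priv->sarea = drm_legacy_getsarea(dev);
 *	if (!foo_priv->sarea) {
 *		DRM_ERROR("could not find sarea!\n");
 *		return -EINVAL;
 *	}
 */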
1610