linux/drivers/gpu/drm/drm_bufs.c
/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
                                                  struct drm_local_map *map)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                /*
                 * Because the kernel-userspace ABI is fixed at a 32-bit offset
                 * while PCI resources may live above that, we only compare the
                 * lower 32 bits of the map offset for maps of type
                 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
                 * It is assumed that if a driver has more than one resource
                 * of each type, the lower 32 bits differ.
                 */
                if (!entry->map ||
                    map->type != entry->map->type ||
                    entry->master != dev->master)
                        continue;
                switch (map->type) {
                case _DRM_SHM:
                        if (map->flags != _DRM_CONTAINS_LOCK)
                                break;
                        return entry;
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if ((entry->map->offset & 0xffffffff) ==
                            (map->offset & 0xffffffff))
                                return entry;
                        break;
                default: /* Make gcc happy */
                        ;
                }
                if (entry->map->offset == map->offset)
                        return entry;
        }

        return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
                          unsigned long user_token, int hashed_handle, int shm)
{
        int use_hashed_handle, shift;
        unsigned long add;

#if (BITS_PER_LONG == 64)
        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
        use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

        if (!use_hashed_handle) {
                int ret;

                hash->key = user_token >> PAGE_SHIFT;
                ret = drm_ht_insert_item(&dev->map_hash, hash);
                if (ret != -EINVAL)
                        return ret;
        }

        shift = 0;
        add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
        if (shm && (SHMLBA > PAGE_SIZE)) {
                int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

                /* For shared memory, we have to preserve the SHMLBA
                 * bits of the eventual vma->vm_pgoff value during
                 * mmap().  Otherwise we run into cache aliasing problems
                 * on some platforms.  On these platforms, the pgoff of
                 * a mmap() request is used to pick a suitable virtual
                 * address for the mmap() region such that it will not
                 * cause cache aliasing problems.
                 *
                 * Therefore, make sure the SHMLBA relevant bits of the
                 * hash value we use are equal to those in the original
                 * kernel virtual address.
                 */
                shift = bits;
                add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
        }

        return drm_ht_just_insert_please(&dev->map_hash, hash,
                                         user_token, 32 - PAGE_SHIFT - 3,
                                         shift, add);
}

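/*
 * Worked example for the SHMLBA handling above (illustrative only; the
 * numbers assume PAGE_SHIFT == 12 and SHMLBA == 4 * PAGE_SIZE, as on some
 * ARM configurations): bits = ilog2(16384 >> 12) + 1 = 3, so for a kernel
 * virtual address of 0xc0035000 the low 3 bits of
 * (user_token >> PAGE_SHIFT), here 0x5, are folded into 'add'.  Any handle
 * drm_ht_just_insert_please() hands out then shares those low pgoff bits
 * with the original address, and the architecture's mmap() colouring code
 * will pick a user address of the same cache colour.
 */
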
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
                           unsigned int size, enum drm_map_type type,
                           enum drm_map_flags flags,
                           struct drm_map_list **maplist)
{
        struct drm_local_map *map;
        struct drm_map_list *list;
        drm_dma_handle_t *dmah;
        unsigned long user_token;
        int ret;

        map = kmalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                kfree(map);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
                  (unsigned long long)map->offset, map->size, map->type);

        /* Page-align _DRM_SHM maps.  They are allocated here, so there is
         * no security hole created by that, and it works around various
         * broken drivers that use a non-aligned quantity to map the SAREA.
         * --BenH
         */
        if (map->type == _DRM_SHM)
                map->size = PAGE_ALIGN(map->size);

        if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                kfree(map);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        kfree(map);
                        return -EINVAL;
                }
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        kfree(map);
                        *maplist = list;
                        return 0;
                }

                if (map->type == _DRM_FRAME_BUFFER ||
                    (map->flags & _DRM_WRITE_COMBINING)) {
                        map->mtrr =
                                arch_phys_wc_add(map->offset, map->size);
                }
                if (map->type == _DRM_REGISTERS) {
                        if (map->flags & _DRM_WRITE_COMBINING)
                                map->handle = ioremap_wc(map->offset,
                                                         map->size);
                        else
                                map->handle = ioremap(map->offset, map->size);
                        if (!map->handle) {
                                kfree(map);
                                return -ENOMEM;
                        }
                }

                break;
        case _DRM_SHM:
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size, list->map->size);
                                list->map->size = map->size;
                        }

                        kfree(map);
                        *maplist = list;
                        return 0;
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, order_base_2(map->size), map->handle);
                if (!map->handle) {
                        kfree(map);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->master->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                kfree(map);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
                struct drm_agp_mem *entry;
                int valid = 0;

                if (!dev->agp) {
                        kfree(map);
                        return -EINVAL;
                }
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr = dev->agp->agp_mtrr; /* for getmap */

                /* This assumes the DRM is in total control of AGP space.
                 * That's not always the case, as AGP can be in the control
                 * of user space (i.e. the i810 driver), in which case this
                 * loop gets skipped.  So we double-check that
                 * dev->agp->memory is actually populated, as well as the
                 * offset being invalid, before returning -EPERM.
                 */
                list_for_each_entry(entry, &dev->agp->memory, head) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!list_empty(&dev->agp->memory) && !valid) {
                        kfree(map);
                        return -EPERM;
                }
                DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
                          (unsigned long long)map->offset, map->size);

                break;
        }
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        kfree(map);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first.
                 */
                dmah = drm_pci_alloc(dev, map->size, map->size);
                if (!dmah) {
                        kfree(map);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                kfree(map);
                return -EINVAL;
        }

        list = kzalloc(sizeof(*list), GFP_KERNEL);
        if (!list) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                kfree(map);
                return -ENOMEM;
        }
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist);

        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
                map->offset;
        ret = drm_map_handle(dev, &list->hash, user_token, 0,
                             (map->type == _DRM_SHM));
        if (ret) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                kfree(map);
                kfree(list);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        list->user_token = list->hash.key << PAGE_SHIFT;
        mutex_unlock(&dev->struct_mutex);

        if (!(map->flags & _DRM_DRIVER))
                list->master = dev->master;
        *maplist = list;
        return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
        struct drm_map_list *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

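/*
 * Usage sketch (not taken from a real driver; "pdev" and the chosen BAR
 * and flags are illustrative, SAREA_MAX comes from drm_sarea.h): a legacy
 * driver would typically publish its register BAR and an SAREA once at
 * load time roughly like this:
 *
 *      struct drm_local_map *regs, *sarea;
 *      int ret;
 *
 *      ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *                              pci_resource_len(pdev, 0),
 *                              _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *      if (ret)
 *              return ret;
 *      ret = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 *                              _DRM_CONTAINS_LOCK, &sarea);
 */
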
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
                                         unsigned int token)
{
        struct drm_map_list *_entry;

        list_for_each_entry(_entry, &dev->maplist, head)
                if (_entry->user_token == token)
                        return _entry->map;
        return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_map *map = data;
        struct drm_map_list *maplist;
        int err;

        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
                return -EPERM;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        err = drm_addmap_core(dev, map->offset, map->size, map->type,
                              map->flags, &maplist);

        if (err)
                return err;

        /* avoid a warning on 64-bit; this cast isn't very nice, but the
         * API is already set in stone, so it's too late to change it */
        map->handle = (void *)(unsigned long)maplist->user_token;

        /*
         * It appears that there are no users of this value whatsoever --
         * drmAddMap just discards it.  Let's not encourage its use.
         * (Keeping drm_addmap_core's returned mtrr value would be wrong --
         *  it's not a real mtrr index anymore.)
         */
        map->mtrr = -1;

        return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_map *map = data;
        struct drm_map_list *r_list = NULL;
        struct list_head *list;
        int idx;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        idx = map->offset;
        if (idx < 0)
                return -EINVAL;

        i = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist) {
                if (i == idx) {
                        r_list = list_entry(list, struct drm_map_list, head);
                        break;
                }
                i++;
        }
        if (!r_list || !r_list->map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        map->offset = r_list->map->offset;
        map->size = r_list->map->size;
        map->type = r_list->map->type;
        map->flags = r_list->map->flags;
        map->handle = (void *)(unsigned long) r_list->user_token;
        map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it's being used, and frees any associated resources (such as
 * MTRRs) if it's not in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
        struct drm_map_list *r_list = NULL, *list_t;
        drm_dma_handle_t dmah;
        int found = 0;
        struct drm_master *master;

        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        master = r_list->master;
                        list_del(&r_list->head);
                        drm_ht_remove_key(&dev->map_hash,
                                          r_list->user_token >> PAGE_SHIFT);
                        kfree(r_list);
                        found = 1;
                        break;
                }
        }

        if (!found)
                return -EINVAL;

        switch (map->type) {
        case _DRM_REGISTERS:
                iounmap(map->handle);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                arch_phys_wc_del(map->mtrr);
                break;
        case _DRM_SHM:
                vfree(map->handle);
                if (master) {
                        if (dev->sigdata.lock == master->lock.hw_lock)
                                dev->sigdata.lock = NULL;
                        master->lock.hw_lock = NULL;   /* SHM removed */
                        master->lock.file_priv = NULL;
                        wake_up_interruptible_all(&master->lock.lock_queue);
                }
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_legacy_pci_free(dev, &dmah);
                break;
        }
        kfree(map);

        return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return;

        mutex_lock(&dev->struct_mutex);
        drm_legacy_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
        struct drm_map_list *r_list, *list_temp;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_legacy_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
        struct drm_map_list *r_list, *list_temp;

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_map *request = data;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
            !drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
                    r_list->user_token == (unsigned long)request->handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* The list has wrapped around to the head pointer, or it's empty,
         * and we didn't find anything.
         */
        if (list_empty(&dev->maplist) || !map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_legacy_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
                                  struct drm_buf_entry *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i])
                                drm_pci_free(dev, entry->seglist[i]);
                }
                kfree(entry->seglist);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++)
                        kfree(entry->buflist[i].dev_private);
                kfree(entry->buflist);

                entry->buf_count = 0;
        }
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
                           struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_entry *entry;
        struct drm_agp_mem *agp_entry;
        struct drm_buf *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i, valid;
        struct drm_buf **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        /* Make sure buffers are located in AGP memory that we own */
        valid = 0;
        list_for_each_entry(agp_entry, &dev->agp->memory, head) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!list_empty(&dev->agp->memory) && !valid) {
                DRM_DEBUG("zone invalid\n");
                return -EINVAL;
        }
        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++)
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

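/*
 * The size/order arithmetic shared by the addbufs variants above and
 * below, worked through on assumed values (PAGE_SIZE == 4096,
 * PAGE_SHIFT == 12): request->size = 16384 yields
 * order = order_base_2(16384) = 14 and size = 1 << 14 = 16384; with
 * _DRM_PAGE_ALIGN set, alignment = PAGE_ALIGN(16384) = 16384,
 * page_order = 14 - 12 = 2, and each allocation chunk covers
 * total = PAGE_SIZE << 2 = 16384 bytes.  A non-power-of-two request such
 * as 12000 is rounded up by order_base_2() to order 14 as well, so
 * buffers always come in power-of-two sizes.
 */
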
int drm_legacy_addbufs_pci(struct drm_device *dev,
                           struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        struct drm_buf_entry *entry;
        drm_dma_handle_t *dmah;
        struct drm_buf *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        struct drm_buf **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
                  request->count, request->size, size, order);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
        if (!entry->seglist) {
                kfree(entry->buflist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
                                      sizeof(*dma->pagelist),
                                      GFP_KERNEL);
        if (!temp_pagelist) {
                kfree(entry->buflist);
                kfree(entry->seglist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {

                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        kfree(temp_pagelist);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                                = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = kzalloc(buf->dev_priv_size,
                                                   GFP_KERNEL);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                kfree(temp_pagelist);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                kfree(temp_pagelist);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++)
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count)
                kfree(dma->pagelist);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        if (request->flags & _DRM_PCI_BUFFER_RO)
                dma->flags = _DRM_DMA_USE_PCI_RO;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

static int drm_legacy_addbufs_sg(struct drm_device *dev,
                                 struct drm_buf_desc *request)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_entry *entry;
        struct drm_buf *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        struct drm_buf **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->buf_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
                                (dma->buf_count + entry->buf_count) *
                                sizeof(*dma->buflist), GFP_KERNEL);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++)
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_buf_desc *request = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
        if (request->flags & _DRM_AGP_BUFFER)
                ret = drm_legacy_addbufs_agp(dev, request);
        else
#endif
        if (request->flags & _DRM_SG_BUFFER)
                ret = drm_legacy_addbufs_sg(dev, request);
        else if (request->flags & _DRM_FB_BUFFER)
                ret = -EINVAL;
        else
                ret = drm_legacy_addbufs_pci(dev, request);

        return ret;
}

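/*
 * From userspace, the dispatch above is reached through
 * DRM_IOCTL_ADD_BUFS.  A minimal sketch (illustrative only; "fd" is an
 * assumed open DRM device and error handling is omitted) requesting 32
 * page-aligned PCI DMA buffers of 16 KiB each:
 *
 *      struct drm_buf_desc desc = {
 *              .count = 32,
 *              .size  = 16384,
 *              .flags = _DRM_PAGE_ALIGN,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return, desc.count and desc.size report what was actually allocated.
 */
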
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
                          void *data, int *p,
                          int (*f)(void *, int, struct drm_buf_entry *))
{
        struct drm_device_dma *dma = dev->dma;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->buf_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (*p >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        struct drm_buf_entry *from = &dma->bufs[i];

                        if (from->buf_count) {
                                if (f(data, count, from) < 0)
                                        return -EFAULT;
                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].low_mark,
                                          dma->bufs[i].high_mark);
                                ++count;
                        }
                }
        }
        *p = count;

        return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
        struct drm_buf_info *request = data;
        struct drm_buf_desc __user *to = &request->list[count];
        struct drm_buf_desc v = {.count = from->buf_count,
                                 .size = from->buf_size,
                                 .low_mark = from->low_mark,
                                 .high_mark = from->high_mark};

        if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
                return -EFAULT;
        return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_buf_info *request = data;

        return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

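/*
 * The *p in/out parameter of __drm_legacy_infobufs() implements the usual
 * two-pass query protocol.  A userspace sketch ("fd" assumed, errors
 * ignored): the first DRM_IOCTL_INFO_BUFS call passes count = 0 and only
 * learns how many entries exist; the second, with a large enough list,
 * fills it in:
 *
 *      struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *      info.list = calloc(info.count, sizeof(*info.list));
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */
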
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_desc *request = data;
        int order;
        struct drm_buf_entry *entry;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);
        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
                return -EINVAL;
        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
                return -EINVAL;

        entry->low_mark = request->low_mark;
        entry->high_mark = request->high_mark;

        return 0;
}

/**
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_free *request = data;
        int i;
        int idx;
        struct drm_buf *buf;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        DRM_DEBUG("%d\n", request->count);
        for (i = 0; i < request->count; i++) {
                if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                idx = array_index_nospec(idx, dma->buf_count);
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                  task_pid_nr(current));
                        return -EINVAL;
                }
                drm_legacy_free_buffer(dev, buf);
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
                         void __user **v,
                         int (*f)(void *, int, unsigned long,
                                  struct drm_buf *),
                         struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        int retcode = 0;
        unsigned long virtual;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EOPNOTSUPP;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->buf_lock);

        if (*p >= dma->buf_count) {
                if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))) {
                        struct drm_local_map *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }
                        virtual = vm_mmap(file_priv->filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED,
                                          token);
                } else {
                        virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, 0);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                *v = (void __user *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (f(data, i, virtual, dma->buflist[i]) < 0) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
done:
        *p = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

        return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
                       struct drm_buf *buf)
{
        struct drm_buf_map *request = data;
        unsigned long address = virtual + buf->offset;  /* *** */

        if (copy_to_user(&request->list[idx].idx, &buf->idx,
                         sizeof(request->list[0].idx)))
                return -EFAULT;
        if (copy_to_user(&request->list[idx].total, &buf->total,
                         sizeof(request->list[0].total)))
                return -EFAULT;
        if (clear_user(&request->list[idx].used, sizeof(int)))
                return -EFAULT;
        if (copy_to_user(&request->list[idx].address, &address,
                         sizeof(address)))
                return -EFAULT;
        return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_buf_map *request = data;

        return __drm_legacy_mapbufs(dev, data, &request->count,
                                    &request->virtual, map_one_buf,
                                    file_priv);
}

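/*
 * Typical client flow for the mapbufs path (sketch only; "fd", "n" and
 * "list" are assumed, error handling omitted): one DRM_IOCTL_MAP_BUFS call
 * maps the whole buffer region and describes every buffer, after which
 * individual buffers are addressed through the returned entries:
 *
 *      struct drm_buf_map bm = { .count = n, .list = list };
 *
 *      ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *      void *first = bm.list[0].address;
 *
 * where each returned address is the mmap()ed base plus buf->offset, as
 * computed in map_one_buf() above.
 */
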
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        if (dev->driver->dma_ioctl)
                return dev->driver->dma_ioctl(dev, data, file_priv);
        else
                return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
                        return entry->map;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
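
/*
 * drm_legacy_getsarea() is a convenience for driver code that needs the
 * SAREA outside the map ioctls, e.g. (hypothetical driver snippet):
 *
 *      struct drm_local_map *sarea = drm_legacy_getsarea(dev);
 *
 *      if (!sarea)
 *              return -EINVAL;
 */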