linux/drivers/gpu/drm/drm_bufs.c
   1/**
   2 * \file drm_bufs.c
   3 * Generic buffer template
   4 *
   5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
   6 * \author Gareth Hughes <gareth@valinux.com>
   7 */
   8
   9/*
  10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
  11 *
  12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  14 * All Rights Reserved.
  15 *
  16 * Permission is hereby granted, free of charge, to any person obtaining a
  17 * copy of this software and associated documentation files (the "Software"),
  18 * to deal in the Software without restriction, including without limitation
  19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  20 * and/or sell copies of the Software, and to permit persons to whom the
  21 * Software is furnished to do so, subject to the following conditions:
  22 *
  23 * The above copyright notice and this permission notice (including the next
  24 * paragraph) shall be included in all copies or substantial portions of the
  25 * Software.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  33 * OTHER DEALINGS IN THE SOFTWARE.
  34 */
  35
  36#include <linux/vmalloc.h>
  37#include <linux/slab.h>
  38#include <linux/log2.h>
  39#include <linux/export.h>
  40#include <asm/shmparam.h>
  41#include <drm/drmP.h>
  42
  43static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
  44                                                  struct drm_local_map *map)
  45{
  46        struct drm_map_list *entry;
  47        list_for_each_entry(entry, &dev->maplist, head) {
  48                /*
  49                 * Because the kernel-userspace ABI is fixed at a 32-bit offset
  50                 * while PCI resources may live above that, we only compare the
  51                 * lower 32 bits of the map offset for maps of type
   52                 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
   53                 * It is assumed that if a driver has more than one resource
  54                 * of each type, the lower 32 bits are different.
  55                 */
  56                if (!entry->map ||
  57                    map->type != entry->map->type ||
  58                    entry->master != dev->primary->master)
  59                        continue;
  60                switch (map->type) {
  61                case _DRM_SHM:
  62                        if (map->flags != _DRM_CONTAINS_LOCK)
  63                                break;
  64                        return entry;
  65                case _DRM_REGISTERS:
  66                case _DRM_FRAME_BUFFER:
  67                        if ((entry->map->offset & 0xffffffff) ==
  68                            (map->offset & 0xffffffff))
  69                                return entry;
  70                default: /* Make gcc happy */
  71                        ;
  72                }
  73                if (entry->map->offset == map->offset)
  74                        return entry;
  75        }
  76
  77        return NULL;
  78}
  79
  80static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
  81                          unsigned long user_token, int hashed_handle, int shm)
  82{
  83        int use_hashed_handle, shift;
  84        unsigned long add;
  85
  86#if (BITS_PER_LONG == 64)
  87        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
  88#elif (BITS_PER_LONG == 32)
  89        use_hashed_handle = hashed_handle;
  90#else
  91#error Unsupported long size. Neither 64 nor 32 bits.
  92#endif
  93
  94        if (!use_hashed_handle) {
  95                int ret;
  96                hash->key = user_token >> PAGE_SHIFT;
  97                ret = drm_ht_insert_item(&dev->map_hash, hash);
  98                if (ret != -EINVAL)
  99                        return ret;
 100        }
 101
 102        shift = 0;
 103        add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
 104        if (shm && (SHMLBA > PAGE_SIZE)) {
 105                int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
 106
 107                /* For shared memory, we have to preserve the SHMLBA
 108                 * bits of the eventual vma->vm_pgoff value during
 109                 * mmap().  Otherwise we run into cache aliasing problems
 110                 * on some platforms.  On these platforms, the pgoff of
 111                 * a mmap() request is used to pick a suitable virtual
 112                 * address for the mmap() region such that it will not
 113                 * cause cache aliasing problems.
 114                 *
 115                 * Therefore, make sure the SHMLBA relevant bits of the
 116                 * hash value we use are equal to those in the original
 117                 * kernel virtual address.
 118                 */
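                /*
                 * Worked example (illustrative, not part of the original
                 * source): assuming PAGE_SIZE is 4 KiB (PAGE_SHIFT == 12)
                 * and SHMLBA is 16 KiB, SHMLBA >> PAGE_SHIFT == 4, so
                 * bits == ilog2(4) + 1 == 3.  The low three bits of the
                 * kernel address's page offset are folded into 'add'
                 * below, so the handle generated by
                 * drm_ht_just_insert_please() can keep those bits intact.
                 */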
 119                shift = bits;
 120                add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
 121        }
 122
 123        return drm_ht_just_insert_please(&dev->map_hash, hash,
 124                                         user_token, 32 - PAGE_SHIFT - 3,
 125                                         shift, add);
 126}
 127
 128/**
 129 * Core function to create a range of memory available for mapping by a
 130 * non-root process.
 131 *
 132 * Adjusts the memory offset to its absolute value according to the mapping
  133 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 134 * applicable and if supported by the kernel.
 135 */
 136static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 137                           unsigned int size, enum drm_map_type type,
 138                           enum drm_map_flags flags,
 139                           struct drm_map_list ** maplist)
 140{
 141        struct drm_local_map *map;
 142        struct drm_map_list *list;
 143        drm_dma_handle_t *dmah;
 144        unsigned long user_token;
 145        int ret;
 146
 147        map = kmalloc(sizeof(*map), GFP_KERNEL);
 148        if (!map)
 149                return -ENOMEM;
 150
 151        map->offset = offset;
 152        map->size = size;
 153        map->flags = flags;
 154        map->type = type;
 155
 156        /* Only allow shared memory to be removable since we only keep enough
  157         * bookkeeping information about shared memory to allow for removal
 158         * when processes fork.
 159         */
 160        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
 161                kfree(map);
 162                return -EINVAL;
 163        }
 164        DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 165                  (unsigned long long)map->offset, map->size, map->type);
 166
 167        /* page-align _DRM_SHM maps. They are allocated here so there is no security
 168         * hole created by that and it works around various broken drivers that use
 169         * a non-aligned quantity to map the SAREA. --BenH
 170         */
 171        if (map->type == _DRM_SHM)
 172                map->size = PAGE_ALIGN(map->size);
 173
 174        if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 175                kfree(map);
 176                return -EINVAL;
 177        }
 178        map->mtrr = -1;
 179        map->handle = NULL;
 180
 181        switch (map->type) {
 182        case _DRM_REGISTERS:
 183        case _DRM_FRAME_BUFFER:
 184#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
 185                if (map->offset + (map->size-1) < map->offset ||
 186                    map->offset < virt_to_phys(high_memory)) {
 187                        kfree(map);
 188                        return -EINVAL;
 189                }
 190#endif
 191                /* Some drivers preinitialize some maps, without the X Server
 192                 * needing to be aware of it.  Therefore, we just return success
 193                 * when the server tries to create a duplicate map.
 194                 */
 195                list = drm_find_matching_map(dev, map);
 196                if (list != NULL) {
 197                        if (list->map->size != map->size) {
 198                                DRM_DEBUG("Matching maps of type %d with "
 199                                          "mismatched sizes, (%ld vs %ld)\n",
 200                                          map->type, map->size,
 201                                          list->map->size);
 202                                list->map->size = map->size;
 203                        }
 204
 205                        kfree(map);
 206                        *maplist = list;
 207                        return 0;
 208                }
 209
 210                if (drm_core_has_MTRR(dev)) {
 211                        if (map->type == _DRM_FRAME_BUFFER ||
 212                            (map->flags & _DRM_WRITE_COMBINING)) {
 213                                map->mtrr = mtrr_add(map->offset, map->size,
 214                                                     MTRR_TYPE_WRCOMB, 1);
 215                        }
 216                }
 217                if (map->type == _DRM_REGISTERS) {
 218                        map->handle = ioremap(map->offset, map->size);
 219                        if (!map->handle) {
 220                                kfree(map);
 221                                return -ENOMEM;
 222                        }
 223                }
 224
 225                break;
 226        case _DRM_SHM:
 227                list = drm_find_matching_map(dev, map);
 228                if (list != NULL) {
  229                        if (list->map->size != map->size) {
 230                                DRM_DEBUG("Matching maps of type %d with "
 231                                          "mismatched sizes, (%ld vs %ld)\n",
 232                                          map->type, map->size, list->map->size);
 233                                list->map->size = map->size;
 234                        }
 235
 236                        kfree(map);
 237                        *maplist = list;
 238                        return 0;
 239                }
 240                map->handle = vmalloc_user(map->size);
 241                DRM_DEBUG("%lu %d %p\n",
 242                          map->size, drm_order(map->size), map->handle);
 243                if (!map->handle) {
 244                        kfree(map);
 245                        return -ENOMEM;
 246                }
 247                map->offset = (unsigned long)map->handle;
 248                if (map->flags & _DRM_CONTAINS_LOCK) {
 249                        /* Prevent a 2nd X Server from creating a 2nd lock */
 250                        if (dev->primary->master->lock.hw_lock != NULL) {
 251                                vfree(map->handle);
 252                                kfree(map);
 253                                return -EBUSY;
 254                        }
 255                        dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
 256                }
 257                break;
 258        case _DRM_AGP: {
 259                struct drm_agp_mem *entry;
 260                int valid = 0;
 261
 262                if (!drm_core_has_AGP(dev)) {
 263                        kfree(map);
 264                        return -EINVAL;
 265                }
 266#ifdef __alpha__
 267                map->offset += dev->hose->mem_space->start;
 268#endif
 269                /* In some cases (i810 driver), user space may have already
 270                 * added the AGP base itself, because dev->agp->base previously
 271                 * only got set during AGP enable.  So, only add the base
 272                 * address if the map's offset isn't already within the
 273                 * aperture.
 274                 */
 275                if (map->offset < dev->agp->base ||
 276                    map->offset > dev->agp->base +
 277                    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
 278                        map->offset += dev->agp->base;
 279                }
 280                map->mtrr = dev->agp->agp_mtrr; /* for getmap */
 281
 282                /* This assumes the DRM is in total control of AGP space.
  283                 * That is not always the case, as AGP can be under the control
  284                 * of user space (e.g. the i810 driver), in which case this loop
  285                 * is skipped.  So before returning -EPERM we check both that
  286                 * dev->agp->memory is actually populated and that the map is invalid.
 287                 */
 288                list_for_each_entry(entry, &dev->agp->memory, head) {
 289                        if ((map->offset >= entry->bound) &&
 290                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 291                                valid = 1;
 292                                break;
 293                        }
 294                }
 295                if (!list_empty(&dev->agp->memory) && !valid) {
 296                        kfree(map);
 297                        return -EPERM;
 298                }
 299                DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
 300                          (unsigned long long)map->offset, map->size);
 301
 302                break;
 303        }
 304        case _DRM_GEM:
 305                DRM_ERROR("tried to addmap GEM object\n");
 306                break;
 307        case _DRM_SCATTER_GATHER:
 308                if (!dev->sg) {
 309                        kfree(map);
 310                        return -EINVAL;
 311                }
 312                map->offset += (unsigned long)dev->sg->virtual;
 313                break;
 314        case _DRM_CONSISTENT:
  315                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
 316                 * As we're limiting the address to 2^32-1 (or less),
 317                 * casting it down to 32 bits is no problem, but we
 318                 * need to point to a 64bit variable first. */
 319                dmah = drm_pci_alloc(dev, map->size, map->size);
 320                if (!dmah) {
 321                        kfree(map);
 322                        return -ENOMEM;
 323                }
 324                map->handle = dmah->vaddr;
 325                map->offset = (unsigned long)dmah->busaddr;
 326                kfree(dmah);
 327                break;
 328        default:
 329                kfree(map);
 330                return -EINVAL;
 331        }
 332
 333        list = kzalloc(sizeof(*list), GFP_KERNEL);
 334        if (!list) {
 335                if (map->type == _DRM_REGISTERS)
 336                        iounmap(map->handle);
 337                kfree(map);
  338                return -ENOMEM;
 339        }
 340        list->map = map;
 341
 342        mutex_lock(&dev->struct_mutex);
 343        list_add(&list->head, &dev->maplist);
 344
 345        /* Assign a 32-bit handle */
 346        /* We do it here so that dev->struct_mutex protects the increment */
 347        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
 348                map->offset;
 349        ret = drm_map_handle(dev, &list->hash, user_token, 0,
 350                             (map->type == _DRM_SHM));
 351        if (ret) {
 352                if (map->type == _DRM_REGISTERS)
 353                        iounmap(map->handle);
 354                kfree(map);
 355                kfree(list);
 356                mutex_unlock(&dev->struct_mutex);
 357                return ret;
 358        }
 359
 360        list->user_token = list->hash.key << PAGE_SHIFT;
 361        mutex_unlock(&dev->struct_mutex);
 362
 363        if (!(map->flags & _DRM_DRIVER))
 364                list->master = dev->primary->master;
 365        *maplist = list;
 366        return 0;
  367}
 368
 369int drm_addmap(struct drm_device * dev, resource_size_t offset,
 370               unsigned int size, enum drm_map_type type,
 371               enum drm_map_flags flags, struct drm_local_map ** map_ptr)
 372{
 373        struct drm_map_list *list;
 374        int rc;
 375
 376        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
 377        if (!rc)
 378                *map_ptr = list->map;
 379        return rc;
 380}
 381
 382EXPORT_SYMBOL(drm_addmap);
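/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * a hypothetical driver could expose its register BAR through drm_addmap()
 * and tear the mapping down again with drm_rmmap(), roughly like this:
 *
 *	struct drm_local_map *regs;
 *	int err;
 *
 *	err = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (err)
 *		return err;
 *	...
 *	drm_rmmap(dev, regs);
 *
 * The resource index and flags above are only examples; real drivers pick
 * whatever BAR and access mode they need, and most rely on the maps being
 * torn down automatically on last close instead of calling drm_rmmap().
 */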
 383
 384/**
 385 * Ioctl to specify a range of memory that is available for mapping by a
 386 * non-root process.
 387 *
 388 * \param inode device inode.
 389 * \param file_priv DRM file private.
 390 * \param cmd command.
 391 * \param arg pointer to a drm_map structure.
 392 * \return zero on success or a negative value on error.
 393 *
 394 */
 395int drm_addmap_ioctl(struct drm_device *dev, void *data,
 396                     struct drm_file *file_priv)
 397{
 398        struct drm_map *map = data;
 399        struct drm_map_list *maplist;
 400        int err;
 401
 402        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 403                return -EPERM;
 404
 405        err = drm_addmap_core(dev, map->offset, map->size, map->type,
 406                              map->flags, &maplist);
 407
 408        if (err)
 409                return err;
 410
  411        /* Avoid a warning on 64-bit; this cast isn't very nice, but the API is already set in stone, so it is too late to change it. */
 412        map->handle = (void *)(unsigned long)maplist->user_token;
 413        return 0;
 414}
 415
 416/**
  417 * Remove a map from the maplist and deallocate its resources if the mapping
 418 * isn't in use.
 419 *
  420 * Searches for the map on drm_device::maplist, removes it from the list, checks
  421 * whether it is still in use, and frees any associated resources (such as
  422 * MTRRs) if it is not.
 423 *
 424 * \sa drm_addmap
 425 */
 426int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 427{
 428        struct drm_map_list *r_list = NULL, *list_t;
 429        drm_dma_handle_t dmah;
 430        int found = 0;
 431        struct drm_master *master;
 432
 433        /* Find the list entry for the map and remove it */
 434        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 435                if (r_list->map == map) {
 436                        master = r_list->master;
 437                        list_del(&r_list->head);
 438                        drm_ht_remove_key(&dev->map_hash,
 439                                          r_list->user_token >> PAGE_SHIFT);
 440                        kfree(r_list);
 441                        found = 1;
 442                        break;
 443                }
 444        }
 445
 446        if (!found)
 447                return -EINVAL;
 448
 449        switch (map->type) {
 450        case _DRM_REGISTERS:
 451                iounmap(map->handle);
 452                /* FALLTHROUGH */
 453        case _DRM_FRAME_BUFFER:
 454                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
 455                        int retcode;
 456                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
 457                        DRM_DEBUG("mtrr_del=%d\n", retcode);
 458                }
 459                break;
 460        case _DRM_SHM:
 461                vfree(map->handle);
 462                if (master) {
 463                        if (dev->sigdata.lock == master->lock.hw_lock)
 464                                dev->sigdata.lock = NULL;
 465                        master->lock.hw_lock = NULL;   /* SHM removed */
 466                        master->lock.file_priv = NULL;
 467                        wake_up_interruptible_all(&master->lock.lock_queue);
 468                }
 469                break;
 470        case _DRM_AGP:
 471        case _DRM_SCATTER_GATHER:
 472                break;
 473        case _DRM_CONSISTENT:
 474                dmah.vaddr = map->handle;
 475                dmah.busaddr = map->offset;
 476                dmah.size = map->size;
 477                __drm_pci_free(dev, &dmah);
 478                break;
 479        case _DRM_GEM:
 480                DRM_ERROR("tried to rmmap GEM object\n");
 481                break;
 482        }
 483        kfree(map);
 484
 485        return 0;
 486}
 487EXPORT_SYMBOL(drm_rmmap_locked);
 488
 489int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
 490{
 491        int ret;
 492
 493        mutex_lock(&dev->struct_mutex);
 494        ret = drm_rmmap_locked(dev, map);
 495        mutex_unlock(&dev->struct_mutex);
 496
 497        return ret;
 498}
 499EXPORT_SYMBOL(drm_rmmap);
 500
 501/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 502 * the last close of the device, and this is necessary for cleanup when things
 503 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 504 * like a pointless exercise since they're going away anyway.
 505 *
 506 * One use case might be after addmap is allowed for normal users for SHM and
 507 * gets used by drivers that the server doesn't need to care about.  This seems
 508 * unlikely.
 509 *
 510 * \param inode device inode.
 511 * \param file_priv DRM file private.
 512 * \param cmd command.
 513 * \param arg pointer to a struct drm_map structure.
 514 * \return zero on success or a negative value on error.
 515 */
 516int drm_rmmap_ioctl(struct drm_device *dev, void *data,
 517                    struct drm_file *file_priv)
 518{
 519        struct drm_map *request = data;
 520        struct drm_local_map *map = NULL;
 521        struct drm_map_list *r_list;
 522        int ret;
 523
 524        mutex_lock(&dev->struct_mutex);
 525        list_for_each_entry(r_list, &dev->maplist, head) {
 526                if (r_list->map &&
 527                    r_list->user_token == (unsigned long)request->handle &&
 528                    r_list->map->flags & _DRM_REMOVABLE) {
 529                        map = r_list->map;
 530                        break;
 531                }
 532        }
 533
  534        /* List has wrapped around to the head pointer, or it's empty and we
  535         * didn't find anything.
 536         */
 537        if (list_empty(&dev->maplist) || !map) {
 538                mutex_unlock(&dev->struct_mutex);
 539                return -EINVAL;
 540        }
 541
 542        /* Register and framebuffer maps are permanent */
 543        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
 544                mutex_unlock(&dev->struct_mutex);
 545                return 0;
 546        }
 547
 548        ret = drm_rmmap_locked(dev, map);
 549
 550        mutex_unlock(&dev->struct_mutex);
 551
 552        return ret;
 553}
 554
 555/**
 556 * Cleanup after an error on one of the addbufs() functions.
 557 *
 558 * \param dev DRM device.
 559 * \param entry buffer entry where the error occurred.
 560 *
 561 * Frees any pages and buffers associated with the given entry.
 562 */
 563static void drm_cleanup_buf_error(struct drm_device * dev,
 564                                  struct drm_buf_entry * entry)
 565{
 566        int i;
 567
 568        if (entry->seg_count) {
 569                for (i = 0; i < entry->seg_count; i++) {
 570                        if (entry->seglist[i]) {
 571                                drm_pci_free(dev, entry->seglist[i]);
 572                        }
 573                }
 574                kfree(entry->seglist);
 575
 576                entry->seg_count = 0;
 577        }
 578
 579        if (entry->buf_count) {
 580                for (i = 0; i < entry->buf_count; i++) {
 581                        kfree(entry->buflist[i].dev_private);
 582                }
 583                kfree(entry->buflist);
 584
 585                entry->buf_count = 0;
 586        }
 587}
 588
 589#if __OS_HAS_AGP
 590/**
 591 * Add AGP buffers for DMA transfers.
 592 *
 593 * \param dev struct drm_device to which the buffers are to be added.
 594 * \param request pointer to a struct drm_buf_desc describing the request.
 595 * \return zero on success or a negative number on failure.
 596 *
  597 * After some sanity checks, this creates a drm_buf structure for each buffer and
 598 * reallocates the buffer list of the same size order to accommodate the new
 599 * buffers.
 600 */
 601int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 602{
 603        struct drm_device_dma *dma = dev->dma;
 604        struct drm_buf_entry *entry;
 605        struct drm_agp_mem *agp_entry;
 606        struct drm_buf *buf;
 607        unsigned long offset;
 608        unsigned long agp_offset;
 609        int count;
 610        int order;
 611        int size;
 612        int alignment;
 613        int page_order;
 614        int total;
 615        int byte_count;
 616        int i, valid;
 617        struct drm_buf **temp_buflist;
 618
 619        if (!dma)
 620                return -EINVAL;
 621
 622        count = request->count;
 623        order = drm_order(request->size);
 624        size = 1 << order;
 625
 626        alignment = (request->flags & _DRM_PAGE_ALIGN)
 627            ? PAGE_ALIGN(size) : size;
 628        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 629        total = PAGE_SIZE << page_order;
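        /*
         * Worked example (illustrative, not part of the original source):
         * assuming PAGE_SHIFT == 12 and request->size == 65536,
         * drm_order() returns 16, so size == 64 KiB.  With _DRM_PAGE_ALIGN
         * set, alignment is also 64 KiB, page_order == 16 - 12 == 4 and
         * total == PAGE_SIZE << 4 == 64 KiB, i.e. each buffer occupies a
         * 16-page span of the aperture.
         */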
 630
 631        byte_count = 0;
 632        agp_offset = dev->agp->base + request->agp_start;
 633
 634        DRM_DEBUG("count:      %d\n", count);
 635        DRM_DEBUG("order:      %d\n", order);
 636        DRM_DEBUG("size:       %d\n", size);
 637        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
 638        DRM_DEBUG("alignment:  %d\n", alignment);
 639        DRM_DEBUG("page_order: %d\n", page_order);
 640        DRM_DEBUG("total:      %d\n", total);
 641
 642        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 643                return -EINVAL;
 644
 645        /* Make sure buffers are located in AGP memory that we own */
 646        valid = 0;
 647        list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 648                if ((agp_offset >= agp_entry->bound) &&
 649                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 650                        valid = 1;
 651                        break;
 652                }
 653        }
 654        if (!list_empty(&dev->agp->memory) && !valid) {
 655                DRM_DEBUG("zone invalid\n");
 656                return -EINVAL;
 657        }
 658        spin_lock(&dev->count_lock);
 659        if (dev->buf_use) {
 660                spin_unlock(&dev->count_lock);
 661                return -EBUSY;
 662        }
 663        atomic_inc(&dev->buf_alloc);
 664        spin_unlock(&dev->count_lock);
 665
 666        mutex_lock(&dev->struct_mutex);
 667        entry = &dma->bufs[order];
 668        if (entry->buf_count) {
 669                mutex_unlock(&dev->struct_mutex);
 670                atomic_dec(&dev->buf_alloc);
 671                return -ENOMEM; /* May only call once for each order */
 672        }
 673
 674        if (count < 0 || count > 4096) {
 675                mutex_unlock(&dev->struct_mutex);
 676                atomic_dec(&dev->buf_alloc);
 677                return -EINVAL;
 678        }
 679
 680        entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 681        if (!entry->buflist) {
 682                mutex_unlock(&dev->struct_mutex);
 683                atomic_dec(&dev->buf_alloc);
 684                return -ENOMEM;
 685        }
 686
 687        entry->buf_size = size;
 688        entry->page_order = page_order;
 689
 690        offset = 0;
 691
 692        while (entry->buf_count < count) {
 693                buf = &entry->buflist[entry->buf_count];
 694                buf->idx = dma->buf_count + entry->buf_count;
 695                buf->total = alignment;
 696                buf->order = order;
 697                buf->used = 0;
 698
 699                buf->offset = (dma->byte_count + offset);
 700                buf->bus_address = agp_offset + offset;
 701                buf->address = (void *)(agp_offset + offset);
 702                buf->next = NULL;
 703                buf->waiting = 0;
 704                buf->pending = 0;
 705                buf->file_priv = NULL;
 706
 707                buf->dev_priv_size = dev->driver->dev_priv_size;
 708                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
 709                if (!buf->dev_private) {
 710                        /* Set count correctly so we free the proper amount. */
 711                        entry->buf_count = count;
 712                        drm_cleanup_buf_error(dev, entry);
 713                        mutex_unlock(&dev->struct_mutex);
 714                        atomic_dec(&dev->buf_alloc);
 715                        return -ENOMEM;
 716                }
 717
 718                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 719
 720                offset += alignment;
 721                entry->buf_count++;
 722                byte_count += PAGE_SIZE << page_order;
 723        }
 724
 725        DRM_DEBUG("byte_count: %d\n", byte_count);
 726
 727        temp_buflist = krealloc(dma->buflist,
 728                                (dma->buf_count + entry->buf_count) *
 729                                sizeof(*dma->buflist), GFP_KERNEL);
 730        if (!temp_buflist) {
 731                /* Free the entry because it isn't valid */
 732                drm_cleanup_buf_error(dev, entry);
 733                mutex_unlock(&dev->struct_mutex);
 734                atomic_dec(&dev->buf_alloc);
 735                return -ENOMEM;
 736        }
 737        dma->buflist = temp_buflist;
 738
 739        for (i = 0; i < entry->buf_count; i++) {
 740                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 741        }
 742
 743        dma->buf_count += entry->buf_count;
 744        dma->seg_count += entry->seg_count;
 745        dma->page_count += byte_count >> PAGE_SHIFT;
 746        dma->byte_count += byte_count;
 747
 748        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 749        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 750
 751        mutex_unlock(&dev->struct_mutex);
 752
 753        request->count = entry->buf_count;
 754        request->size = size;
 755
 756        dma->flags = _DRM_DMA_USE_AGP;
 757
 758        atomic_dec(&dev->buf_alloc);
 759        return 0;
 760}
 761EXPORT_SYMBOL(drm_addbufs_agp);
 762#endif                          /* __OS_HAS_AGP */
 763
 764int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 765{
 766        struct drm_device_dma *dma = dev->dma;
 767        int count;
 768        int order;
 769        int size;
 770        int total;
 771        int page_order;
 772        struct drm_buf_entry *entry;
 773        drm_dma_handle_t *dmah;
 774        struct drm_buf *buf;
 775        int alignment;
 776        unsigned long offset;
 777        int i;
 778        int byte_count;
 779        int page_count;
 780        unsigned long *temp_pagelist;
 781        struct drm_buf **temp_buflist;
 782
 783        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
 784                return -EINVAL;
 785
 786        if (!dma)
 787                return -EINVAL;
 788
 789        if (!capable(CAP_SYS_ADMIN))
 790                return -EPERM;
 791
 792        count = request->count;
 793        order = drm_order(request->size);
 794        size = 1 << order;
 795
 796        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
 797                  request->count, request->size, size, order);
 798
 799        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 800                return -EINVAL;
 801
 802        alignment = (request->flags & _DRM_PAGE_ALIGN)
 803            ? PAGE_ALIGN(size) : size;
 804        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 805        total = PAGE_SIZE << page_order;
 806
 807        spin_lock(&dev->count_lock);
 808        if (dev->buf_use) {
 809                spin_unlock(&dev->count_lock);
 810                return -EBUSY;
 811        }
 812        atomic_inc(&dev->buf_alloc);
 813        spin_unlock(&dev->count_lock);
 814
 815        mutex_lock(&dev->struct_mutex);
 816        entry = &dma->bufs[order];
 817        if (entry->buf_count) {
 818                mutex_unlock(&dev->struct_mutex);
 819                atomic_dec(&dev->buf_alloc);
 820                return -ENOMEM; /* May only call once for each order */
 821        }
 822
 823        if (count < 0 || count > 4096) {
 824                mutex_unlock(&dev->struct_mutex);
 825                atomic_dec(&dev->buf_alloc);
 826                return -EINVAL;
 827        }
 828
 829        entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 830        if (!entry->buflist) {
 831                mutex_unlock(&dev->struct_mutex);
 832                atomic_dec(&dev->buf_alloc);
 833                return -ENOMEM;
 834        }
 835
 836        entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
 837        if (!entry->seglist) {
 838                kfree(entry->buflist);
 839                mutex_unlock(&dev->struct_mutex);
 840                atomic_dec(&dev->buf_alloc);
 841                return -ENOMEM;
 842        }
 843
 844        /* Keep the original pagelist until we know all the allocations
 845         * have succeeded
 846         */
 847        temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
 848                               sizeof(*dma->pagelist), GFP_KERNEL);
 849        if (!temp_pagelist) {
 850                kfree(entry->buflist);
 851                kfree(entry->seglist);
 852                mutex_unlock(&dev->struct_mutex);
 853                atomic_dec(&dev->buf_alloc);
 854                return -ENOMEM;
 855        }
 856        memcpy(temp_pagelist,
 857               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
 858        DRM_DEBUG("pagelist: %d entries\n",
 859                  dma->page_count + (count << page_order));
 860
 861        entry->buf_size = size;
 862        entry->page_order = page_order;
 863        byte_count = 0;
 864        page_count = 0;
 865
 866        while (entry->buf_count < count) {
 867
 868                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
 869
 870                if (!dmah) {
 871                        /* Set count correctly so we free the proper amount. */
 872                        entry->buf_count = count;
 873                        entry->seg_count = count;
 874                        drm_cleanup_buf_error(dev, entry);
 875                        kfree(temp_pagelist);
 876                        mutex_unlock(&dev->struct_mutex);
 877                        atomic_dec(&dev->buf_alloc);
 878                        return -ENOMEM;
 879                }
 880                entry->seglist[entry->seg_count++] = dmah;
 881                for (i = 0; i < (1 << page_order); i++) {
 882                        DRM_DEBUG("page %d @ 0x%08lx\n",
 883                                  dma->page_count + page_count,
 884                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
 885                        temp_pagelist[dma->page_count + page_count++]
 886                                = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
 887                }
 888                for (offset = 0;
 889                     offset + size <= total && entry->buf_count < count;
 890                     offset += alignment, ++entry->buf_count) {
 891                        buf = &entry->buflist[entry->buf_count];
 892                        buf->idx = dma->buf_count + entry->buf_count;
 893                        buf->total = alignment;
 894                        buf->order = order;
 895                        buf->used = 0;
 896                        buf->offset = (dma->byte_count + byte_count + offset);
 897                        buf->address = (void *)(dmah->vaddr + offset);
 898                        buf->bus_address = dmah->busaddr + offset;
 899                        buf->next = NULL;
 900                        buf->waiting = 0;
 901                        buf->pending = 0;
 902                        buf->file_priv = NULL;
 903
 904                        buf->dev_priv_size = dev->driver->dev_priv_size;
 905                        buf->dev_private = kzalloc(buf->dev_priv_size,
 906                                                GFP_KERNEL);
 907                        if (!buf->dev_private) {
 908                                /* Set count correctly so we free the proper amount. */
 909                                entry->buf_count = count;
 910                                entry->seg_count = count;
 911                                drm_cleanup_buf_error(dev, entry);
 912                                kfree(temp_pagelist);
 913                                mutex_unlock(&dev->struct_mutex);
 914                                atomic_dec(&dev->buf_alloc);
 915                                return -ENOMEM;
 916                        }
 917
 918                        DRM_DEBUG("buffer %d @ %p\n",
 919                                  entry->buf_count, buf->address);
 920                }
 921                byte_count += PAGE_SIZE << page_order;
 922        }
 923
 924        temp_buflist = krealloc(dma->buflist,
 925                                (dma->buf_count + entry->buf_count) *
 926                                sizeof(*dma->buflist), GFP_KERNEL);
 927        if (!temp_buflist) {
 928                /* Free the entry because it isn't valid */
 929                drm_cleanup_buf_error(dev, entry);
 930                kfree(temp_pagelist);
 931                mutex_unlock(&dev->struct_mutex);
 932                atomic_dec(&dev->buf_alloc);
 933                return -ENOMEM;
 934        }
 935        dma->buflist = temp_buflist;
 936
 937        for (i = 0; i < entry->buf_count; i++) {
 938                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 939        }
 940
 941        /* No allocations failed, so now we can replace the original pagelist
 942         * with the new one.
 943         */
 944        if (dma->page_count) {
 945                kfree(dma->pagelist);
 946        }
 947        dma->pagelist = temp_pagelist;
 948
 949        dma->buf_count += entry->buf_count;
 950        dma->seg_count += entry->seg_count;
 951        dma->page_count += entry->seg_count << page_order;
 952        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 953
 954        mutex_unlock(&dev->struct_mutex);
 955
 956        request->count = entry->buf_count;
 957        request->size = size;
 958
 959        if (request->flags & _DRM_PCI_BUFFER_RO)
 960                dma->flags = _DRM_DMA_USE_PCI_RO;
 961
 962        atomic_dec(&dev->buf_alloc);
 963        return 0;
 964
 965}
 966EXPORT_SYMBOL(drm_addbufs_pci);
 967
 968static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
 969{
 970        struct drm_device_dma *dma = dev->dma;
 971        struct drm_buf_entry *entry;
 972        struct drm_buf *buf;
 973        unsigned long offset;
 974        unsigned long agp_offset;
 975        int count;
 976        int order;
 977        int size;
 978        int alignment;
 979        int page_order;
 980        int total;
 981        int byte_count;
 982        int i;
 983        struct drm_buf **temp_buflist;
 984
 985        if (!drm_core_check_feature(dev, DRIVER_SG))
 986                return -EINVAL;
 987
 988        if (!dma)
 989                return -EINVAL;
 990
 991        if (!capable(CAP_SYS_ADMIN))
 992                return -EPERM;
 993
 994        count = request->count;
 995        order = drm_order(request->size);
 996        size = 1 << order;
 997
 998        alignment = (request->flags & _DRM_PAGE_ALIGN)
 999            ? PAGE_ALIGN(size) : size;
1000        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1001        total = PAGE_SIZE << page_order;
1002
1003        byte_count = 0;
1004        agp_offset = request->agp_start;
1005
1006        DRM_DEBUG("count:      %d\n", count);
1007        DRM_DEBUG("order:      %d\n", order);
1008        DRM_DEBUG("size:       %d\n", size);
1009        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1010        DRM_DEBUG("alignment:  %d\n", alignment);
1011        DRM_DEBUG("page_order: %d\n", page_order);
1012        DRM_DEBUG("total:      %d\n", total);
1013
1014        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1015                return -EINVAL;
1016
1017        spin_lock(&dev->count_lock);
1018        if (dev->buf_use) {
1019                spin_unlock(&dev->count_lock);
1020                return -EBUSY;
1021        }
1022        atomic_inc(&dev->buf_alloc);
1023        spin_unlock(&dev->count_lock);
1024
1025        mutex_lock(&dev->struct_mutex);
1026        entry = &dma->bufs[order];
1027        if (entry->buf_count) {
1028                mutex_unlock(&dev->struct_mutex);
1029                atomic_dec(&dev->buf_alloc);
1030                return -ENOMEM; /* May only call once for each order */
1031        }
1032
1033        if (count < 0 || count > 4096) {
1034                mutex_unlock(&dev->struct_mutex);
1035                atomic_dec(&dev->buf_alloc);
1036                return -EINVAL;
1037        }
1038
1039        entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1040                                GFP_KERNEL);
1041        if (!entry->buflist) {
1042                mutex_unlock(&dev->struct_mutex);
1043                atomic_dec(&dev->buf_alloc);
1044                return -ENOMEM;
1045        }
1046
1047        entry->buf_size = size;
1048        entry->page_order = page_order;
1049
1050        offset = 0;
1051
1052        while (entry->buf_count < count) {
1053                buf = &entry->buflist[entry->buf_count];
1054                buf->idx = dma->buf_count + entry->buf_count;
1055                buf->total = alignment;
1056                buf->order = order;
1057                buf->used = 0;
1058
1059                buf->offset = (dma->byte_count + offset);
1060                buf->bus_address = agp_offset + offset;
1061                buf->address = (void *)(agp_offset + offset
1062                                        + (unsigned long)dev->sg->virtual);
1063                buf->next = NULL;
1064                buf->waiting = 0;
1065                buf->pending = 0;
1066                buf->file_priv = NULL;
1067
1068                buf->dev_priv_size = dev->driver->dev_priv_size;
1069                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1070                if (!buf->dev_private) {
1071                        /* Set count correctly so we free the proper amount. */
1072                        entry->buf_count = count;
1073                        drm_cleanup_buf_error(dev, entry);
1074                        mutex_unlock(&dev->struct_mutex);
1075                        atomic_dec(&dev->buf_alloc);
1076                        return -ENOMEM;
1077                }
1078
1079                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1080
1081                offset += alignment;
1082                entry->buf_count++;
1083                byte_count += PAGE_SIZE << page_order;
1084        }
1085
1086        DRM_DEBUG("byte_count: %d\n", byte_count);
1087
1088        temp_buflist = krealloc(dma->buflist,
1089                                (dma->buf_count + entry->buf_count) *
1090                                sizeof(*dma->buflist), GFP_KERNEL);
1091        if (!temp_buflist) {
1092                /* Free the entry because it isn't valid */
1093                drm_cleanup_buf_error(dev, entry);
1094                mutex_unlock(&dev->struct_mutex);
1095                atomic_dec(&dev->buf_alloc);
1096                return -ENOMEM;
1097        }
1098        dma->buflist = temp_buflist;
1099
1100        for (i = 0; i < entry->buf_count; i++) {
1101                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1102        }
1103
1104        dma->buf_count += entry->buf_count;
1105        dma->seg_count += entry->seg_count;
1106        dma->page_count += byte_count >> PAGE_SHIFT;
1107        dma->byte_count += byte_count;
1108
1109        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1110        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1111
1112        mutex_unlock(&dev->struct_mutex);
1113
1114        request->count = entry->buf_count;
1115        request->size = size;
1116
1117        dma->flags = _DRM_DMA_USE_SG;
1118
1119        atomic_dec(&dev->buf_alloc);
1120        return 0;
1121}
1122
1123static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1124{
1125        struct drm_device_dma *dma = dev->dma;
1126        struct drm_buf_entry *entry;
1127        struct drm_buf *buf;
1128        unsigned long offset;
1129        unsigned long agp_offset;
1130        int count;
1131        int order;
1132        int size;
1133        int alignment;
1134        int page_order;
1135        int total;
1136        int byte_count;
1137        int i;
1138        struct drm_buf **temp_buflist;
1139
1140        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1141                return -EINVAL;
1142
1143        if (!dma)
1144                return -EINVAL;
1145
1146        if (!capable(CAP_SYS_ADMIN))
1147                return -EPERM;
1148
1149        count = request->count;
1150        order = drm_order(request->size);
1151        size = 1 << order;
1152
1153        alignment = (request->flags & _DRM_PAGE_ALIGN)
1154            ? PAGE_ALIGN(size) : size;
1155        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1156        total = PAGE_SIZE << page_order;
1157
1158        byte_count = 0;
1159        agp_offset = request->agp_start;
1160
1161        DRM_DEBUG("count:      %d\n", count);
1162        DRM_DEBUG("order:      %d\n", order);
1163        DRM_DEBUG("size:       %d\n", size);
1164        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1165        DRM_DEBUG("alignment:  %d\n", alignment);
1166        DRM_DEBUG("page_order: %d\n", page_order);
1167        DRM_DEBUG("total:      %d\n", total);
1168
1169        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1170                return -EINVAL;
1171
1172        spin_lock(&dev->count_lock);
1173        if (dev->buf_use) {
1174                spin_unlock(&dev->count_lock);
1175                return -EBUSY;
1176        }
1177        atomic_inc(&dev->buf_alloc);
1178        spin_unlock(&dev->count_lock);
1179
1180        mutex_lock(&dev->struct_mutex);
1181        entry = &dma->bufs[order];
1182        if (entry->buf_count) {
1183                mutex_unlock(&dev->struct_mutex);
1184                atomic_dec(&dev->buf_alloc);
1185                return -ENOMEM; /* May only call once for each order */
1186        }
1187
1188        if (count < 0 || count > 4096) {
1189                mutex_unlock(&dev->struct_mutex);
1190                atomic_dec(&dev->buf_alloc);
1191                return -EINVAL;
1192        }
1193
1194        entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1195                                GFP_KERNEL);
1196        if (!entry->buflist) {
1197                mutex_unlock(&dev->struct_mutex);
1198                atomic_dec(&dev->buf_alloc);
1199                return -ENOMEM;
1200        }
1201
1202        entry->buf_size = size;
1203        entry->page_order = page_order;
1204
1205        offset = 0;
1206
1207        while (entry->buf_count < count) {
1208                buf = &entry->buflist[entry->buf_count];
1209                buf->idx = dma->buf_count + entry->buf_count;
1210                buf->total = alignment;
1211                buf->order = order;
1212                buf->used = 0;
1213
1214                buf->offset = (dma->byte_count + offset);
1215                buf->bus_address = agp_offset + offset;
1216                buf->address = (void *)(agp_offset + offset);
1217                buf->next = NULL;
1218                buf->waiting = 0;
1219                buf->pending = 0;
1220                buf->file_priv = NULL;
1221
1222                buf->dev_priv_size = dev->driver->dev_priv_size;
1223                buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1224                if (!buf->dev_private) {
1225                        /* Set count correctly so we free the proper amount. */
1226                        entry->buf_count = count;
1227                        drm_cleanup_buf_error(dev, entry);
1228                        mutex_unlock(&dev->struct_mutex);
1229                        atomic_dec(&dev->buf_alloc);
1230                        return -ENOMEM;
1231                }
1232
1233                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1234
1235                offset += alignment;
1236                entry->buf_count++;
1237                byte_count += PAGE_SIZE << page_order;
1238        }
1239
1240        DRM_DEBUG("byte_count: %d\n", byte_count);
1241
1242        temp_buflist = krealloc(dma->buflist,
1243                                (dma->buf_count + entry->buf_count) *
1244                                sizeof(*dma->buflist), GFP_KERNEL);
1245        if (!temp_buflist) {
1246                /* Free the entry because it isn't valid */
1247                drm_cleanup_buf_error(dev, entry);
1248                mutex_unlock(&dev->struct_mutex);
1249                atomic_dec(&dev->buf_alloc);
1250                return -ENOMEM;
1251        }
1252        dma->buflist = temp_buflist;
1253
1254        for (i = 0; i < entry->buf_count; i++) {
1255                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1256        }
1257
1258        dma->buf_count += entry->buf_count;
1259        dma->seg_count += entry->seg_count;
1260        dma->page_count += byte_count >> PAGE_SHIFT;
1261        dma->byte_count += byte_count;
1262
1263        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1264        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1265
1266        mutex_unlock(&dev->struct_mutex);
1267
1268        request->count = entry->buf_count;
1269        request->size = size;
1270
1271        dma->flags = _DRM_DMA_USE_FB;
1272
1273        atomic_dec(&dev->buf_alloc);
1274        return 0;
1275}
1276
1277
1278/**
1279 * Add buffers for DMA transfers (ioctl).
1280 *
1281 * \param inode device inode.
1282 * \param file_priv DRM file private.
1283 * \param cmd command.
1284 * \param arg pointer to a struct drm_buf_desc request.
1285 * \return zero on success or a negative number on failure.
1286 *
 1287 * According to the memory type specified in drm_buf_desc::flags and the
 1288 * build options, it dispatches the call to addbufs_agp(), addbufs_sg(),
 1289 * addbufs_fb() or addbufs_pci() for AGP, scatter-gather, framebuffer or
 1290 * consistent PCI memory respectively.
1291 */
1292int drm_addbufs(struct drm_device *dev, void *data,
1293                struct drm_file *file_priv)
1294{
1295        struct drm_buf_desc *request = data;
1296        int ret;
1297
1298        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1299                return -EINVAL;
1300
1301#if __OS_HAS_AGP
1302        if (request->flags & _DRM_AGP_BUFFER)
1303                ret = drm_addbufs_agp(dev, request);
1304        else
1305#endif
1306        if (request->flags & _DRM_SG_BUFFER)
1307                ret = drm_addbufs_sg(dev, request);
1308        else if (request->flags & _DRM_FB_BUFFER)
1309                ret = drm_addbufs_fb(dev, request);
1310        else
1311                ret = drm_addbufs_pci(dev, request);
1312
1313        return ret;
1314}
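/*
 * Illustrative user-space sketch (not part of the original source): a client
 * such as the X server would typically reach drm_addbufs() through the
 * DRM_IOCTL_ADD_BUFS ioctl, roughly like this:
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0) {
 *		... desc.count and desc.size now reflect what was allocated ...
 *	}
 *
 * The flags decide which of the drm_addbufs_*() helpers above services the
 * request; the values shown are just one plausible configuration.
 */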
1315
1316/**
1317 * Get information about the buffer mappings.
1318 *
 1319 * This was originally meant for debugging purposes, or for use by a sophisticated
1320 * client library to determine how best to use the available buffers (e.g.,
1321 * large buffers can be used for image transfer).
1322 *
1323 * \param inode device inode.
1324 * \param file_priv DRM file private.
1325 * \param cmd command.
1326 * \param arg pointer to a drm_buf_info structure.
1327 * \return zero on success or a negative number on failure.
1328 *
1329 * Increments drm_device::buf_use while holding the drm_device::count_lock
 1330 * lock, preventing further buffer allocation after this call. Information
1331 * about each requested buffer is then copied into user space.
1332 */
1333int drm_infobufs(struct drm_device *dev, void *data,
1334                 struct drm_file *file_priv)
1335{
1336        struct drm_device_dma *dma = dev->dma;
1337        struct drm_buf_info *request = data;
1338        int i;
1339        int count;
1340
1341        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1342                return -EINVAL;
1343
1344        if (!dma)
1345                return -EINVAL;
1346
1347        spin_lock(&dev->count_lock);
1348        if (atomic_read(&dev->buf_alloc)) {
1349                spin_unlock(&dev->count_lock);
1350                return -EBUSY;
1351        }
1352        ++dev->buf_use;         /* Can't allocate more after this call */
1353        spin_unlock(&dev->count_lock);
1354
1355        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1356                if (dma->bufs[i].buf_count)
1357                        ++count;
1358        }
1359
1360        DRM_DEBUG("count = %d\n", count);
1361
1362        if (request->count >= count) {
1363                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1364                        if (dma->bufs[i].buf_count) {
1365                                struct drm_buf_desc __user *to =
1366                                    &request->list[count];
1367                                struct drm_buf_entry *from = &dma->bufs[i];
1368                                struct drm_freelist *list = &dma->bufs[i].freelist;
1369                                if (copy_to_user(&to->count,
1370                                                 &from->buf_count,
1371                                                 sizeof(from->buf_count)) ||
1372                                    copy_to_user(&to->size,
1373                                                 &from->buf_size,
1374                                                 sizeof(from->buf_size)) ||
1375                                    copy_to_user(&to->low_mark,
1376                                                 &list->low_mark,
1377                                                 sizeof(list->low_mark)) ||
1378                                    copy_to_user(&to->high_mark,
1379                                                 &list->high_mark,
1380                                                 sizeof(list->high_mark)))
1381                                        return -EFAULT;
1382
1383                                DRM_DEBUG("%d %d %d %d %d\n",
1384                                          i,
1385                                          dma->bufs[i].buf_count,
1386                                          dma->bufs[i].buf_size,
1387                                          dma->bufs[i].freelist.low_mark,
1388                                          dma->bufs[i].freelist.high_mark);
1389                                ++count;
1390                        }
1391                }
1392        }
1393        request->count = count;
1394
1395        return 0;
1396}
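/*
 * Illustrative user-space sketch (not part of the original source): the usual
 * pattern for DRM_IOCTL_INFO_BUFS is to call it twice, first with count == 0
 * to learn how many size orders are populated, then again with a list large
 * enough to receive the descriptions:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *
 * Error handling is omitted for brevity.
 */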
1397
1398/**
1399 * Specifies a low and high water mark for buffer allocation
1400 *
1401 * \param inode device inode.
1402 * \param file_priv DRM file private.
1403 * \param cmd command.
1404 * \param arg a pointer to a drm_buf_desc structure.
1405 * \return zero on success or a negative number on failure.
1406 *
1407 * Verifies that the size order is bounded between the admissible orders and
1408 * updates the respective drm_device_dma::bufs entry low and high water mark.
1409 *
1410 * \note This ioctl is deprecated and mostly never used.
1411 */
1412int drm_markbufs(struct drm_device *dev, void *data,
1413                 struct drm_file *file_priv)
1414{
1415        struct drm_device_dma *dma = dev->dma;
1416        struct drm_buf_desc *request = data;
1417        int order;
1418        struct drm_buf_entry *entry;
1419
1420        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1421                return -EINVAL;
1422
1423        if (!dma)
1424                return -EINVAL;
1425
1426        DRM_DEBUG("%d, %d, %d\n",
1427                  request->size, request->low_mark, request->high_mark);
1428        order = drm_order(request->size);
1429        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1430                return -EINVAL;
1431        entry = &dma->bufs[order];
1432
1433        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1434                return -EINVAL;
1435        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1436                return -EINVAL;
1437
1438        entry->freelist.low_mark = request->low_mark;
1439        entry->freelist.high_mark = request->high_mark;
1440
1441        return 0;
1442}
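/*
 * Hedged usage sketch for the mark ioctl above (DRM_IOCTL_MARK_BUFS from the
 * uapi <drm/drm.h>): only the .size, .low_mark and .high_mark fields of the
 * drm_buf_desc are consulted; the size is converted back to a bucket index
 * with drm_order().  Untested illustration with hypothetical values, error
 * handling omitted.
 *
 *      struct drm_buf_desc desc = {
 *              .size      = 16384,     // selects dma->bufs[drm_order(16384)]
 *              .low_mark  = 4,
 *              .high_mark = 32,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_MARK_BUFS, &desc);
 */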
1443
1444/**
1445 * Unreserves the buffers in the list previously reserved using drmDMA.
1446 *
1447 * \param inode device inode.
1448 * \param file_priv DRM file private.
1449 * \param cmd command.
1450 * \param arg pointer to a drm_buf_free structure.
1451 * \return zero on success or a negative number on failure.
1452 *
1453 * Calls drm_free_buffer() for each used buffer.
1454 * This function is primarily used for debugging.
1455 */
1456int drm_freebufs(struct drm_device *dev, void *data,
1457                 struct drm_file *file_priv)
1458{
1459        struct drm_device_dma *dma = dev->dma;
1460        struct drm_buf_free *request = data;
1461        int i;
1462        int idx;
1463        struct drm_buf *buf;
1464
1465        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1466                return -EINVAL;
1467
1468        if (!dma)
1469                return -EINVAL;
1470
1471        DRM_DEBUG("%d\n", request->count);
1472        for (i = 0; i < request->count; i++) {
1473                if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1474                        return -EFAULT;
1475                if (idx < 0 || idx >= dma->buf_count) {
1476                        DRM_ERROR("Index %d (of %d max)\n",
1477                                  idx, dma->buf_count - 1);
1478                        return -EINVAL;
1479                }
1480                buf = dma->buflist[idx];
1481                if (buf->file_priv != file_priv) {
1482                        DRM_ERROR("Process %d freeing buffer it does not own\n",
1483                                  task_pid_nr(current));
1484                        return -EINVAL;
1485                }
1486                drm_free_buffer(dev, buf);
1487        }
1488
1489        return 0;
1490}
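/*
 * Hedged usage sketch for the free ioctl above (DRM_IOCTL_FREE_BUFS from the
 * uapi <drm/drm.h>): the caller passes an array of buffer indices it was
 * handed by the DMA reservation path, and each index must belong to the
 * calling file.  The index values shown are hypothetical.
 *
 *      int idx[2] = { 3, 7 };                  // indices previously reserved
 *      struct drm_buf_free req = { .count = 2, .list = idx };
 *
 *      ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */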
1491
1492/**
1493 * Maps all of the DMA buffers into client-virtual space (ioctl).
1494 *
1495 * \param inode device inode.
1496 * \param file_priv DRM file private.
1497 * \param cmd command.
1498 * \param arg pointer to a drm_buf_map structure.
1499 * \return zero on success or a negative number on failure.
1500 *
1501 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1502 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1503 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1504 * drm_mmap_dma().
1505 */
1506int drm_mapbufs(struct drm_device *dev, void *data,
1507                struct drm_file *file_priv)
1508{
1509        struct drm_device_dma *dma = dev->dma;
1510        int retcode = 0;
1511        const int zero = 0;
1512        unsigned long virtual;
1513        unsigned long address;
1514        struct drm_buf_map *request = data;
1515        int i;
1516
1517        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1518                return -EINVAL;
1519
1520        if (!dma)
1521                return -EINVAL;
1522
1523        spin_lock(&dev->count_lock);
1524        if (atomic_read(&dev->buf_alloc)) {
1525                spin_unlock(&dev->count_lock);
1526                return -EBUSY;
1527        }
1528        dev->buf_use++;         /* Can't allocate more after this call */
1529        spin_unlock(&dev->count_lock);
1530
1531        if (request->count >= dma->buf_count) {
1532                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1533                    (drm_core_check_feature(dev, DRIVER_SG) &&
1534                     (dma->flags & _DRM_DMA_USE_SG)) ||
1535                    (drm_core_check_feature(dev, DRIVER_FB_DMA) &&
1536                     (dma->flags & _DRM_DMA_USE_FB))) {
1537                        struct drm_local_map *map = dev->agp_buffer_map;
1538                        unsigned long token = dev->agp_buffer_token;
1539
1540                        if (!map) {
1541                                retcode = -EINVAL;
1542                                goto done;
1543                        }
1544                        virtual = vm_mmap(file_priv->filp, 0, map->size,
1545                                          PROT_READ | PROT_WRITE,
1546                                          MAP_SHARED,
1547                                          token);
1548                } else {
1549                        virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1550                                          PROT_READ | PROT_WRITE,
1551                                          MAP_SHARED, 0);
1552                }
1553                if (virtual > -1024UL) {
1554                        /* Real error */
1555                        retcode = (signed long)virtual;
1556                        goto done;
1557                }
1558                request->virtual = (void __user *)virtual;
1559
1560                for (i = 0; i < dma->buf_count; i++) {
1561                        if (copy_to_user(&request->list[i].idx,
1562                                         &dma->buflist[i]->idx,
1563                                         sizeof(request->list[0].idx))) {
1564                                retcode = -EFAULT;
1565                                goto done;
1566                        }
1567                        if (copy_to_user(&request->list[i].total,
1568                                         &dma->buflist[i]->total,
1569                                         sizeof(request->list[0].total))) {
1570                                retcode = -EFAULT;
1571                                goto done;
1572                        }
1573                        if (copy_to_user(&request->list[i].used,
1574                                         &zero, sizeof(zero))) {
1575                                retcode = -EFAULT;
1576                                goto done;
1577                        }
1578                        address = virtual + dma->buflist[i]->offset;    /* *** */
1579                        if (copy_to_user(&request->list[i].address,
1580                                         &address, sizeof(address))) {
1581                                retcode = -EFAULT;
1582                                goto done;
1583                        }
1584                }
1585        }
1586      done:
1587        request->count = dma->buf_count;
1588        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1589
1590        return retcode;
1591}
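/*
 * Hedged usage sketch for the map ioctl above (DRM_IOCTL_MAP_BUFS from the
 * uapi <drm/drm.h>): like the info ioctl it can be driven in two passes,
 * since request->count is always rewritten with dma->buf_count.  A first
 * call with .count == 0 just reports the total, and a second call with a
 * large enough .list maps the region and fills one drm_buf_pub per buffer.
 * Untested illustration, error handling omitted.
 *
 *      struct drm_buf_map map = { .count = 0 };
 *
 *      ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);            // learn buffer count
 *      map.list = calloc(map.count, sizeof(*map.list));
 *      ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);            // mmap + fill the list
 */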
1592
1593/**
1594 * Compute size order.  Returns the exponent of the smallest power of two
1595 * which is greater than or equal to the given number.
1596 *
1597 * \param size size.
1598 * \return order.
1599 *
1600 * \todo Can be made faster.
1601 */
1602int drm_order(unsigned long size)
1603{
1604        int order;
1605        unsigned long tmp;
1606
1607        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1608
1609        if (size & (size - 1))
1610                ++order;
1611
1612        return order;
1613}
1614EXPORT_SYMBOL(drm_order);
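/*
 * Note on the \todo above: the loop computes ceil(log2(size)), so for any
 * non-zero size the result should match the generic helper from
 * <linux/log2.h>, which this file already includes.  This is an untested
 * equivalence sketch, not a change to the exported interface:
 *
 *      static inline int drm_order_fast(unsigned long size)
 *      {
 *              return order_base_2(size);      // == drm_order(size) for size >= 1
 *      }
 */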
1615