/*
 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* temporary copy of drm_gem_{get,put}_pages() until the
 * "drm/gem: add functions to get/put pages" patch is merged..
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
  30/**
  31 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  32 * @obj: obj in question
  33 * @gfpmask: gfp mask of requested pages
  34 */
  35struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
  36{
  37        struct inode *inode;
  38        struct address_space *mapping;
  39        struct page *p, **pages;
  40        int i, npages;
  41
  42        /* This is the shared memory object that backs the GEM resource */
  43        inode = file_inode(obj->filp);
  44        mapping = inode->i_mapping;
  45
  46        npages = obj->size >> PAGE_SHIFT;
  47
  48        pages = drm_malloc_ab(npages, sizeof(struct page *));
  49        if (pages == NULL)
  50                return ERR_PTR(-ENOMEM);
  51
  52        gfpmask |= mapping_gfp_mask(mapping);
  53
  54        for (i = 0; i < npages; i++) {
  55                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
  56                if (IS_ERR(p))
  57                        goto fail;
  58                pages[i] = p;
  59
  60                /* There is a hypothetical issue w/ drivers that require
  61                 * buffer memory in the low 4GB.. if the pages are un-
  62                 * pinned, and swapped out, they can end up swapped back
  63                 * in above 4GB.  If pages are already in memory, then
  64                 * shmem_read_mapping_page_gfp will ignore the gfpmask,
  65                 * even if the already in-memory page disobeys the mask.
  66                 *
  67                 * It is only a theoretical issue today, because none of
  68                 * the devices with this limitation can be populated with
  69                 * enough memory to trigger the issue.  But this BUG_ON()
  70                 * is here as a reminder in case the problem with
  71                 * shmem_read_mapping_page_gfp() isn't solved by the time
  72                 * it does become a real issue.
  73                 *
  74                 * See this thread: http://lkml.org/lkml/2011/7/11/238
  75                 */
  76                BUG_ON((gfpmask & __GFP_DMA32) &&
  77                                (page_to_pfn(p) >= 0x00100000UL));
  78        }
  79
  80        return pages;
  81
  82fail:
  83        while (i--)
  84                page_cache_release(pages[i]);
  85
  86        drm_free_large(pages);
  87        return ERR_CAST(p);
  88}
  89
  90/**
  91 * drm_gem_put_pages - helper to free backing pages for a GEM object
  92 * @obj: obj in question
  93 * @pages: pages to free
  94 */
  95void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
  96                bool dirty, bool accessed)
  97{
  98        int i, npages;
  99
 100        npages = obj->size >> PAGE_SHIFT;
 101
 102        for (i = 0; i < npages; i++) {
 103                if (dirty)
 104                        set_page_dirty(pages[i]);
 105
 106                if (accessed)
 107                        mark_page_accessed(pages[i]);
 108
 109                /* Undo the reference we took when populating the table */
 110                page_cache_release(pages[i]);
 111        }
 112
 113        drm_free_large(pages);
 114}
 115
 116int
 117_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 118{
 119        struct drm_device *dev = obj->dev;
 120        struct drm_gem_mm *mm = dev->mm_private;
 121        struct drm_map_list *list;
 122        struct drm_local_map *map;
 123        int ret = 0;
 124
 125        /* Set the object up for mmap'ing */
 126        list = &obj->map_list;
 127        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 128        if (!list->map)
 129                return -ENOMEM;
 130
 131        map = list->map;
 132        map->type = _DRM_GEM;
 133        map->size = size;
 134        map->handle = obj;
 135
 136        /* Get a DRM GEM mmap offset allocated... */
 137        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
 138                        size / PAGE_SIZE, 0, 0);
 139
 140        if (!list->file_offset_node) {
 141                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
 142                ret = -ENOSPC;
 143                goto out_free_list;
 144        }
 145
 146        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
 147                        size / PAGE_SIZE, 0);
 148        if (!list->file_offset_node) {
 149                ret = -ENOMEM;
 150                goto out_free_list;
 151        }
 152
 153        list->hash.key = list->file_offset_node->start;
 154        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
 155        if (ret) {
 156                DRM_ERROR("failed to add to map hash\n");
 157                goto out_free_mm;
 158        }
 159
 160        return 0;
 161
 162out_free_mm:
 163        drm_mm_put_block(list->file_offset_node);
 164out_free_list:
 165        kfree(list->map);
 166        list->map = NULL;
 167
 168        return ret;
 169}
 170