linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

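/**
 * struct vmwgfx_gmrid_man - GMR id manager private state
 *
 * @lock: Protects @used_gmr_pages.
 * @gmr_ida: IDA from which GMR ids are allocated.
 * @max_gmr_ids: Number of GMR ids available to this placement.
 * @max_gmr_pages: Page quota for this placement; 0 disables page accounting.
 * @used_gmr_pages: Pages currently accounted against the quota.
 */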
struct vmwgfx_gmrid_man {
        spinlock_t lock;
        struct ida gmr_ida;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t used_gmr_pages;
};

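/*
 * vmw_gmrid_man_get_node - Allocate a GMR id and account its pages
 *
 * Allocates an id from the IDA and, when a page quota is configured,
 * charges the buffer object's pages against it. Returns 0 on success,
 * the negative error from ida_alloc_max() if no id is available, or
 * -ENOSPC if the page quota would be exceeded.
 */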
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
                                  struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_mem_reg *mem)
{
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;
        int id;

        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0)
                return id;

        spin_lock(&gman->lock);

        if (gman->max_gmr_pages > 0) {
                gman->used_gmr_pages += bo->num_pages;
                if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
                        goto nospace;
        }

        mem->mm_node = gman;
        mem->start = id;
        mem->num_pages = bo->num_pages;

        spin_unlock(&gman->lock);
        return 0;

nospace:
        gman->used_gmr_pages -= bo->num_pages;
        spin_unlock(&gman->lock);
        ida_free(&gman->gmr_ida, id);
        return -ENOSPC;
}

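/*
 * vmw_gmrid_man_put_node - Free a GMR id and return its pages
 *
 * Releases the id back to the IDA, subtracts the region's pages from
 * the usage counter and clears mem->mm_node.
 */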
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
                                   struct ttm_mem_reg *mem)
{
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;

        if (mem->mm_node) {
                ida_free(&gman->gmr_ida, mem->start);
                spin_lock(&gman->lock);
                gman->used_gmr_pages -= mem->num_pages;
                spin_unlock(&gman->lock);
                mem->mm_node = NULL;
        }
}

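/*
 * vmw_gmrid_man_init - Set up the GMR id manager for a placement
 *
 * Allocates the private manager state and picks the id and page limits
 * for either the GMR or the MOB placement. Note that p_size carries the
 * placement type here rather than a size in pages.
 */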
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
                              unsigned long p_size)
{
        struct vmw_private *dev_priv =
                container_of(man->bdev, struct vmw_private, bdev);
        struct vmwgfx_gmrid_man *gman =
                kzalloc(sizeof(*gman), GFP_KERNEL);

        if (unlikely(!gman))
                return -ENOMEM;

        spin_lock_init(&gman->lock);
        gman->used_gmr_pages = 0;
        ida_init(&gman->gmr_ida);

        switch (p_size) {
        case VMW_PL_GMR:
                gman->max_gmr_ids = dev_priv->max_gmr_ids;
                gman->max_gmr_pages = dev_priv->max_gmr_pages;
                break;
        case VMW_PL_MOB:
                gman->max_gmr_ids = VMWGFX_NUM_MOB;
                gman->max_gmr_pages = dev_priv->max_mob_pages;
                break;
        default:
                BUG();
        }
        man->priv = (void *) gman;
        return 0;
}

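/*
 * vmw_gmrid_man_takedown - Tear down the GMR id manager
 *
 * Destroys the IDA and frees the private manager state.
 */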
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;

        if (gman) {
                ida_destroy(&gman->gmr_ida);
                kfree(gman);
        }
        return 0;
}

static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
                                struct drm_printer *printer)
{
        drm_printf(printer, "No debug info available for the GMR id manager\n");
}

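/* Memory type manager ops shared by the GMR and MOB id placements. */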
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
        .init = vmw_gmrid_man_init,
        .takedown = vmw_gmrid_man_takedown,
        .get_node = vmw_gmrid_man_get_node,
        .put_node = vmw_gmrid_man_put_node,
        .debug = vmw_gmrid_man_debug
};
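
/*
 * Illustrative sketch (not part of this file): with the ttm_mem_type_manager
 * interface used above, the driver is expected to point the memory type's
 * func at this table in its TTM setup code and to pass the placement id as
 * the "size" argument, which vmw_gmrid_man_init() switches on. Roughly
 * (names follow the vmwgfx driver; the exact hookup varies between kernel
 * versions):
 *
 *        man->func = &vmw_gmrid_manager_func;
 *        ...
 *        ret = ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, VMW_PL_GMR);
 */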