linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
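
/*
 * struct vmwgfx_gmrid_man - Id manager for GMR and MOB backed resources.
 * @manager: The embedded TTM resource manager.
 * @lock: Protects @used_gmr_pages and @max_gmr_pages.
 * @gmr_ida: IDA handing out GMR/MOB ids.
 * @max_gmr_ids: Maximum number of ids this manager may hand out.
 * @max_gmr_pages: Soft limit on the number of backing pages.
 * @used_gmr_pages: Number of backing pages currently accounted for.
 */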
struct vmwgfx_gmrid_man {
        struct ttm_resource_manager manager;
        spinlock_t lock;
        struct ida gmr_ida;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t used_gmr_pages;
};

static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
        return container_of(man, struct vmwgfx_gmrid_man, manager);
}
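
/*
 * vmw_gmrid_man_get_node - Allocate an id and account the buffer's pages.
 *
 * Allocates the struct ttm_resource, grabs an id from the IDA and charges
 * the buffer's pages against the soft page limit. When the limit is
 * exceeded it is grown (bounded by half of system RAM); only if growing is
 * not possible does the allocation fail with -ENOSPC.
 */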
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
                                  struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource **res)
{
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
        int id;

        *res = kmalloc(sizeof(**res), GFP_KERNEL);
        if (!*res)
                return -ENOMEM;

        ttm_resource_init(bo, place, *res);

        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0) {
                kfree(*res);
                return id;
        }

        spin_lock(&gman->lock);

        if (gman->max_gmr_pages > 0) {
                gman->used_gmr_pages += (*res)->num_pages;
                /*
                 * Because the graphics memory is a soft limit we can try to
                 * expand it instead of letting the userspace apps crash.
                 * We're just going to have a sane limit (half of RAM)
                 * on the number of MOBs that we create and will try to keep
                 * the system running until we reach that.
                 */
                if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) {
                        const unsigned long max_graphics_pages = totalram_pages() / 2;
                        uint32_t new_max_pages = 0;

                        DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
                        vmw_host_printf("vmwgfx, warning: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");

                        if (gman->max_gmr_pages > (max_graphics_pages / 2)) {
                                DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n");
                                new_max_pages = max_graphics_pages;
                        } else
                                new_max_pages = gman->max_gmr_pages * 2;
                        if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
                                DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
                                         ((new_max_pages) << (PAGE_SHIFT - 10)));

                                gman->max_gmr_pages = new_max_pages;
                        } else {
                                char buf[256];
                                snprintf(buf, sizeof(buf),
                                         "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
                                         ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
                                vmw_host_printf("%s", buf);
                                DRM_WARN("%s", buf);
                                goto nospace;
                        }
                }
        }

        (*res)->start = id;

        spin_unlock(&gman->lock);
        return 0;

nospace:
        gman->used_gmr_pages -= (*res)->num_pages;
        spin_unlock(&gman->lock);
        ida_free(&gman->gmr_ida, id);
        kfree(*res);
        return -ENOSPC;
}
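
/*
 * vmw_gmrid_man_put_node - Release an id and its page accounting.
 *
 * Returns the id to the IDA, subtracts the buffer's pages from the used
 * count and frees the resource.
 */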
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
                                   struct ttm_resource *res)
{
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

        ida_free(&gman->gmr_ida, res->start);
        spin_lock(&gman->lock);
        gman->used_gmr_pages -= res->num_pages;
        spin_unlock(&gman->lock);
        kfree(res);
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
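
/*
 * vmw_gmrid_man_init - Create and register a GMR or MOB id manager.
 *
 * Sets up the IDA, the spinlock and the page limits for @type
 * (VMW_PL_GMR or VMW_PL_MOB) and registers the manager with TTM.
 */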
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
        struct ttm_resource_manager *man;
        struct vmwgfx_gmrid_man *gman =
                kzalloc(sizeof(*gman), GFP_KERNEL);

        if (unlikely(!gman))
                return -ENOMEM;

        man = &gman->manager;

        man->func = &vmw_gmrid_manager_func;
        /* TODO: This is most likely not correct */
        man->use_tt = true;
        ttm_resource_manager_init(man, 0);
        spin_lock_init(&gman->lock);
        gman->used_gmr_pages = 0;
        ida_init(&gman->gmr_ida);

        switch (type) {
        case VMW_PL_GMR:
                gman->max_gmr_ids = dev_priv->max_gmr_ids;
                gman->max_gmr_pages = dev_priv->max_gmr_pages;
                break;
        case VMW_PL_MOB:
                gman->max_gmr_ids = VMWGFX_NUM_MOB;
                gman->max_gmr_pages = dev_priv->max_mob_pages;
                break;
        default:
                BUG();
        }
        ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}
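
/*
 * vmw_gmrid_man_fini - Evict all remaining resources and tear down the
 * manager registered for @type.
 */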
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
        struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

        ttm_resource_manager_set_used(man, false);

        ttm_resource_manager_evict_all(&dev_priv->bdev, man);

        ttm_resource_manager_cleanup(man);

        ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
        ida_destroy(&gman->gmr_ida);
        kfree(gman);
}
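
/* TTM resource manager callbacks for the GMR/MOB id managers. */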
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
        .alloc = vmw_gmrid_man_get_node,
        .free = vmw_gmrid_man_put_node,
};