/*
 * GMR-ID (Guest Memory Region id) range manager for the vmwgfx driver.
 *
 * Allocates small integer ids for GMR/MOB bindings via an IDA and
 * accounts the total number of pages bound, enforcing the device's
 * page limit when one is configured.
 *
 * NOTE(review): the original file header (copyright/license banner) was
 * lost in extraction — restore it from the upstream kernel source.
 */
31#include "vmwgfx_drv.h"
32#include <drm/ttm/ttm_module.h>
33#include <drm/ttm/ttm_bo_driver.h>
34#include <drm/ttm/ttm_placement.h>
35#include <linux/idr.h>
36#include <linux/spinlock.h>
37#include <linux/kernel.h>
38
/*
 * Private state for one GMR-id memory-type manager instance
 * (one per pool: GMR or MOB — see vmw_gmrid_man_init()).
 */
struct vmwgfx_gmrid_man {
	spinlock_t lock;         /* protects used_gmr_pages accounting */
	struct ida gmr_ida;      /* allocator for region ids [0, max_gmr_ids) */
	uint32_t max_gmr_ids;    /* number of ids available in this pool */
	uint32_t max_gmr_pages;  /* page limit for the pool; 0 means unlimited */
	uint32_t used_gmr_pages; /* pages currently allocated from the pool */
};
46
47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
48 struct ttm_buffer_object *bo,
49 const struct ttm_place *place,
50 struct ttm_mem_reg *mem)
51{
52 struct vmwgfx_gmrid_man *gman =
53 (struct vmwgfx_gmrid_man *)man->priv;
54 int id;
55
56 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
57 if (id < 0)
58 return id;
59
60 spin_lock(&gman->lock);
61
62 if (gman->max_gmr_pages > 0) {
63 gman->used_gmr_pages += bo->num_pages;
64 if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
65 goto nospace;
66 }
67
68 mem->mm_node = gman;
69 mem->start = id;
70 mem->num_pages = bo->num_pages;
71
72 spin_unlock(&gman->lock);
73 return 0;
74
75nospace:
76 gman->used_gmr_pages -= bo->num_pages;
77 spin_unlock(&gman->lock);
78 ida_free(&gman->gmr_ida, id);
79 return -ENOSPC;
80}
81
82static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
83 struct ttm_mem_reg *mem)
84{
85 struct vmwgfx_gmrid_man *gman =
86 (struct vmwgfx_gmrid_man *)man->priv;
87
88 if (mem->mm_node) {
89 ida_free(&gman->gmr_ida, mem->start);
90 spin_lock(&gman->lock);
91 gman->used_gmr_pages -= mem->num_pages;
92 spin_unlock(&gman->lock);
93 mem->mm_node = NULL;
94 }
95}
96
97static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
98 unsigned long p_size)
99{
100 struct vmw_private *dev_priv =
101 container_of(man->bdev, struct vmw_private, bdev);
102 struct vmwgfx_gmrid_man *gman =
103 kzalloc(sizeof(*gman), GFP_KERNEL);
104
105 if (unlikely(!gman))
106 return -ENOMEM;
107
108 spin_lock_init(&gman->lock);
109 gman->used_gmr_pages = 0;
110 ida_init(&gman->gmr_ida);
111
112 switch (p_size) {
113 case VMW_PL_GMR:
114 gman->max_gmr_ids = dev_priv->max_gmr_ids;
115 gman->max_gmr_pages = dev_priv->max_gmr_pages;
116 break;
117 case VMW_PL_MOB:
118 gman->max_gmr_ids = VMWGFX_NUM_MOB;
119 gman->max_gmr_pages = dev_priv->max_mob_pages;
120 break;
121 default:
122 BUG();
123 }
124 man->priv = (void *) gman;
125 return 0;
126}
127
128static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
129{
130 struct vmwgfx_gmrid_man *gman =
131 (struct vmwgfx_gmrid_man *)man->priv;
132
133 if (gman) {
134 ida_destroy(&gman->gmr_ida);
135 kfree(gman);
136 }
137 return 0;
138}
139
/*
 * vmw_gmrid_man_debug - TTM debug hook; this manager exposes no state,
 * so just print a placeholder line.
 */
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *printer)
{
	drm_printf(printer, "No debug info available for the GMR id manager\n");
}
145
/* TTM memory-type manager vtable for the GMR/MOB id pools. */
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	.init = vmw_gmrid_man_init,
	.takedown = vmw_gmrid_man_takedown,
	.get_node = vmw_gmrid_man_get_node,
	.put_node = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};
153