#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/debugfs.h>
#include <linux/mm.h>

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_api.h>

#include "ttm_module.h"

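/*
 * ttm_global_mutex - protecting the global state
 */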
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root))
		ttm_debugfs_root = NULL;

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourself to only use 2GiB maximum. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

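/*
 * ttm_global_swapout - try to swap out pages from any registered device.
 *
 * Devices are walked in order; the first device that manages to swap
 * something out is moved to the tail of the device list so that the next
 * call starts with a different device.
 *
 * Returns the number of pages swapped out, a negative error code, or
 * zero if nothing could be swapped out.
 */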
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);

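/*
 * ttm_device_swapout - try to swap out a single buffer object from the
 * device's per-priority LRU lists. Only resource managers backed by TTM
 * pages (use_tt) are considered.
 *
 * Returns the size in pages of the buffer that was swapped out, a
 * negative error code on failure, or zero if no candidate was found.
 */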
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	unsigned i, j;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			list_for_each_entry(bo, &man->lru[j], lru) {
				uint32_t num_pages = PFN_UP(bo->base.size);

				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
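				/* ttm_bo_swapout has dropped the lru_lock */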
				if (!ret)
					return num_pages;
				if (ret != -EBUSY)
					return ret;
			}
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);

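/*
 * Periodic work item that retries delayed buffer object destruction and
 * re-arms itself as long as buffers remain on the delayed destroy list.
 */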
static void ttm_device_delayed_workqueue(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

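/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for buffer object mappings.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device. Returns 0 on success, a negative
 * error code on failure.
 */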
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);

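/*
 * ttm_device_fini - tear down a TTM device: disable the system memory
 * manager, drop the device from the global device list, flush the
 * delayed destroy work, and release the page pool and global state.
 */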
void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);