/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Christian König
 */
#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

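/**
 * ttm_resource_init - initialize a new resource object
 * @bo: the buffer object the resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize the common fields of @res from @bo and @place and reset the
 * bus placement to its non-iomem, cached defaults.
 */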
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
}
EXPORT_SYMBOL(ttm_resource_init);

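/**
 * ttm_resource_alloc - allocate a resource for a buffer object
 * @bo: the buffer object to allocate for
 * @place: the requested placement
 * @res_ptr: on success, set to the newly allocated resource
 *
 * Forward the allocation to the resource manager backing @place->mem_type.
 */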
int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);

	return man->func->alloc(man, bo, place, res_ptr);
}

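/**
 * ttm_resource_free - free a resource and clear the pointer to it
 * @bo: the buffer object the resource belongs to
 * @res: the resource to free, set to NULL on return
 *
 * Hand the resource back to the manager it was allocated from; a NULL
 * *@res is silently ignored.
 */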
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
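/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @p_size: size of the managed area in pages
 *
 * Initialise core parts of a manager object: the move lock and fence,
 * the per-priority LRU lists and the managed size.
 */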
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       unsigned long p_size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

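/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device the manager belongs to
 * @man: manager to evict from
 *
 * Evict all the objects out of a memory manager until it is empty, then
 * wait for the manager's pending move fence. Part of the memory manager
 * cleanup sequence.
 */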
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

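	/*
	 * Can't use standard list traversal since we're unlocking.
	 */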
	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

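/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump
 * @p: printer to use for debug
 *
 * Dump the state of the manager and chain up to the manager's own debug
 * callback if it provides one.
 */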
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct dma_buf_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

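	/*
	 * Walk the sg_table forward until the cached entry covers page @i.
	 * If @i lies before the cached entry, restart the walk from the
	 * first entry.
	 */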
retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	dma_buf_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct dma_buf_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

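/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */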
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

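/*
 * Iterator for io memory that is mapped linearly into the kernel address
 * space, either through a vmap provided by the driver (mem->bus.addr) or
 * by io-/memremapping the whole resource in ttm_kmap_iter_linear_io_init().
 * map_local then only needs to offset into that single mapping.
 */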
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct dma_buf_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	dma_buf_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

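/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap
 *
 * Reserve the io space of @mem and map it linearly, preferring a mapping
 * that matches @mem->bus.caching, so that the iterator can hand out
 * page-sized chunks of the mapping.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter or an ERR_PTR()
 * on failure.
 */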
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap_wc(mem->bus.offset,
							       bus_size));
		else if (mem->bus.caching == ttm_cached)
			dma_buf_map_set_vaddr(&iter_io->dmap,
					      memremap(mem->bus.offset, bus_size,
						       MEMREMAP_WB |
						       MEMREMAP_WT |
						       MEMREMAP_WC));
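		/* If uncached requested or if mapping cached or wc failed */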
		if (dma_buf_map_is_null(&iter_io->dmap))
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap(mem->bus.offset,
							    bus_size));

		if (dma_buf_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

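/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource that was iterated over
 *
 * Unmap the mapping set up by ttm_kmap_iter_linear_io_init(), unless it
 * was provided by the driver, and drop the io reservation again.
 */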
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}