#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16

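/*
 * ttm_bo_vm_fault_idle - quiesce a moving buffer object before faulting
 *
 * Returns 0 if the object is idle, VM_FAULT_RETRY if the fault should be
 * retried once the move fence has signaled, or VM_FAULT_SIGBUS /
 * VM_FAULT_NOPAGE on wait errors. In the retry path without
 * FAULT_FLAG_RETRY_NOWAIT, mmap_sem and the buffer reservation are
 * dropped before returning.
 */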
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

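/*
 * ttm_bo_vm_fault - fault handler wired into ttm_bo_vm_ops
 *
 * Trylocks the buffer reservation to avoid the mmap_sem -> bo::reserve
 * locking-order reversal, waits for any pipelined move to finish, and
 * then inserts up to TTM_BO_VM_NUM_PREFAULT PTEs starting at the
 * faulting address.
 */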
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = bdev->driver->io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

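/*
 * ttm_bo_vm_access_kmap - bounce ptrace-style access through kmap
 *
 * Copies between @buf and the object's backing pages using short-lived
 * kernel mappings, one page at a time. Returns @len on success or a
 * negative error code.
 */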
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 void *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/*
	 * Copy a page at a time, that way no extra virtual space is needed.
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

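/*
 * ttm_bo_vm_access - vm_operations_struct.access for TTM mappings
 *
 * Called for ptrace or /proc/<pid>/mem access to a mapped buffer object.
 * System and TT placements are accessed through kmap; other placements
 * are forwarded to the driver's access_memory hook, if any.
 */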
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = addr - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				break;	/* drop the reservation, don't leak it */
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

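/*
 * ttm_bo_vm_lookup - find the buffer object backing an mmap offset
 *
 * Resolves @offset/@pages in @bdev's vma_manager and takes a reference,
 * unless the object is already on its way to destruction.
 */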
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

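/*
 * ttm_bo_default_io_mem_pfn - default linear-aperture pfn lookup
 *
 * Translates a page offset inside the object into a pfn in the io-memory
 * aperture backing it. Drivers without a special aperture layout can
 * point their ttm_bo_driver::io_mem_pfn hook at this helper, e.g.
 * (mydrv_bo_driver is a hypothetical driver structure):
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		...
 *		.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 *	};
 */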
unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
					unsigned long page_offset)
{
	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);

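/*
 * ttm_bo_mmap - mmap entry point for TTM-based drivers
 *
 * Looks up and access-checks the buffer object behind vma->vm_pgoff and
 * sets the vma up so that ttm_bo_vm_fault() populates it lazily. A
 * driver's file_operations.mmap usually just forwards here; a minimal
 * sketch, with hypothetical mydrv_* names:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *dev = mydrv_dev_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &dev->bdev);
 *	}
 */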
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719.
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

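/*
 * ttm_fbdev_mmap - map a single buffer object at offset zero
 *
 * Helper for fbdev emulation, where the framebuffer BO is handed in
 * directly rather than looked up by offset; takes an extra reference
 * on @bo for the mapping.
 */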
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);