28#include <drm/drmP.h>
29#include <drm/drm_vma_manager.h>
30#include <drm/i915_drm.h>
31#include "i915_drv.h"
32#include "i915_vgpu.h"
33#include "i915_trace.h"
34#include "intel_drv.h"
35#include "intel_mocs.h"
36#include <linux/shmem_fs.h>
37#include <linux/slab.h>
38#include <linux/swap.h>
39#include <linux/pci.h>
40#include <linux/dma-buf.h>
41
42static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
43static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
44static void
45i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
46static void
47i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
48
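/* An object's CPU reads/writes are coherent with its contents when the
 * platform shares a last-level cache with the GPU, or when the object is
 * mapped with any caching mode other than I915_CACHE_NONE.
 */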
49static bool cpu_cache_is_coherent(struct drm_device *dev,
50 enum i915_cache_level level)
51{
52 return HAS_LLC(dev) || level != I915_CACHE_NONE;
53}
54
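/* CPU writes must be clflushed if the object is not cache-coherent, or if
 * it is pinned for display (scanout reads bypass the CPU cache).
 */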
55static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
56{
57 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
58 return true;
59
60 return obj->pin_display;
61}
62
63
64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65 size_t size)
66{
67 spin_lock(&dev_priv->mm.object_stat_lock);
68 dev_priv->mm.object_count++;
69 dev_priv->mm.object_memory += size;
70 spin_unlock(&dev_priv->mm.object_stat_lock);
71}
72
73static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
74 size_t size)
75{
76 spin_lock(&dev_priv->mm.object_stat_lock);
77 dev_priv->mm.object_count--;
78 dev_priv->mm.object_memory -= size;
79 spin_unlock(&dev_priv->mm.object_stat_lock);
80}
81
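/* Block until any pending GPU reset has completed, so that callers do not
 * race with the reset handler while trying to take struct_mutex.
 */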
82static int
83i915_gem_wait_for_error(struct i915_gpu_error *error)
84{
85 int ret;
86
87 if (!i915_reset_in_progress(error))
88 return 0;
89
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * indefinitely in case the reset never finishes; if it times out we
	 * report -EIO rather than blocking the caller forever.
	 */
95 ret = wait_event_interruptible_timeout(error->reset_queue,
96 !i915_reset_in_progress(error),
97 10*HZ);
98 if (ret == 0) {
99 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
100 return -EIO;
101 } else if (ret < 0) {
102 return ret;
103 } else {
104 return 0;
105 }
106}
107
108int i915_mutex_lock_interruptible(struct drm_device *dev)
109{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 int ret;
112
113 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
114 if (ret)
115 return ret;
116
117 ret = mutex_lock_interruptible(&dev->struct_mutex);
118 if (ret)
119 return ret;
120
121 WARN_ON(i915_verify_lists(dev));
122 return 0;
123}
124
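/* Report the total size of the global GTT and how much of it is still
 * available (i.e. not occupied by pinned VMAs) to userspace.
 */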
125int
126i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
127 struct drm_file *file)
128{
129 struct drm_i915_private *dev_priv = to_i915(dev);
130 struct i915_ggtt *ggtt = &dev_priv->ggtt;
131 struct drm_i915_gem_get_aperture *args = data;
132 struct i915_vma *vma;
133 size_t pinned;
134
135 pinned = 0;
136 mutex_lock(&dev->struct_mutex);
137 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
138 if (vma->pin_count)
139 pinned += vma->node.size;
140 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
141 if (vma->pin_count)
142 pinned += vma->node.size;
143 mutex_unlock(&dev->struct_mutex);
144
145 args->aper_size = ggtt->base.total;
146 args->aper_available_size = args->aper_size - pinned;
147
148 return 0;
149}
150
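/* Copy the object's shmem pages into its contiguous physical handle and
 * build a single-entry sg_table pointing at that backing store.
 */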
151static int
152i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
153{
154 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
155 char *vaddr = obj->phys_handle->vaddr;
156 struct sg_table *st;
157 struct scatterlist *sg;
158 int i;
159
160 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
161 return -EINVAL;
162
163 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
164 struct page *page;
165 char *src;
166
167 page = shmem_read_mapping_page(mapping, i);
168 if (IS_ERR(page))
169 return PTR_ERR(page);
170
171 src = kmap_atomic(page);
172 memcpy(vaddr, src, PAGE_SIZE);
173 drm_clflush_virt_range(vaddr, PAGE_SIZE);
174 kunmap_atomic(src);
175
176 put_page(page);
177 vaddr += PAGE_SIZE;
178 }
179
180 i915_gem_chipset_flush(obj->base.dev);
181
182 st = kmalloc(sizeof(*st), GFP_KERNEL);
183 if (st == NULL)
184 return -ENOMEM;
185
186 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
187 kfree(st);
188 return -ENOMEM;
189 }
190
191 sg = st->sgl;
192 sg->offset = 0;
193 sg->length = obj->base.size;
194
195 sg_dma_address(sg) = obj->phys_handle->busaddr;
196 sg_dma_len(sg) = obj->base.size;
197
198 obj->pages = st;
199 return 0;
200}
201
202static void
203i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
204{
205 int ret;
206
207 BUG_ON(obj->madv == __I915_MADV_PURGED);
208
209 ret = i915_gem_object_set_to_cpu_domain(obj, true);
210 if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best: force the domains back to CPU.
		 */
214 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
215 }
216
217 if (obj->madv == I915_MADV_DONTNEED)
218 obj->dirty = 0;
219
220 if (obj->dirty) {
221 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
222 char *vaddr = obj->phys_handle->vaddr;
223 int i;
224
225 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
226 struct page *page;
227 char *dst;
228
229 page = shmem_read_mapping_page(mapping, i);
230 if (IS_ERR(page))
231 continue;
232
233 dst = kmap_atomic(page);
234 drm_clflush_virt_range(vaddr, PAGE_SIZE);
235 memcpy(dst, vaddr, PAGE_SIZE);
236 kunmap_atomic(dst);
237
238 set_page_dirty(page);
239 if (obj->madv == I915_MADV_WILLNEED)
240 mark_page_accessed(page);
241 put_page(page);
242 vaddr += PAGE_SIZE;
243 }
244 obj->dirty = 0;
245 }
246
247 sg_free_table(obj->pages);
248 kfree(obj->pages);
249}
250
251static void
252i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
253{
254 drm_pci_free(obj->base.dev, obj->phys_handle);
255}
256
257static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
258 .get_pages = i915_gem_object_get_pages_phys,
259 .put_pages = i915_gem_object_put_pages_phys,
260 .release = i915_gem_object_release_phys,
261};
262
263static int
264drop_pages(struct drm_i915_gem_object *obj)
265{
266 struct i915_vma *vma, *next;
267 int ret;
268
269 drm_gem_object_reference(&obj->base);
270 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
271 if (i915_vma_unbind(vma))
272 break;
273
274 ret = i915_gem_object_put_pages(obj);
275 drm_gem_object_unreference(&obj->base);
276
277 return ret;
278}
279
280int
281i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
282 int align)
283{
284 drm_dma_handle_t *phys;
285 int ret;
286
287 if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
289 return -EBUSY;
290
291 return 0;
292 }
293
294 if (obj->madv != I915_MADV_WILLNEED)
295 return -EFAULT;
296
297 if (obj->base.filp == NULL)
298 return -EINVAL;
299
300 ret = drop_pages(obj);
301 if (ret)
302 return ret;

	/* Create a new, physically contiguous backing store for the object. */
305 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
306 if (!phys)
307 return -ENOMEM;
308
309 obj->phys_handle = phys;
310 obj->ops = &i915_gem_phys_ops;
311
312 return i915_gem_object_get_pages(obj);
313}
314
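/* Write user data directly into the physically contiguous backing store of
 * a "phys" object, flushing the CPU cache and chipset afterwards.
 */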
315static int
316i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
317 struct drm_i915_gem_pwrite *args,
318 struct drm_file *file_priv)
319{
320 struct drm_device *dev = obj->base.dev;
321 void *vaddr = obj->phys_handle->vaddr + args->offset;
322 char __user *user_data = u64_to_user_ptr(args->data_ptr);
323 int ret = 0;
324
	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
328 ret = i915_gem_object_wait_rendering(obj, false);
329 if (ret)
330 return ret;
331
332 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
333 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
334 unsigned long unwritten;
335

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
340 mutex_unlock(&dev->struct_mutex);
341 unwritten = copy_from_user(vaddr, user_data, args->size);
342 mutex_lock(&dev->struct_mutex);
343 if (unwritten) {
344 ret = -EFAULT;
345 goto out;
346 }
347 }
348
349 drm_clflush_virt_range(vaddr, args->size);
350 i915_gem_chipset_flush(dev);
351
352out:
353 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
354 return ret;
355}
356
357void *i915_gem_object_alloc(struct drm_device *dev)
358{
359 struct drm_i915_private *dev_priv = dev->dev_private;
360 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
361}
362
363void i915_gem_object_free(struct drm_i915_gem_object *obj)
364{
365 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
366 kmem_cache_free(dev_priv->objects, obj);
367}
368
369static int
370i915_gem_create(struct drm_file *file,
371 struct drm_device *dev,
372 uint64_t size,
373 uint32_t *handle_p)
374{
375 struct drm_i915_gem_object *obj;
376 int ret;
377 u32 handle;
378
379 size = roundup(size, PAGE_SIZE);
380 if (size == 0)
381 return -EINVAL;
382
383
384 obj = i915_gem_alloc_object(dev, size);
385 if (obj == NULL)
386 return -ENOMEM;
387
388 ret = drm_gem_handle_create(file, &obj->base, &handle);
389
390 drm_gem_object_unreference_unlocked(&obj->base);
391 if (ret)
392 return ret;
393
394 *handle_p = handle;
395 return 0;
396}
397
398int
399i915_gem_dumb_create(struct drm_file *file,
400 struct drm_device *dev,
401 struct drm_mode_create_dumb *args)
402{
403
404 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
405 args->size = args->pitch * args->height;
406 return i915_gem_create(file, dev,
407 args->size, &args->handle);
408}
409
/**
 * Creates a new GEM object and returns a handle to it.
 */
413int
414i915_gem_create_ioctl(struct drm_device *dev, void *data,
415 struct drm_file *file)
416{
417 struct drm_i915_gem_create *args = data;
418
419 return i915_gem_create(file, dev,
420 args->size, &args->handle);
421}
422
423static inline int
424__copy_to_user_swizzled(char __user *cpu_vaddr,
425 const char *gpu_vaddr, int gpu_offset,
426 int length)
427{
428 int ret, cpu_offset = 0;
429
430 while (length > 0) {
431 int cacheline_end = ALIGN(gpu_offset + 1, 64);
432 int this_length = min(cacheline_end - gpu_offset, length);
433 int swizzled_gpu_offset = gpu_offset ^ 64;
434
435 ret = __copy_to_user(cpu_vaddr + cpu_offset,
436 gpu_vaddr + swizzled_gpu_offset,
437 this_length);
438 if (ret)
439 return ret + length;
440
441 cpu_offset += this_length;
442 gpu_offset += this_length;
443 length -= this_length;
444 }
445
446 return 0;
447}
448
449static inline int
450__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
451 const char __user *cpu_vaddr,
452 int length)
453{
454 int ret, cpu_offset = 0;
455
456 while (length > 0) {
457 int cacheline_end = ALIGN(gpu_offset + 1, 64);
458 int this_length = min(cacheline_end - gpu_offset, length);
459 int swizzled_gpu_offset = gpu_offset ^ 64;
460
461 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
462 cpu_vaddr + cpu_offset,
463 this_length);
464 if (ret)
465 return ret + length;
466
467 cpu_offset += this_length;
468 gpu_offset += this_length;
469 length -= this_length;
470 }
471
472 return 0;
473}
474
/* Pins the backing pages and synchronizes the object with any pending GPU
 * writes (for read-only access). Sets *needs_clflush when the caller must
 * flush the object out of the CPU cache before reading it.
 */
480int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
481 int *needs_clflush)
482{
483 int ret;
484
485 *needs_clflush = 0;
486
487 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
488 return -EINVAL;
489
490 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
491
492
493
494
495 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
496 obj->cache_level);
497 ret = i915_gem_object_wait_rendering(obj, true);
498 if (ret)
499 return ret;
500 }
501
502 ret = i915_gem_object_get_pages(obj);
503 if (ret)
504 return ret;
505
506 i915_gem_object_pin_pages(obj);
507
508 return ret;
509}
510
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
514static int
515shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
516 char __user *user_data,
517 bool page_do_bit17_swizzling, bool needs_clflush)
518{
519 char *vaddr;
520 int ret;
521
522 if (unlikely(page_do_bit17_swizzling))
523 return -EINVAL;
524
525 vaddr = kmap_atomic(page);
526 if (needs_clflush)
527 drm_clflush_virt_range(vaddr + shmem_page_offset,
528 page_length);
529 ret = __copy_to_user_inatomic(user_data,
530 vaddr + shmem_page_offset,
531 page_length);
532 kunmap_atomic(vaddr);
533
534 return ret ? -EFAULT : 0;
535}
536
537static void
538shmem_clflush_swizzled_range(char *addr, unsigned long length,
539 bool swizzled)
540{
541 if (unlikely(swizzled)) {
542 unsigned long start = (unsigned long) addr;
543 unsigned long end = (unsigned long) addr + length;
544
545
546
547
548
549 start = round_down(start, 128);
550 end = round_up(end, 128);
551
552 drm_clflush_virt_range((void *)start, end - start);
553 } else {
554 drm_clflush_virt_range(addr, length);
555 }
556
557}
558
/* Only difference to the fast-path function is that this can handle bit17
 * swizzling and uses non-atomic copy and kmap functions. */
561static int
562shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
563 char __user *user_data,
564 bool page_do_bit17_swizzling, bool needs_clflush)
565{
566 char *vaddr;
567 int ret;
568
569 vaddr = kmap(page);
570 if (needs_clflush)
571 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
572 page_length,
573 page_do_bit17_swizzling);
574
575 if (page_do_bit17_swizzling)
576 ret = __copy_to_user_swizzled(user_data,
577 vaddr, shmem_page_offset,
578 page_length);
579 else
580 ret = __copy_to_user(user_data,
581 vaddr + shmem_page_offset,
582 page_length);
583 kunmap(page);
584
	return ret ? -EFAULT : 0;
586}
587
588static int
589i915_gem_shmem_pread(struct drm_device *dev,
590 struct drm_i915_gem_object *obj,
591 struct drm_i915_gem_pread *args,
592 struct drm_file *file)
593{
594 char __user *user_data;
595 ssize_t remain;
596 loff_t offset;
597 int shmem_page_offset, page_length, ret = 0;
598 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
599 int prefaulted = 0;
600 int needs_clflush = 0;
601 struct sg_page_iter sg_iter;
602
603 user_data = u64_to_user_ptr(args->data_ptr);
604 remain = args->size;
605
606 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
607
608 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
609 if (ret)
610 return ret;
611
612 offset = args->offset;
613
614 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
615 offset >> PAGE_SHIFT) {
616 struct page *page = sg_page_iter_page(&sg_iter);
617
618 if (remain <= 0)
619 break;
620
621
622
623
624
625
626 shmem_page_offset = offset_in_page(offset);
627 page_length = remain;
628 if ((shmem_page_offset + page_length) > PAGE_SIZE)
629 page_length = PAGE_SIZE - shmem_page_offset;
630
631 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
632 (page_to_phys(page) & (1 << 17)) != 0;
633
634 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
635 user_data, page_do_bit17_swizzling,
636 needs_clflush);
637 if (ret == 0)
638 goto next_page;
639
640 mutex_unlock(&dev->struct_mutex);
641
642 if (likely(!i915.prefault_disable) && !prefaulted) {
643 ret = fault_in_multipages_writeable(user_data, remain);
644
645
646
647
648 (void)ret;
649 prefaulted = 1;
650 }
651
652 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
653 user_data, page_do_bit17_swizzling,
654 needs_clflush);
655
656 mutex_lock(&dev->struct_mutex);
657
658 if (ret)
659 goto out;
660
661next_page:
662 remain -= page_length;
663 user_data += page_length;
664 offset += page_length;
665 }
666
667out:
668 i915_gem_object_unpin_pages(obj);
669
670 return ret;
671}
672
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
678int
679i915_gem_pread_ioctl(struct drm_device *dev, void *data,
680 struct drm_file *file)
681{
682 struct drm_i915_gem_pread *args = data;
683 struct drm_i915_gem_object *obj;
684 int ret = 0;
685
686 if (args->size == 0)
687 return 0;
688
689 if (!access_ok(VERIFY_WRITE,
690 u64_to_user_ptr(args->data_ptr),
691 args->size))
692 return -EFAULT;
693
694 ret = i915_mutex_lock_interruptible(dev);
695 if (ret)
696 return ret;
697
698 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
699 if (&obj->base == NULL) {
700 ret = -ENOENT;
701 goto unlock;
702 }
703
	/* Bounds check source. */
705 if (args->offset > obj->base.size ||
706 args->size > obj->base.size - args->offset) {
707 ret = -EINVAL;
708 goto out;
709 }
710
	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
714 if (!obj->base.filp) {
715 ret = -EINVAL;
716 goto out;
717 }
718
719 trace_i915_gem_object_pread(obj, args->offset, args->size);
720
721 ret = i915_gem_shmem_pread(dev, obj, args, file);
722
723out:
724 drm_gem_object_unreference(&obj->base);
725unlock:
726 mutex_unlock(&dev->struct_mutex);
727 return ret;
728}
729

/* This is the fast write path which cannot handle
 * page faults in the source data.
 */
734static inline int
735fast_user_write(struct io_mapping *mapping,
736 loff_t page_base, int page_offset,
737 char __user *user_data,
738 int length)
739{
740 void __iomem *vaddr_atomic;
741 void *vaddr;
742 unsigned long unwritten;
743
744 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
745
	vaddr = (void __force *)vaddr_atomic + page_offset;
747 unwritten = __copy_from_user_inatomic_nocache(vaddr,
748 user_data, length);
749 io_mapping_unmap_atomic(vaddr_atomic);
750 return unwritten;
751}
752
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
757static int
758i915_gem_gtt_pwrite_fast(struct drm_device *dev,
759 struct drm_i915_gem_object *obj,
760 struct drm_i915_gem_pwrite *args,
761 struct drm_file *file)
762{
763 struct drm_i915_private *dev_priv = to_i915(dev);
764 struct i915_ggtt *ggtt = &dev_priv->ggtt;
765 ssize_t remain;
766 loff_t offset, page_base;
767 char __user *user_data;
768 int page_offset, page_length, ret;
769
770 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
771 if (ret)
772 goto out;
773
774 ret = i915_gem_object_set_to_gtt_domain(obj, true);
775 if (ret)
776 goto out_unpin;
777
778 ret = i915_gem_object_put_fence(obj);
779 if (ret)
780 goto out_unpin;
781
782 user_data = u64_to_user_ptr(args->data_ptr);
783 remain = args->size;
784
785 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
786
787 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
788
789 while (remain > 0) {
790
791
792
793
794
795
796 page_base = offset & PAGE_MASK;
797 page_offset = offset_in_page(offset);
798 page_length = remain;
799 if ((page_offset + remain) > PAGE_SIZE)
800 page_length = PAGE_SIZE - page_offset;
801
802
803
804
805
806 if (fast_user_write(ggtt->mappable, page_base,
807 page_offset, user_data, page_length)) {
808 ret = -EFAULT;
809 goto out_flush;
810 }
811
812 remain -= page_length;
813 user_data += page_length;
814 offset += page_length;
815 }
816
817out_flush:
818 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
819out_unpin:
820 i915_gem_object_ggtt_unpin(obj);
821out:
822 return ret;
823}
824
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
829static int
830shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
831 char __user *user_data,
832 bool page_do_bit17_swizzling,
833 bool needs_clflush_before,
834 bool needs_clflush_after)
835{
836 char *vaddr;
837 int ret;
838
839 if (unlikely(page_do_bit17_swizzling))
840 return -EINVAL;
841
842 vaddr = kmap_atomic(page);
843 if (needs_clflush_before)
844 drm_clflush_virt_range(vaddr + shmem_page_offset,
845 page_length);
846 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
847 user_data, page_length);
848 if (needs_clflush_after)
849 drm_clflush_virt_range(vaddr + shmem_page_offset,
850 page_length);
851 kunmap_atomic(vaddr);
852
853 return ret ? -EFAULT : 0;
854}
855
/* Only difference to the fast-path function is that this can handle bit17
 * swizzling and uses non-atomic copy and kmap functions. */
858static int
859shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
860 char __user *user_data,
861 bool page_do_bit17_swizzling,
862 bool needs_clflush_before,
863 bool needs_clflush_after)
864{
865 char *vaddr;
866 int ret;
867
868 vaddr = kmap(page);
869 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
870 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
871 page_length,
872 page_do_bit17_swizzling);
873 if (page_do_bit17_swizzling)
874 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
875 user_data,
876 page_length);
877 else
878 ret = __copy_from_user(vaddr + shmem_page_offset,
879 user_data,
880 page_length);
881 if (needs_clflush_after)
882 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
883 page_length,
884 page_do_bit17_swizzling);
885 kunmap(page);
886
887 return ret ? -EFAULT : 0;
888}
889
890static int
891i915_gem_shmem_pwrite(struct drm_device *dev,
892 struct drm_i915_gem_object *obj,
893 struct drm_i915_gem_pwrite *args,
894 struct drm_file *file)
895{
896 ssize_t remain;
897 loff_t offset;
898 char __user *user_data;
899 int shmem_page_offset, page_length, ret = 0;
900 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
901 int hit_slowpath = 0;
902 int needs_clflush_after = 0;
903 int needs_clflush_before = 0;
904 struct sg_page_iter sg_iter;
905
906 user_data = u64_to_user_ptr(args->data_ptr);
907 remain = args->size;
908
909 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
910
911 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
912
913
914
915
916 needs_clflush_after = cpu_write_needs_clflush(obj);
917 ret = i915_gem_object_wait_rendering(obj, false);
918 if (ret)
919 return ret;
920 }
921
922
923 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
924 needs_clflush_before =
925 !cpu_cache_is_coherent(dev, obj->cache_level);
926
927 ret = i915_gem_object_get_pages(obj);
928 if (ret)
929 return ret;
930
931 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
932
933 i915_gem_object_pin_pages(obj);
934
935 offset = args->offset;
936 obj->dirty = 1;
937
938 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
939 offset >> PAGE_SHIFT) {
940 struct page *page = sg_page_iter_page(&sg_iter);
941 int partial_cacheline_write;
942
943 if (remain <= 0)
944 break;
945
946
947
948
949
950
951 shmem_page_offset = offset_in_page(offset);
952
953 page_length = remain;
954 if ((shmem_page_offset + page_length) > PAGE_SIZE)
955 page_length = PAGE_SIZE - shmem_page_offset;
956
957
958
959
960 partial_cacheline_write = needs_clflush_before &&
961 ((shmem_page_offset | page_length)
962 & (boot_cpu_data.x86_clflush_size - 1));
963
964 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
965 (page_to_phys(page) & (1 << 17)) != 0;
966
967 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
968 user_data, page_do_bit17_swizzling,
969 partial_cacheline_write,
970 needs_clflush_after);
971 if (ret == 0)
972 goto next_page;
973
974 hit_slowpath = 1;
975 mutex_unlock(&dev->struct_mutex);
976 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
977 user_data, page_do_bit17_swizzling,
978 partial_cacheline_write,
979 needs_clflush_after);
980
981 mutex_lock(&dev->struct_mutex);
982
983 if (ret)
984 goto out;
985
986next_page:
987 remain -= page_length;
988 user_data += page_length;
989 offset += page_length;
990 }
991
992out:
993 i915_gem_object_unpin_pages(obj);
994
995 if (hit_slowpath) {
		/*
		 * Fixup: flush the CPU cache in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved out of
		 * the cpu write domain while we've dropped the lock.
		 */
1001 if (!needs_clflush_after &&
1002 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003 if (i915_gem_clflush_object(obj, obj->pin_display))
1004 needs_clflush_after = true;
1005 }
1006 }
1007
1008 if (needs_clflush_after)
1009 i915_gem_chipset_flush(dev);
1010 else
1011 obj->cache_dirty = true;
1012
1013 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1014 return ret;
1015}
1016
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
1022int
1023i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1024 struct drm_file *file)
1025{
1026 struct drm_i915_private *dev_priv = dev->dev_private;
1027 struct drm_i915_gem_pwrite *args = data;
1028 struct drm_i915_gem_object *obj;
1029 int ret;
1030
1031 if (args->size == 0)
1032 return 0;
1033
1034 if (!access_ok(VERIFY_READ,
1035 u64_to_user_ptr(args->data_ptr),
1036 args->size))
1037 return -EFAULT;
1038
1039 if (likely(!i915.prefault_disable)) {
1040 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1041 args->size);
1042 if (ret)
1043 return -EFAULT;
1044 }
1045
1046 intel_runtime_pm_get(dev_priv);
1047
1048 ret = i915_mutex_lock_interruptible(dev);
1049 if (ret)
1050 goto put_rpm;
1051
1052 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1053 if (&obj->base == NULL) {
1054 ret = -ENOENT;
1055 goto unlock;
1056 }
1057
1058
1059 if (args->offset > obj->base.size ||
1060 args->size > obj->base.size - args->offset) {
1061 ret = -EINVAL;
1062 goto out;
1063 }
1064
1065
1066
1067
1068 if (!obj->base.filp) {
1069 ret = -EINVAL;
1070 goto out;
1071 }
1072
1073 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1074
1075 ret = -EFAULT;

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1082 if (obj->tiling_mode == I915_TILING_NONE &&
1083 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1084 cpu_write_needs_clflush(obj)) {
1085 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1086
1087
1088
1089 }
1090
1091 if (ret == -EFAULT || ret == -ENOSPC) {
1092 if (obj->phys_handle)
1093 ret = i915_gem_phys_pwrite(obj, args, file);
1094 else
1095 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1096 }
1097
1098out:
1099 drm_gem_object_unreference(&obj->base);
1100unlock:
1101 mutex_unlock(&dev->struct_mutex);
1102put_rpm:
1103 intel_runtime_pm_put(dev_priv);
1104
1105 return ret;
1106}
1107
1108static int
1109i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
1110{
1111 if (__i915_terminally_wedged(reset_counter))
1112 return -EIO;
1113
1114 if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
1117 if (!interruptible)
1118 return -EIO;
1119
1120 return -EAGAIN;
1121 }
1122
1123 return 0;
1124}
1125
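/* Timer callback used to wake a waiter when we suspect a missed interrupt. */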
1126static void fake_irq(unsigned long data)
1127{
1128 wake_up_process((struct task_struct *)data);
1129}
1130
1131static bool missed_irq(struct drm_i915_private *dev_priv,
1132 struct intel_engine_cs *engine)
1133{
1134 return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
1135}
1136
1137static unsigned long local_clock_us(unsigned *cpu)
1138{
1139 unsigned long t;
1140
	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we record the CPU and
	 * treat a migration as a reason to abort the spin.
	 */
1152 *cpu = get_cpu();
1153 t = local_clock() >> 10;
1154 put_cpu();
1155
1156 return t;
1157}
1158
1159static bool busywait_stop(unsigned long timeout, unsigned cpu)
1160{
1161 unsigned this_cpu;
1162
1163 if (time_after(local_clock_us(&this_cpu), timeout))
1164 return true;
1165
1166 return this_cpu != cpu;
1167}
1168
1169static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1170{
1171 unsigned long timeout;
1172 unsigned cpu;
1173
	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 *
	 * If an interrupt-based waiter is already active on this engine,
	 * don't spin; fall back to the normal wait.
	 */
1184 if (req->engine->irq_refcount)
1185 return -EBUSY;
1186
	/* Only spin if we know the GPU is processing this request */
1188 if (!i915_gem_request_started(req, true))
1189 return -EAGAIN;
1190
1191 timeout = local_clock_us(&cpu) + 5;
1192 while (!need_resched()) {
1193 if (i915_gem_request_completed(req, true))
1194 return 0;
1195
1196 if (signal_pending_state(state, current))
1197 break;
1198
1199 if (busywait_stop(timeout, cpu))
1200 break;
1201
1202 cpu_relax_lowlatency();
1203 }
1204
1205 if (i915_gem_request_completed(req, false))
1206 return 0;
1207
1208 return -EAGAIN;
1209}
1210
/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request being waited upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: RPS client to boost whilst waiting
 *
 * Returns 0 if the request was found within the allotted time. Else returns
 * an errno with the remaining time filled in the timeout argument.
 */
1227int __i915_wait_request(struct drm_i915_gem_request *req,
1228 bool interruptible,
1229 s64 *timeout,
1230 struct intel_rps_client *rps)
1231{
1232 struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1233 struct drm_device *dev = engine->dev;
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 const bool irq_test_in_progress =
1236 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
1237 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1238 DEFINE_WAIT(wait);
1239 unsigned long timeout_expire;
1240 s64 before = 0;
1241 int ret;
1242
1243 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1244
1245 if (list_empty(&req->list))
1246 return 0;
1247
1248 if (i915_gem_request_completed(req, true))
1249 return 0;
1250
1251 timeout_expire = 0;
1252 if (timeout) {
1253 if (WARN_ON(*timeout < 0))
1254 return -EINVAL;
1255
1256 if (*timeout == 0)
1257 return -ETIME;
1258
1259 timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
1260
1261
1262
1263
1264 before = ktime_get_raw_ns();
1265 }
1266
1267 if (INTEL_INFO(dev_priv)->gen >= 6)
1268 gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
1269
1270 trace_i915_gem_request_wait_begin(req);
1271
1272
1273 ret = __i915_spin_request(req, state);
1274 if (ret == 0)
1275 goto out;
1276
1277 if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
1278 ret = -ENODEV;
1279 goto out;
1280 }
1281
1282 for (;;) {
1283 struct timer_list timer;
1284
1285 prepare_to_wait(&engine->irq_queue, &wait, state);
1286
1287
1288
1289
1290
1291
1292
1293
1294 if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
1295 ret = 0;
1296 break;
1297 }
1298
1299 if (i915_gem_request_completed(req, false)) {
1300 ret = 0;
1301 break;
1302 }
1303
1304 if (signal_pending_state(state, current)) {
1305 ret = -ERESTARTSYS;
1306 break;
1307 }
1308
1309 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1310 ret = -ETIME;
1311 break;
1312 }
1313
1314 timer.function = NULL;
1315 if (timeout || missed_irq(dev_priv, engine)) {
1316 unsigned long expire;
1317
1318 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1319 expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
1320 mod_timer(&timer, expire);
1321 }
1322
1323 io_schedule();
1324
1325 if (timer.function) {
1326 del_singleshot_timer_sync(&timer);
1327 destroy_timer_on_stack(&timer);
1328 }
1329 }
1330 if (!irq_test_in_progress)
1331 engine->irq_put(engine);
1332
1333 finish_wait(&engine->irq_queue, &wait);
1334
1335out:
1336 trace_i915_gem_request_wait_end(req);
1337
1338 if (timeout) {
1339 s64 tres = *timeout - (ktime_get_raw_ns() - before);
1340
1341 *timeout = tres < 0 ? 0 : tres;
1342
1343
1344
1345
1346
1347
1348
1349
1350 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1351 *timeout = 0;
1352 }
1353
1354 return ret;
1355}
1356
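/* Associate a request with the file that submitted it, so that throttling
 * and accounting can be performed per client.
 */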
1357int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1358 struct drm_file *file)
1359{
1360 struct drm_i915_file_private *file_priv;
1361
1362 WARN_ON(!req || !file || req->file_priv);
1363
1364 if (!req || !file)
1365 return -EINVAL;
1366
1367 if (req->file_priv)
1368 return -EINVAL;
1369
1370 file_priv = file->driver_priv;
1371
1372 spin_lock(&file_priv->mm.lock);
1373 req->file_priv = file_priv;
1374 list_add_tail(&req->client_list, &file_priv->mm.request_list);
1375 spin_unlock(&file_priv->mm.lock);
1376
1377 req->pid = get_pid(task_pid(current));
1378
1379 return 0;
1380}
1381
1382static inline void
1383i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1384{
1385 struct drm_i915_file_private *file_priv = request->file_priv;
1386
1387 if (!file_priv)
1388 return;
1389
1390 spin_lock(&file_priv->mm.lock);
1391 list_del(&request->client_list);
1392 request->file_priv = NULL;
1393 spin_unlock(&file_priv->mm.lock);
1394
1395 put_pid(request->pid);
1396 request->pid = NULL;
1397}
1398
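/* Retire a completed request: drop it from the engine and client lists and
 * release its reference.
 */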
1399static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1400{
1401 trace_i915_gem_request_retire(request);
1402
	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
1411 request->ringbuf->last_retired_head = request->postfix;
1412
1413 list_del_init(&request->list);
1414 i915_gem_request_remove_from_client(request);
1415
1416 i915_gem_request_unreference(request);
1417}
1418
1419static void
1420__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1421{
1422 struct intel_engine_cs *engine = req->engine;
1423 struct drm_i915_gem_request *tmp;
1424
1425 lockdep_assert_held(&engine->dev->struct_mutex);
1426
1427 if (list_empty(&req->list))
1428 return;
1429
1430 do {
1431 tmp = list_first_entry(&engine->request_list,
1432 typeof(*tmp), list);
1433
1434 i915_gem_request_retire(tmp);
1435 } while (tmp != req);
1436
1437 WARN_ON(i915_verify_lists(engine->dev));
1438}
1439
/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
1444int
1445i915_wait_request(struct drm_i915_gem_request *req)
1446{
1447 struct drm_i915_private *dev_priv = req->i915;
1448 bool interruptible;
1449 int ret;
1450
1451 interruptible = dev_priv->mm.interruptible;
1452
1453 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1454
1455 ret = __i915_wait_request(req, interruptible, NULL, NULL);
1456 if (ret)
1457 return ret;
1458
1459
1460 if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
1461 __i915_gem_request_retire__upto(req);
1462
1463 return 0;
1464}
1465
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
1470int
1471i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1472 bool readonly)
1473{
1474 int ret, i;
1475
1476 if (!obj->active)
1477 return 0;
1478
1479 if (readonly) {
1480 if (obj->last_write_req != NULL) {
1481 ret = i915_wait_request(obj->last_write_req);
1482 if (ret)
1483 return ret;
1484
1485 i = obj->last_write_req->engine->id;
1486 if (obj->last_read_req[i] == obj->last_write_req)
1487 i915_gem_object_retire__read(obj, i);
1488 else
1489 i915_gem_object_retire__write(obj);
1490 }
1491 } else {
1492 for (i = 0; i < I915_NUM_ENGINES; i++) {
1493 if (obj->last_read_req[i] == NULL)
1494 continue;
1495
1496 ret = i915_wait_request(obj->last_read_req[i]);
1497 if (ret)
1498 return ret;
1499
1500 i915_gem_object_retire__read(obj, i);
1501 }
1502 GEM_BUG_ON(obj->active);
1503 }
1504
1505 return 0;
1506}
1507
1508static void
1509i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1510 struct drm_i915_gem_request *req)
1511{
1512 int ring = req->engine->id;
1513
1514 if (obj->last_read_req[ring] == req)
1515 i915_gem_object_retire__read(obj, ring);
1516 else if (obj->last_write_req == req)
1517 i915_gem_object_retire__write(obj);
1518
1519 if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
1520 __i915_gem_request_retire__upto(req);
1521}
1522
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
1526static __must_check int
1527i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1528 struct intel_rps_client *rps,
1529 bool readonly)
1530{
1531 struct drm_device *dev = obj->base.dev;
1532 struct drm_i915_private *dev_priv = dev->dev_private;
1533 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1534 int ret, i, n = 0;
1535
1536 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1537 BUG_ON(!dev_priv->mm.interruptible);
1538
1539 if (!obj->active)
1540 return 0;
1541
1542 if (readonly) {
1543 struct drm_i915_gem_request *req;
1544
1545 req = obj->last_write_req;
1546 if (req == NULL)
1547 return 0;
1548
1549 requests[n++] = i915_gem_request_reference(req);
1550 } else {
1551 for (i = 0; i < I915_NUM_ENGINES; i++) {
1552 struct drm_i915_gem_request *req;
1553
1554 req = obj->last_read_req[i];
1555 if (req == NULL)
1556 continue;
1557
1558 requests[n++] = i915_gem_request_reference(req);
1559 }
1560 }
1561
1562 mutex_unlock(&dev->struct_mutex);
1563 ret = 0;
1564 for (i = 0; ret == 0 && i < n; i++)
1565 ret = __i915_wait_request(requests[i], true, NULL, rps);
1566 mutex_lock(&dev->struct_mutex);
1567
1568 for (i = 0; i < n; i++) {
1569 if (ret == 0)
1570 i915_gem_object_retire_request(obj, requests[i]);
1571 i915_gem_request_unreference(requests[i]);
1572 }
1573
1574 return ret;
1575}
1576
1577static struct intel_rps_client *to_rps_client(struct drm_file *file)
1578{
1579 struct drm_i915_file_private *fpriv = file->driver_priv;
1580 return &fpriv->rps;
1581}
1582
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
1587int
1588i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1589 struct drm_file *file)
1590{
1591 struct drm_i915_gem_set_domain *args = data;
1592 struct drm_i915_gem_object *obj;
1593 uint32_t read_domains = args->read_domains;
1594 uint32_t write_domain = args->write_domain;
1595 int ret;
1596
1597
1598 if (write_domain & I915_GEM_GPU_DOMAINS)
1599 return -EINVAL;
1600
1601 if (read_domains & I915_GEM_GPU_DOMAINS)
1602 return -EINVAL;
1603
1604
1605
1606
1607 if (write_domain != 0 && read_domains != write_domain)
1608 return -EINVAL;
1609
1610 ret = i915_mutex_lock_interruptible(dev);
1611 if (ret)
1612 return ret;
1613
1614 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1615 if (&obj->base == NULL) {
1616 ret = -ENOENT;
1617 goto unlock;
1618 }
1619
1620
1621
1622
1623
1624 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1625 to_rps_client(file),
1626 !write_domain);
1627 if (ret)
1628 goto unref;
1629
1630 if (read_domains & I915_GEM_DOMAIN_GTT)
1631 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1632 else
1633 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1634
1635 if (write_domain != 0)
1636 intel_fb_obj_invalidate(obj,
1637 write_domain == I915_GEM_DOMAIN_GTT ?
1638 ORIGIN_GTT : ORIGIN_CPU);
1639
1640unref:
1641 drm_gem_object_unreference(&obj->base);
1642unlock:
1643 mutex_unlock(&dev->struct_mutex);
1644 return ret;
1645}
1646
1647
1648
1649
1650int
1651i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1652 struct drm_file *file)
1653{
1654 struct drm_i915_gem_sw_finish *args = data;
1655 struct drm_i915_gem_object *obj;
1656 int ret = 0;
1657
1658 ret = i915_mutex_lock_interruptible(dev);
1659 if (ret)
1660 return ret;
1661
1662 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1663 if (&obj->base == NULL) {
1664 ret = -ENOENT;
1665 goto unlock;
1666 }
1667
1668
1669 if (obj->pin_display)
1670 i915_gem_object_flush_cpu_write_domain(obj);
1671
1672 drm_gem_object_unreference(&obj->base);
1673unlock:
1674 mutex_unlock(&dev->struct_mutex);
1675 return ret;
1676}
1677
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 *
 * Note: this maps the object through its shmem backing store, so it is only
 * valid for objects that have a backing filp; imported dma-buf objects are
 * rejected below.
 */
1695int
1696i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1697 struct drm_file *file)
1698{
1699 struct drm_i915_gem_mmap *args = data;
1700 struct drm_gem_object *obj;
1701 unsigned long addr;
1702
1703 if (args->flags & ~(I915_MMAP_WC))
1704 return -EINVAL;
1705
1706 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1707 return -ENODEV;
1708
1709 obj = drm_gem_object_lookup(file, args->handle);
1710 if (obj == NULL)
1711 return -ENOENT;
1712
1713
1714
1715
1716 if (!obj->filp) {
1717 drm_gem_object_unreference_unlocked(obj);
1718 return -EINVAL;
1719 }
1720
1721 addr = vm_mmap(obj->filp, 0, args->size,
1722 PROT_READ | PROT_WRITE, MAP_SHARED,
1723 args->offset);
1724 if (args->flags & I915_MMAP_WC) {
1725 struct mm_struct *mm = current->mm;
1726 struct vm_area_struct *vma;
1727
1728 if (down_write_killable(&mm->mmap_sem)) {
1729 drm_gem_object_unreference_unlocked(obj);
1730 return -EINTR;
1731 }
1732 vma = find_vma(mm, addr);
1733 if (vma)
1734 vma->vm_page_prot =
1735 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1736 else
1737 addr = -ENOMEM;
1738 up_write(&mm->mmap_sem);
1739 }
1740 drm_gem_object_unreference_unlocked(obj);
1741 if (IS_ERR((void *)addr))
1742 return addr;
1743
1744 args->addr_ptr = (uint64_t) addr;
1745
1746 return 0;
1747}
1748
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a mmap offset is
 * requested from userspace. The fault handler takes care of binding the
 * object into the GTT (if needed), allocating and programming a fence
 * register (again, only if needed) and inserting the PTEs for the faulting
 * range into the process address space.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
1765int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1766{
1767 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1768 struct drm_device *dev = obj->base.dev;
1769 struct drm_i915_private *dev_priv = to_i915(dev);
1770 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1771 struct i915_ggtt_view view = i915_ggtt_view_normal;
1772 pgoff_t page_offset;
1773 unsigned long pfn;
1774 int ret = 0;
1775 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1776
1777 intel_runtime_pm_get(dev_priv);
1778
1779
1780 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1781 PAGE_SHIFT;
1782
1783 ret = i915_mutex_lock_interruptible(dev);
1784 if (ret)
1785 goto out;
1786
1787 trace_i915_gem_object_fault(obj, page_offset, true, write);
1788
1789
1790
1791
1792
1793
1794 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1795 if (ret)
1796 goto unlock;
1797
1798
1799 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1800 ret = -EFAULT;
1801 goto unlock;
1802 }
1803
1804
1805 if (obj->base.size >= ggtt->mappable_end &&
1806 obj->tiling_mode == I915_TILING_NONE) {
1807 static const unsigned int chunk_size = 256;
1808
1809 memset(&view, 0, sizeof(view));
1810 view.type = I915_GGTT_VIEW_PARTIAL;
1811 view.params.partial.offset = rounddown(page_offset, chunk_size);
1812 view.params.partial.size =
1813 min_t(unsigned int,
1814 chunk_size,
1815 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1816 view.params.partial.offset);
1817 }
1818
1819
1820 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
1821 if (ret)
1822 goto unlock;
1823
1824 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1825 if (ret)
1826 goto unpin;
1827
1828 ret = i915_gem_object_get_fence(obj);
1829 if (ret)
1830 goto unpin;
1831
1832
1833 pfn = ggtt->mappable_base +
1834 i915_gem_obj_ggtt_offset_view(obj, &view);
1835 pfn >>= PAGE_SHIFT;
1836
1837 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1838
1839
1840
1841
1842
1843 unsigned long base = vma->vm_start +
1844 (view.params.partial.offset << PAGE_SHIFT);
1845 unsigned int i;
1846
1847 for (i = 0; i < view.params.partial.size; i++) {
1848 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1849 if (ret)
1850 break;
1851 }
1852
1853 obj->fault_mappable = true;
1854 } else {
1855 if (!obj->fault_mappable) {
1856 unsigned long size = min_t(unsigned long,
1857 vma->vm_end - vma->vm_start,
1858 obj->base.size);
1859 int i;
1860
1861 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1862 ret = vm_insert_pfn(vma,
1863 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1864 pfn + i);
1865 if (ret)
1866 break;
1867 }
1868
1869 obj->fault_mappable = true;
1870 } else
1871 ret = vm_insert_pfn(vma,
1872 (unsigned long)vmf->virtual_address,
1873 pfn + page_offset);
1874 }
1875unpin:
1876 i915_gem_object_ggtt_unpin_view(obj, &view);
1877unlock:
1878 mutex_unlock(&dev->struct_mutex);
1879out:
1880 switch (ret) {
1881 case -EIO:
1882
1883
1884
1885
1886
1887
1888 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1889 ret = VM_FAULT_SIGBUS;
1890 break;
1891 }
1892 case -EAGAIN:
1893
1894
1895
1896
1897
1898 case 0:
1899 case -ERESTARTSYS:
1900 case -EINTR:
1901 case -EBUSY:
1902
1903
1904
1905
1906 ret = VM_FAULT_NOPAGE;
1907 break;
1908 case -ENOMEM:
1909 ret = VM_FAULT_OOM;
1910 break;
1911 case -ENOSPC:
1912 case -EFAULT:
1913 ret = VM_FAULT_SIGBUS;
1914 break;
1915 default:
1916 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1917 ret = VM_FAULT_SIGBUS;
1918 break;
1919 }
1920
1921 intel_runtime_pm_put(dev_priv);
1922 return ret;
1923}
1924
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
1939void
1940i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1941{
1942
1943
1944
1945
1946 lockdep_assert_held(&obj->base.dev->struct_mutex);
1947
1948 if (!obj->fault_mappable)
1949 return;
1950
1951 drm_vma_node_unmap(&obj->base.vma_node,
1952 obj->base.dev->anon_inode->i_mapping);
1953
1954
1955
1956
1957
1958
1959
1960
1961 wmb();
1962
1963 obj->fault_mappable = false;
1964}
1965
1966void
1967i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1968{
1969 struct drm_i915_gem_object *obj;
1970
1971 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1972 i915_gem_release_mmap(obj);
1973}
1974
1975uint32_t
1976i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1977{
1978 uint32_t gtt_size;
1979
1980 if (INTEL_INFO(dev)->gen >= 4 ||
1981 tiling_mode == I915_TILING_NONE)
1982 return size;
1983
1984
1985 if (INTEL_INFO(dev)->gen == 3)
1986 gtt_size = 1024*1024;
1987 else
1988 gtt_size = 512*1024;
1989
1990 while (gtt_size < size)
1991 gtt_size <<= 1;
1992
1993 return gtt_size;
1994}
1995
1996
1997
1998
1999
2000
2001
2002
2003uint32_t
2004i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2005 int tiling_mode, bool fenced)
2006{
2007
2008
2009
2010
2011 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2012 tiling_mode == I915_TILING_NONE)
2013 return 4096;
2014
2015
2016
2017
2018
2019 return i915_gem_get_gtt_size(dev, size, tiling_mode);
2020}
2021
2022static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2023{
2024 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2025 int ret;
2026
2027 dev_priv->mm.shrinker_no_lock_stealing = true;
2028
2029 ret = drm_gem_create_mmap_offset(&obj->base);
2030 if (ret != -ENOSPC)
2031 goto out;
2032
2033
2034
2035
2036
2037
2038
2039
2040 i915_gem_shrink(dev_priv,
2041 obj->base.size >> PAGE_SHIFT,
2042 I915_SHRINK_BOUND |
2043 I915_SHRINK_UNBOUND |
2044 I915_SHRINK_PURGEABLE);
2045 ret = drm_gem_create_mmap_offset(&obj->base);
2046 if (ret != -ENOSPC)
2047 goto out;
2048
2049 i915_gem_shrink_all(dev_priv);
2050 ret = drm_gem_create_mmap_offset(&obj->base);
2051out:
2052 dev_priv->mm.shrinker_no_lock_stealing = false;
2053
2054 return ret;
2055}
2056
2057static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2058{
2059 drm_gem_free_mmap_offset(&obj->base);
2060}
2061
2062int
2063i915_gem_mmap_gtt(struct drm_file *file,
2064 struct drm_device *dev,
2065 uint32_t handle,
2066 uint64_t *offset)
2067{
2068 struct drm_i915_gem_object *obj;
2069 int ret;
2070
2071 ret = i915_mutex_lock_interruptible(dev);
2072 if (ret)
2073 return ret;
2074
2075 obj = to_intel_bo(drm_gem_object_lookup(file, handle));
2076 if (&obj->base == NULL) {
2077 ret = -ENOENT;
2078 goto unlock;
2079 }
2080
2081 if (obj->madv != I915_MADV_WILLNEED) {
2082 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2083 ret = -EFAULT;
2084 goto out;
2085 }
2086
2087 ret = i915_gem_object_create_mmap_offset(obj);
2088 if (ret)
2089 goto out;
2090
2091 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2092
2093out:
2094 drm_gem_object_unreference(&obj->base);
2095unlock:
2096 mutex_unlock(&dev->struct_mutex);
2097 return ret;
2098}
2099
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
2115int
2116i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2117 struct drm_file *file)
2118{
2119 struct drm_i915_gem_mmap_gtt *args = data;
2120
2121 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2122}
2123
2124
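/* Immediately discard the backing storage; the object is marked as purged
 * and can never be repopulated.
 */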
2125static void
2126i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2127{
2128 i915_gem_object_free_mmap_offset(obj);
2129
2130 if (obj->base.filp == NULL)
2131 return;
2132
2133
2134
2135
2136
2137
2138 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2139 obj->madv = __I915_MADV_PURGED;
2140}
2141
2142
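/* Try to discard unwanted pages from the backing storage. */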
2143static void
2144i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2145{
2146 struct address_space *mapping;
2147
2148 switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through */
	case __I915_MADV_PURGED:
2152 return;
2153 }
2154
2155 if (obj->base.filp == NULL)
2156 return;
2157
	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2160}
2161
2162static void
2163i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2164{
2165 struct sg_page_iter sg_iter;
2166 int ret;
2167
2168 BUG_ON(obj->madv == __I915_MADV_PURGED);
2169
2170 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2171 if (WARN_ON(ret)) {
2172
2173
2174
2175 i915_gem_clflush_object(obj, true);
2176 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2177 }
2178
2179 i915_gem_gtt_finish_object(obj);
2180
2181 if (i915_gem_object_needs_bit17_swizzle(obj))
2182 i915_gem_object_save_bit_17_swizzle(obj);
2183
2184 if (obj->madv == I915_MADV_DONTNEED)
2185 obj->dirty = 0;
2186
2187 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2188 struct page *page = sg_page_iter_page(&sg_iter);
2189
2190 if (obj->dirty)
2191 set_page_dirty(page);
2192
2193 if (obj->madv == I915_MADV_WILLNEED)
2194 mark_page_accessed(page);
2195
2196 put_page(page);
2197 }
2198 obj->dirty = 0;
2199
2200 sg_free_table(obj->pages);
2201 kfree(obj->pages);
2202}
2203
2204int
2205i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2206{
2207 const struct drm_i915_gem_object_ops *ops = obj->ops;
2208
2209 if (obj->pages == NULL)
2210 return 0;
2211
2212 if (obj->pages_pin_count)
2213 return -EBUSY;
2214
2215 BUG_ON(i915_gem_obj_bound_any(obj));
2216
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect it from being reaped by removing the object
	 * from the GTT lists early.
	 */
2220 list_del(&obj->global_list);
2221
2222 if (obj->mapping) {
2223 if (is_vmalloc_addr(obj->mapping))
2224 vunmap(obj->mapping);
2225 else
2226 kunmap(kmap_to_page(obj->mapping));
2227 obj->mapping = NULL;
2228 }
2229
2230 ops->put_pages(obj);
2231 obj->pages = NULL;
2232
2233 i915_gem_object_invalidate(obj);
2234
2235 return 0;
2236}
2237
2238static int
2239i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2240{
2241 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2242 int page_count, i;
2243 struct address_space *mapping;
2244 struct sg_table *st;
2245 struct scatterlist *sg;
2246 struct sg_page_iter sg_iter;
2247 struct page *page;
2248 unsigned long last_pfn = 0;
2249 int ret;
2250 gfp_t gfp;
2251
2252
2253
2254
2255
2256 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2257 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2258
2259 st = kmalloc(sizeof(*st), GFP_KERNEL);
2260 if (st == NULL)
2261 return -ENOMEM;
2262
2263 page_count = obj->base.size / PAGE_SIZE;
2264 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2265 kfree(st);
2266 return -ENOMEM;
2267 }
2268
2269
2270
2271
2272
2273
2274 mapping = file_inode(obj->base.filp)->i_mapping;
2275 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2276 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2277 sg = st->sgl;
2278 st->nents = 0;
2279 for (i = 0; i < page_count; i++) {
2280 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2281 if (IS_ERR(page)) {
2282 i915_gem_shrink(dev_priv,
2283 page_count,
2284 I915_SHRINK_BOUND |
2285 I915_SHRINK_UNBOUND |
2286 I915_SHRINK_PURGEABLE);
2287 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2288 }
2289 if (IS_ERR(page)) {
2290
2291
2292
2293
2294 i915_gem_shrink_all(dev_priv);
2295 page = shmem_read_mapping_page(mapping, i);
2296 if (IS_ERR(page)) {
2297 ret = PTR_ERR(page);
2298 goto err_pages;
2299 }
2300 }
2301#ifdef CONFIG_SWIOTLB
2302 if (swiotlb_nr_tbl()) {
2303 st->nents++;
2304 sg_set_page(sg, page, PAGE_SIZE, 0);
2305 sg = sg_next(sg);
2306 continue;
2307 }
2308#endif
2309 if (!i || page_to_pfn(page) != last_pfn + 1) {
2310 if (i)
2311 sg = sg_next(sg);
2312 st->nents++;
2313 sg_set_page(sg, page, PAGE_SIZE, 0);
2314 } else {
2315 sg->length += PAGE_SIZE;
2316 }
2317 last_pfn = page_to_pfn(page);
2318
2319
2320 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2321 }
2322#ifdef CONFIG_SWIOTLB
2323 if (!swiotlb_nr_tbl())
2324#endif
2325 sg_mark_end(sg);
2326 obj->pages = st;
2327
2328 ret = i915_gem_gtt_prepare_object(obj);
2329 if (ret)
2330 goto err_pages;
2331
2332 if (i915_gem_object_needs_bit17_swizzle(obj))
2333 i915_gem_object_do_bit_17_swizzle(obj);
2334
2335 if (obj->tiling_mode != I915_TILING_NONE &&
2336 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2337 i915_gem_object_pin_pages(obj);
2338
2339 return 0;
2340
2341err_pages:
2342 sg_mark_end(sg);
2343 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2344 put_page(sg_page_iter_page(&sg_iter));
2345 sg_free_table(st);
2346 kfree(st);
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356 if (ret == -ENOSPC)
2357 ret = -ENOMEM;
2358
2359 return ret;
2360}
2361
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * by the GPU.
 */
2369int
2370i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2371{
2372 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2373 const struct drm_i915_gem_object_ops *ops = obj->ops;
2374 int ret;
2375
2376 if (obj->pages)
2377 return 0;
2378
2379 if (obj->madv != I915_MADV_WILLNEED) {
2380 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2381 return -EFAULT;
2382 }
2383
2384 BUG_ON(obj->pages_pin_count);
2385
2386 ret = ops->get_pages(obj);
2387 if (ret)
2388 return ret;
2389
2390 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2391
2392 obj->get_page.sg = obj->pages->sgl;
2393 obj->get_page.last = 0;
2394
2395 return 0;
2396}
2397
2398void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2399{
2400 int ret;
2401
2402 lockdep_assert_held(&obj->base.dev->struct_mutex);
2403
2404 ret = i915_gem_object_get_pages(obj);
2405 if (ret)
2406 return ERR_PTR(ret);
2407
2408 i915_gem_object_pin_pages(obj);
2409
2410 if (obj->mapping == NULL) {
2411 struct page **pages;
2412
2413 pages = NULL;
2414 if (obj->base.size == PAGE_SIZE)
2415 obj->mapping = kmap(sg_page(obj->pages->sgl));
2416 else
2417 pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2418 sizeof(*pages),
2419 GFP_TEMPORARY);
2420 if (pages != NULL) {
2421 struct sg_page_iter sg_iter;
2422 int n;
2423
2424 n = 0;
2425 for_each_sg_page(obj->pages->sgl, &sg_iter,
2426 obj->pages->nents, 0)
2427 pages[n++] = sg_page_iter_page(&sg_iter);
2428
2429 obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2430 drm_free_large(pages);
2431 }
2432 if (obj->mapping == NULL) {
2433 i915_gem_object_unpin_pages(obj);
2434 return ERR_PTR(-ENOMEM);
2435 }
2436 }
2437
2438 return obj->mapping;
2439}
2440
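/* Mark the vma (and its object) as active on the engine executing the
 * request, taking a reference for the first engine to use it.
 */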
2441void i915_vma_move_to_active(struct i915_vma *vma,
2442 struct drm_i915_gem_request *req)
2443{
2444 struct drm_i915_gem_object *obj = vma->obj;
2445 struct intel_engine_cs *engine;
2446
2447 engine = i915_gem_request_get_engine(req);
2448
2449
2450 if (obj->active == 0)
2451 drm_gem_object_reference(&obj->base);
2452 obj->active |= intel_engine_flag(engine);
2453
2454 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
2455 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
2456
2457 list_move_tail(&vma->vm_link, &vma->vm->active_list);
2458}
2459
2460static void
2461i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2462{
2463 GEM_BUG_ON(obj->last_write_req == NULL);
2464 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
2465
2466 i915_gem_request_assign(&obj->last_write_req, NULL);
2467 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2468}
2469
2470static void
2471i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2472{
2473 struct i915_vma *vma;
2474
2475 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2476 GEM_BUG_ON(!(obj->active & (1 << ring)));
2477
2478 list_del_init(&obj->engine_list[ring]);
2479 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2480
2481 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
2482 i915_gem_object_retire__write(obj);
2483
2484 obj->active &= ~(1 << ring);
2485 if (obj->active)
2486 return;
2487
2488
2489
2490
2491
2492 list_move_tail(&obj->global_list,
2493 &to_i915(obj->base.dev)->mm.bound_list);
2494
2495 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2496 if (!list_empty(&vma->vm_link))
2497 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2498 }
2499
2500 i915_gem_request_assign(&obj->last_fenced_req, NULL);
2501 drm_gem_object_unreference(&obj->base);
2502}
2503
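/* Idle the GPU, retire all outstanding requests and then reset the
 * per-engine seqno bookkeeping to the given value.
 */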
2504static int
2505i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2506{
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_engine_cs *engine;
2509 int ret;
2510
2511
2512 for_each_engine(engine, dev_priv) {
2513 ret = intel_engine_idle(engine);
2514 if (ret)
2515 return ret;
2516 }
2517 i915_gem_retire_requests(dev);
2518
2519
2520 for_each_engine(engine, dev_priv)
2521 intel_ring_init_seqno(engine, seqno);
2522
2523 return 0;
2524}
2525
2526int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2527{
2528 struct drm_i915_private *dev_priv = dev->dev_private;
2529 int ret;
2530
2531 if (seqno == 0)
2532 return -EINVAL;
2533
2534
2535
2536
2537 ret = i915_gem_init_seqno(dev, seqno - 1);
2538 if (ret)
2539 return ret;
2540
2541
2542
2543
2544 dev_priv->next_seqno = seqno;
2545 dev_priv->last_seqno = seqno - 1;
2546 if (dev_priv->last_seqno == 0)
2547 dev_priv->last_seqno--;
2548
2549 return 0;
2550}
2551
2552int
2553i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2554{
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556
2557
2558 if (dev_priv->next_seqno == 0) {
2559 int ret = i915_gem_init_seqno(dev, 0);
2560 if (ret)
2561 return ret;
2562
2563 dev_priv->next_seqno = 1;
2564 }
2565
2566 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2567 return 0;
2568}
2569
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
2575void __i915_add_request(struct drm_i915_gem_request *request,
2576 struct drm_i915_gem_object *obj,
2577 bool flush_caches)
2578{
2579 struct intel_engine_cs *engine;
2580 struct drm_i915_private *dev_priv;
2581 struct intel_ringbuffer *ringbuf;
2582 u32 request_start;
2583 int ret;
2584
2585 if (WARN_ON(request == NULL))
2586 return;
2587
2588 engine = request->engine;
2589 dev_priv = request->i915;
2590 ringbuf = request->ringbuf;
2591
2592
2593
2594
2595
2596
2597 intel_ring_reserved_space_use(ringbuf);
2598
2599 request_start = intel_ring_get_tail(ringbuf);
2600
2601
2602
2603
2604
2605
2606
2607 if (flush_caches) {
2608 if (i915.enable_execlists)
2609 ret = logical_ring_flush_all_caches(request);
2610 else
2611 ret = intel_ring_flush_all_caches(request);
2612
2613 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2614 }
2615
2616 trace_i915_gem_request_add(request);
2617
2618 request->head = request_start;
2619
2620
2621
2622
2623
2624
2625
2626 request->batch_obj = obj;
2627
2628
2629
2630
2631
2632
2633 request->emitted_jiffies = jiffies;
2634 request->previous_seqno = engine->last_submitted_seqno;
2635 smp_store_mb(engine->last_submitted_seqno, request->seqno);
2636 list_add_tail(&request->list, &engine->request_list);
2637
2638
2639
2640
2641
2642
2643 request->postfix = intel_ring_get_tail(ringbuf);
2644
2645 if (i915.enable_execlists)
2646 ret = engine->emit_request(request);
2647 else {
2648 ret = engine->add_request(request);
2649
2650 request->tail = intel_ring_get_tail(ringbuf);
2651 }
2652
2653 WARN(ret, "emit|add_request failed: %d!\n", ret);
2654
2655 i915_queue_hangcheck(engine->dev);
2656
2657 queue_delayed_work(dev_priv->wq,
2658 &dev_priv->mm.retire_work,
2659 round_jiffies_up_relative(HZ));
2660 intel_mark_busy(dev_priv->dev);
2661
2662
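	/* Sanity check that the reserved size was large enough. */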
2663 intel_ring_reserved_space_end(ringbuf);
2664}
2665
2666static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2667 const struct intel_context *ctx)
2668{
2669 unsigned long elapsed;
2670
2671 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2672
2673 if (ctx->hang_stats.banned)
2674 return true;
2675
2676 if (ctx->hang_stats.ban_period_seconds &&
2677 elapsed <= ctx->hang_stats.ban_period_seconds) {
2678 if (!i915_gem_context_is_default(ctx)) {
2679 DRM_DEBUG("context hanging too fast, banning!\n");
2680 return true;
2681 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2682 if (i915_stop_ring_allow_warn(dev_priv))
2683 DRM_ERROR("gpu hanging too fast, banning!\n");
2684 return true;
2685 }
2686 }
2687
2688 return false;
2689}
2690
2691static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2692 struct intel_context *ctx,
2693 const bool guilty)
2694{
2695 struct i915_ctx_hang_stats *hs;
2696
2697 if (WARN_ON(!ctx))
2698 return;
2699
2700 hs = &ctx->hang_stats;
2701
2702 if (guilty) {
2703 hs->banned = i915_context_is_banned(dev_priv, ctx);
2704 hs->batch_active++;
2705 hs->guilty_ts = get_seconds();
2706 } else {
2707 hs->batch_pending++;
2708 }
2709}
2710
2711void i915_gem_request_free(struct kref *req_ref)
2712{
2713 struct drm_i915_gem_request *req = container_of(req_ref,
2714 typeof(*req), ref);
2715 struct intel_context *ctx = req->ctx;
2716
2717 if (req->file_priv)
2718 i915_gem_request_remove_from_client(req);
2719
2720 if (ctx) {
2721 if (i915.enable_execlists && ctx != req->i915->kernel_context)
2722 intel_lr_context_unpin(ctx, req->engine);
2723
2724 i915_gem_context_unreference(ctx);
2725 }
2726
2727 kmem_cache_free(req->i915->requests, req);
2728}
2729
2730static inline int
2731__i915_gem_request_alloc(struct intel_engine_cs *engine,
2732 struct intel_context *ctx,
2733 struct drm_i915_gem_request **req_out)
2734{
2735 struct drm_i915_private *dev_priv = to_i915(engine->dev);
2736 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2737 struct drm_i915_gem_request *req;
2738 int ret;
2739
2740 if (!req_out)
2741 return -EINVAL;
2742
2743 *req_out = NULL;
2744
2745
2746
2747
2748
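	/*
	 * Bail out early if the GPU is wedged or a reset is in progress,
	 * rather than allocating a request that can never execute.
	 */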
2749 ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
2750 if (ret)
2751 return ret;
2752
2753 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
2754 if (req == NULL)
2755 return -ENOMEM;
2756
2757 ret = i915_gem_get_seqno(engine->dev, &req->seqno);
2758 if (ret)
2759 goto err;
2760
2761 kref_init(&req->ref);
2762 req->i915 = dev_priv;
2763 req->engine = engine;
2764 req->reset_counter = reset_counter;
2765 req->ctx = ctx;
2766 i915_gem_context_reference(req->ctx);
2767
2768 if (i915.enable_execlists)
2769 ret = intel_logical_ring_alloc_request_extras(req);
2770 else
2771 ret = intel_ring_alloc_request_extras(req);
2772 if (ret) {
2773 i915_gem_context_unreference(req->ctx);
2774 goto err;
2775 }
2776
2777
2778
2779
2780
2781
2782
2783
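	/*
	 * Reserve ring space for the commands that will eventually complete
	 * this request, so that the later call to __i915_add_request() is
	 * not allowed to fail for lack of space.
	 */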
2784 if (i915.enable_execlists)
2785 ret = intel_logical_ring_reserve_space(req);
2786 else
2787 ret = intel_ring_reserve_space(req);
2788 if (ret) {
2789
2790
2791
2792
2793
2794 intel_ring_reserved_space_cancel(req->ringbuf);
2795 i915_gem_request_unreference(req);
2796 return ret;
2797 }
2798
2799 *req_out = req;
2800 return 0;
2801
2802err:
2803 kmem_cache_free(dev_priv->requests, req);
2804 return ret;
2805}
2806
/**
 * i915_gem_request_alloc - allocate a request to be run on an engine
 * @engine: engine upon which the request will eventually be submitted
 * @ctx: context the request will be associated with, or NULL to use the
 *       default kernel context
 *
 * Returns the newly allocated request on success, or an ERR_PTR() encoded
 * error on failure.
 */
2819struct drm_i915_gem_request *
2820i915_gem_request_alloc(struct intel_engine_cs *engine,
2821 struct intel_context *ctx)
2822{
2823 struct drm_i915_gem_request *req;
2824 int err;
2825
2826 if (ctx == NULL)
2827 ctx = to_i915(engine->dev)->kernel_context;
2828 err = __i915_gem_request_alloc(engine, ctx, &req);
2829 return err ? ERR_PTR(err) : req;
2830}
2831
2832struct drm_i915_gem_request *
2833i915_gem_find_active_request(struct intel_engine_cs *engine)
2834{
2835 struct drm_i915_gem_request *request;
2836
2837 list_for_each_entry(request, &engine->request_list, list) {
2838 if (i915_gem_request_completed(request, false))
2839 continue;
2840
2841 return request;
2842 }
2843
2844 return NULL;
2845}
2846
2847static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
2848 struct intel_engine_cs *engine)
2849{
2850 struct drm_i915_gem_request *request;
2851 bool ring_hung;
2852
2853 request = i915_gem_find_active_request(engine);
2854
2855 if (request == NULL)
2856 return;
2857
2858 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2859
2860 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2861
2862 list_for_each_entry_continue(request, &engine->request_list, list)
2863 i915_set_reset_status(dev_priv, request->ctx, false);
2864}
2865
2866static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
2867 struct intel_engine_cs *engine)
2868{
2869 struct intel_ringbuffer *buffer;
2870
2871 while (!list_empty(&engine->active_list)) {
2872 struct drm_i915_gem_object *obj;
2873
2874 obj = list_first_entry(&engine->active_list,
2875 struct drm_i915_gem_object,
2876 engine_list[engine->id]);
2877
2878 i915_gem_object_retire__read(obj, engine->id);
2879 }
2880
2881
2882
2883
2884
2885
2886
2887 if (i915.enable_execlists) {
2888
2889 tasklet_kill(&engine->irq_tasklet);
2890
2891 spin_lock_bh(&engine->execlist_lock);
2892
2893 list_splice_tail_init(&engine->execlist_queue,
2894 &engine->execlist_retired_req_list);
2895 spin_unlock_bh(&engine->execlist_lock);
2896
2897 intel_execlists_retire_requests(engine);
2898 }
2899
2900
2901
2902
2903
2904
2905
2906
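	/*
	 * With every object moved off the active list above, retire the
	 * outstanding requests themselves; none of them will complete now
	 * that the engine is being reset.
	 */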
2907 while (!list_empty(&engine->request_list)) {
2908 struct drm_i915_gem_request *request;
2909
2910 request = list_first_entry(&engine->request_list,
2911 struct drm_i915_gem_request,
2912 list);
2913
2914 i915_gem_request_retire(request);
2915 }
2916
2917
2918
2919
2920
2921
2922
2923
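	/*
	 * With all requests gone the ringbuffers are idle: resynchronise the
	 * software bookkeeping (retired head and available space) with the
	 * now-empty rings.
	 */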
2924 list_for_each_entry(buffer, &engine->buffers, link) {
2925 buffer->last_retired_head = buffer->tail;
2926 intel_ring_update_space(buffer);
2927 }
2928
2929 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
2930}
2931
2932void i915_gem_reset(struct drm_device *dev)
2933{
2934 struct drm_i915_private *dev_priv = dev->dev_private;
2935 struct intel_engine_cs *engine;
2936
2937
2938
2939
2940
2941
2942 for_each_engine(engine, dev_priv)
2943 i915_gem_reset_engine_status(dev_priv, engine);
2944
2945 for_each_engine(engine, dev_priv)
2946 i915_gem_reset_engine_cleanup(dev_priv, engine);
2947
2948 i915_gem_context_reset(dev);
2949
2950 i915_gem_restore_fences(dev);
2951
2952 WARN_ON(i915_verify_lists(dev));
2953}
2954
2955
2956
2957
2958void
2959i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
2960{
2961 WARN_ON(i915_verify_lists(engine->dev));
2962
2963
2964
2965
2966
2967
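	/*
	 * Retire requests in order of submission; stop at the first request
	 * that the GPU has not yet completed.
	 */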
2968 while (!list_empty(&engine->request_list)) {
2969 struct drm_i915_gem_request *request;
2970
2971 request = list_first_entry(&engine->request_list,
2972 struct drm_i915_gem_request,
2973 list);
2974
2975 if (!i915_gem_request_completed(request, true))
2976 break;
2977
2978 i915_gem_request_retire(request);
2979 }
2980
2981
2982
2983
2984
2985 while (!list_empty(&engine->active_list)) {
2986 struct drm_i915_gem_object *obj;
2987
2988 obj = list_first_entry(&engine->active_list,
2989 struct drm_i915_gem_object,
2990 engine_list[engine->id]);
2991
2992 if (!list_empty(&obj->last_read_req[engine->id]->list))
2993 break;
2994
2995 i915_gem_object_retire__read(obj, engine->id);
2996 }
2997
2998 if (unlikely(engine->trace_irq_req &&
2999 i915_gem_request_completed(engine->trace_irq_req, true))) {
3000 engine->irq_put(engine);
3001 i915_gem_request_assign(&engine->trace_irq_req, NULL);
3002 }
3003
3004 WARN_ON(i915_verify_lists(engine->dev));
3005}
3006
3007bool
3008i915_gem_retire_requests(struct drm_device *dev)
3009{
3010 struct drm_i915_private *dev_priv = dev->dev_private;
3011 struct intel_engine_cs *engine;
3012 bool idle = true;
3013
3014 for_each_engine(engine, dev_priv) {
3015 i915_gem_retire_requests_ring(engine);
3016 idle &= list_empty(&engine->request_list);
3017 if (i915.enable_execlists) {
3018 spin_lock_bh(&engine->execlist_lock);
3019 idle &= list_empty(&engine->execlist_queue);
3020 spin_unlock_bh(&engine->execlist_lock);
3021
3022 intel_execlists_retire_requests(engine);
3023 }
3024 }
3025
3026 if (idle)
3027 mod_delayed_work(dev_priv->wq,
3028 &dev_priv->mm.idle_work,
3029 msecs_to_jiffies(100));
3030
3031 return idle;
3032}
3033
3034static void
3035i915_gem_retire_work_handler(struct work_struct *work)
3036{
3037 struct drm_i915_private *dev_priv =
3038 container_of(work, typeof(*dev_priv), mm.retire_work.work);
3039 struct drm_device *dev = dev_priv->dev;
3040 bool idle;
3041
3042
3043 idle = false;
3044 if (mutex_trylock(&dev->struct_mutex)) {
3045 idle = i915_gem_retire_requests(dev);
3046 mutex_unlock(&dev->struct_mutex);
3047 }
3048 if (!idle)
3049 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
3050 round_jiffies_up_relative(HZ));
3051}
3052
3053static void
3054i915_gem_idle_work_handler(struct work_struct *work)
3055{
3056 struct drm_i915_private *dev_priv =
3057 container_of(work, typeof(*dev_priv), mm.idle_work.work);
3058 struct drm_device *dev = dev_priv->dev;
3059 struct intel_engine_cs *engine;
3060
3061 for_each_engine(engine, dev_priv)
3062 if (!list_empty(&engine->request_list))
3063 return;
3064
3065
3066
3067
3068
3069 intel_mark_idle(dev);
3070
3071 if (mutex_trylock(&dev->struct_mutex)) {
3072 for_each_engine(engine, dev_priv)
3073 i915_gem_batch_pool_fini(&engine->batch_pool);
3074
3075 mutex_unlock(&dev->struct_mutex);
3076 }
3077}
3078
/*
 * Ensures that an object will eventually become non-busy by retiring any of
 * its read requests that have already completed on the hardware.
 */
3084static int
3085i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3086{
3087 int i;
3088
3089 if (!obj->active)
3090 return 0;
3091
3092 for (i = 0; i < I915_NUM_ENGINES; i++) {
3093 struct drm_i915_gem_request *req;
3094
3095 req = obj->last_read_req[i];
3096 if (req == NULL)
3097 continue;
3098
3099 if (list_empty(&req->list))
3100 goto retire;
3101
3102 if (i915_gem_request_completed(req, true)) {
3103 __i915_gem_request_retire__upto(req);
3104retire:
3105 i915_gem_object_retire__read(obj, i);
3106 }
3107 }
3108
3109 return 0;
3110}
3111
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 *
 * Waits for the object named by @args->bo_handle to become idle, for at most
 * @args->timeout_ns nanoseconds (a negative timeout waits indefinitely). A
 * timeout of zero merely reports the current state: -ETIME is returned if the
 * object is still busy. The wait itself is performed after dropping
 * struct_mutex so that a slow client cannot stall the rest of the driver.
 *
 * Returns 0 once the object is idle, -ETIME if it is still busy when the
 * timeout expires, -ENOENT if the handle is invalid, or another negative
 * error code if the wait is interrupted or the GPU is wedged.
 */
3134int
3135i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3136{
3137 struct drm_i915_gem_wait *args = data;
3138 struct drm_i915_gem_object *obj;
3139 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3140 int i, n = 0;
3141 int ret;
3142
3143 if (args->flags != 0)
3144 return -EINVAL;
3145
3146 ret = i915_mutex_lock_interruptible(dev);
3147 if (ret)
3148 return ret;
3149
3150 obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
3151 if (&obj->base == NULL) {
3152 mutex_unlock(&dev->struct_mutex);
3153 return -ENOENT;
3154 }
3155
3156
3157 ret = i915_gem_object_flush_active(obj);
3158 if (ret)
3159 goto out;
3160
3161 if (!obj->active)
3162 goto out;
3163
3164
3165
3166
3167 if (args->timeout_ns == 0) {
3168 ret = -ETIME;
3169 goto out;
3170 }
3171
3172 drm_gem_object_unreference(&obj->base);
3173
3174 for (i = 0; i < I915_NUM_ENGINES; i++) {
3175 if (obj->last_read_req[i] == NULL)
3176 continue;
3177
3178 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3179 }
3180
3181 mutex_unlock(&dev->struct_mutex);
3182
3183 for (i = 0; i < n; i++) {
3184 if (ret == 0)
3185 ret = __i915_wait_request(req[i], true,
3186 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3187 to_rps_client(file));
3188 i915_gem_request_unreference__unlocked(req[i]);
3189 }
3190 return ret;
3191
3192out:
3193 drm_gem_object_unreference(&obj->base);
3194 mutex_unlock(&dev->struct_mutex);
3195 return ret;
3196}
3197
3198static int
3199__i915_gem_object_sync(struct drm_i915_gem_object *obj,
3200 struct intel_engine_cs *to,
3201 struct drm_i915_gem_request *from_req,
3202 struct drm_i915_gem_request **to_req)
3203{
3204 struct intel_engine_cs *from;
3205 int ret;
3206
3207 from = i915_gem_request_get_engine(from_req);
3208 if (to == from)
3209 return 0;
3210
3211 if (i915_gem_request_completed(from_req, true))
3212 return 0;
3213
3214 if (!i915_semaphore_is_enabled(obj->base.dev)) {
3215 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3216 ret = __i915_wait_request(from_req,
3217 i915->mm.interruptible,
3218 NULL,
3219 &i915->rps.semaphores);
3220 if (ret)
3221 return ret;
3222
3223 i915_gem_object_retire_request(obj, from_req);
3224 } else {
3225 int idx = intel_ring_sync_index(from, to);
3226 u32 seqno = i915_gem_request_get_seqno(from_req);
3227
3228 WARN_ON(!to_req);
3229
3230 if (seqno <= from->semaphore.sync_seqno[idx])
3231 return 0;
3232
3233 if (*to_req == NULL) {
3234 struct drm_i915_gem_request *req;
3235
3236 req = i915_gem_request_alloc(to, NULL);
3237 if (IS_ERR(req))
3238 return PTR_ERR(req);
3239
3240 *to_req = req;
3241 }
3242
3243 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3244 ret = to->semaphore.sync_to(*to_req, from, seqno);
3245 if (ret)
3246 return ret;
3247
3248
3249
3250
3251
3252 from->semaphore.sync_seqno[idx] =
3253 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3254 }
3255
3256 return 0;
3257}
3258
/**
 * i915_gem_object_sync - synchronise an object's pending rendering with
 * another engine
 * @obj: object which may still be in use by another engine
 * @to: engine that is about to use the object, or NULL to wait for idle
 * @to_req: request on @to into which any wait should be queued; if no
 *          request exists yet, one is allocated and returned through this
 *          pointer
 *
 * When @to is NULL the call degenerates into a CPU wait for all outstanding
 * rendering on @obj. Otherwise, for each outstanding request (only the last
 * write for a read-only use, all last reads for a write), either a semaphore
 * wait is emitted into *@to_req (when semaphores are enabled) or the CPU
 * waits for the request to complete before returning.
 *
 * Returns 0 on success or a negative error code on failure.
 */
3294int
3295i915_gem_object_sync(struct drm_i915_gem_object *obj,
3296 struct intel_engine_cs *to,
3297 struct drm_i915_gem_request **to_req)
3298{
3299 const bool readonly = obj->base.pending_write_domain == 0;
3300 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3301 int ret, i, n;
3302
3303 if (!obj->active)
3304 return 0;
3305
3306 if (to == NULL)
3307 return i915_gem_object_wait_rendering(obj, readonly);
3308
3309 n = 0;
3310 if (readonly) {
3311 if (obj->last_write_req)
3312 req[n++] = obj->last_write_req;
3313 } else {
3314 for (i = 0; i < I915_NUM_ENGINES; i++)
3315 if (obj->last_read_req[i])
3316 req[n++] = obj->last_read_req[i];
3317 }
3318 for (i = 0; i < n; i++) {
3319 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
3320 if (ret)
3321 return ret;
3322 }
3323
3324 return 0;
3325}
3326
3327static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3328{
3329 u32 old_write_domain, old_read_domains;
3330
3331
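	/* Force a pagefault for domain tracking on next user access */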
3332 i915_gem_release_mmap(obj);
3333
3334 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3335 return;
3336
3337 old_read_domains = obj->base.read_domains;
3338 old_write_domain = obj->base.write_domain;
3339
3340 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3341 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3342
3343 trace_i915_gem_object_change_domain(obj,
3344 old_read_domains,
3345 old_write_domain);
3346}
3347
3348static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3349{
3350 struct drm_i915_gem_object *obj = vma->obj;
3351 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3352 int ret;
3353
3354 if (list_empty(&vma->obj_link))
3355 return 0;
3356
3357 if (!drm_mm_node_allocated(&vma->node)) {
3358 i915_gem_vma_destroy(vma);
3359 return 0;
3360 }
3361
3362 if (vma->pin_count)
3363 return -EBUSY;
3364
3365 BUG_ON(obj->pages == NULL);
3366
3367 if (wait) {
3368 ret = i915_gem_object_wait_rendering(obj, false);
3369 if (ret)
3370 return ret;
3371 }
3372
3373 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3374 i915_gem_object_finish_gtt(obj);
3375
3376
3377 ret = i915_gem_object_put_fence(obj);
3378 if (ret)
3379 return ret;
3380 }
3381
3382 trace_i915_vma_unbind(vma);
3383
3384 vma->vm->unbind_vma(vma);
3385 vma->bound = 0;
3386
3387 list_del_init(&vma->vm_link);
3388 if (vma->is_ggtt) {
3389 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3390 obj->map_and_fenceable = false;
3391 } else if (vma->ggtt_view.pages) {
3392 sg_free_table(vma->ggtt_view.pages);
3393 kfree(vma->ggtt_view.pages);
3394 }
3395 vma->ggtt_view.pages = NULL;
3396 }
3397
3398 drm_mm_remove_node(&vma->node);
3399 i915_gem_vma_destroy(vma);
3400
3401
3402
3403 if (list_empty(&obj->vma_list))
3404 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3405
3406
3407
3408
3409
3410 i915_gem_object_unpin_pages(obj);
3411
3412 return 0;
3413}
3414
3415int i915_vma_unbind(struct i915_vma *vma)
3416{
3417 return __i915_vma_unbind(vma, true);
3418}
3419
3420int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3421{
3422 return __i915_vma_unbind(vma, false);
3423}
3424
3425int i915_gpu_idle(struct drm_device *dev)
3426{
3427 struct drm_i915_private *dev_priv = dev->dev_private;
3428 struct intel_engine_cs *engine;
3429 int ret;
3430
3431
3432 for_each_engine(engine, dev_priv) {
3433 if (!i915.enable_execlists) {
3434 struct drm_i915_gem_request *req;
3435
3436 req = i915_gem_request_alloc(engine, NULL);
3437 if (IS_ERR(req))
3438 return PTR_ERR(req);
3439
3440 ret = i915_switch_context(req);
3441 i915_add_request_no_flush(req);
3442 if (ret)
3443 return ret;
3444 }
3445
3446 ret = intel_engine_idle(engine);
3447 if (ret)
3448 return ret;
3449 }
3450
3451 WARN_ON(i915_verify_lists(dev));
3452 return 0;
3453}
3454
3455static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3456 unsigned long cache_level)
3457{
3458 struct drm_mm_node *gtt_space = &vma->node;
3459 struct drm_mm_node *other;
3460
3461
3462
3463
3464
3465
3466
3467
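	/*
	 * If the address space does not use node colouring to keep
	 * differently-cached objects apart, any placement is valid.
	 * Otherwise require a guard hole between this node and any
	 * neighbour of a different cache level.
	 */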
3468 if (vma->vm->mm.color_adjust == NULL)
3469 return true;
3470
3471 if (!drm_mm_node_allocated(gtt_space))
3472 return true;
3473
3474 if (list_empty(&gtt_space->node_list))
3475 return true;
3476
3477 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3478 if (other->allocated && !other->hole_follows && other->color != cache_level)
3479 return false;
3480
3481 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3482 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3483 return false;
3484
3485 return true;
3486}
3487
/*
 * Finds free space in the given address space and binds the object, or the
 * requested GGTT view of it, there.
 */
3492static struct i915_vma *
3493i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3494 struct i915_address_space *vm,
3495 const struct i915_ggtt_view *ggtt_view,
3496 unsigned alignment,
3497 uint64_t flags)
3498{
3499 struct drm_device *dev = obj->base.dev;
3500 struct drm_i915_private *dev_priv = to_i915(dev);
3501 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3502 u32 fence_alignment, unfenced_alignment;
3503 u32 search_flag, alloc_flag;
3504 u64 start, end;
3505 u64 size, fence_size;
3506 struct i915_vma *vma;
3507 int ret;
3508
3509 if (i915_is_ggtt(vm)) {
3510 u32 view_size;
3511
3512 if (WARN_ON(!ggtt_view))
3513 return ERR_PTR(-EINVAL);
3514
3515 view_size = i915_ggtt_view_size(obj, ggtt_view);
3516
3517 fence_size = i915_gem_get_gtt_size(dev,
3518 view_size,
3519 obj->tiling_mode);
3520 fence_alignment = i915_gem_get_gtt_alignment(dev,
3521 view_size,
3522 obj->tiling_mode,
3523 true);
3524 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3525 view_size,
3526 obj->tiling_mode,
3527 false);
3528 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3529 } else {
3530 fence_size = i915_gem_get_gtt_size(dev,
3531 obj->base.size,
3532 obj->tiling_mode);
3533 fence_alignment = i915_gem_get_gtt_alignment(dev,
3534 obj->base.size,
3535 obj->tiling_mode,
3536 true);
3537 unfenced_alignment =
3538 i915_gem_get_gtt_alignment(dev,
3539 obj->base.size,
3540 obj->tiling_mode,
3541 false);
3542 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3543 }
3544
3545 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3546 end = vm->total;
3547 if (flags & PIN_MAPPABLE)
3548 end = min_t(u64, end, ggtt->mappable_end);
3549 if (flags & PIN_ZONE_4G)
3550 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3551
3552 if (alignment == 0)
3553 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3554 unfenced_alignment;
3555 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3556 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3557 ggtt_view ? ggtt_view->type : 0,
3558 alignment);
3559 return ERR_PTR(-EINVAL);
3560 }
3561
3562
3563
3564
3565
3566 if (size > end) {
3567 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3568 ggtt_view ? ggtt_view->type : 0,
3569 size,
3570 flags & PIN_MAPPABLE ? "mappable" : "total",
3571 end);
3572 return ERR_PTR(-E2BIG);
3573 }
3574
3575 ret = i915_gem_object_get_pages(obj);
3576 if (ret)
3577 return ERR_PTR(ret);
3578
3579 i915_gem_object_pin_pages(obj);
3580
3581 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3582 i915_gem_obj_lookup_or_create_vma(obj, vm);
3583
3584 if (IS_ERR(vma))
3585 goto err_unpin;
3586
3587 if (flags & PIN_OFFSET_FIXED) {
3588 uint64_t offset = flags & PIN_OFFSET_MASK;
3589
3590 if (offset & (alignment - 1) || offset + size > end) {
3591 ret = -EINVAL;
3592 goto err_free_vma;
3593 }
3594 vma->node.start = offset;
3595 vma->node.size = size;
3596 vma->node.color = obj->cache_level;
3597 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3598 if (ret) {
3599 ret = i915_gem_evict_for_vma(vma);
3600 if (ret == 0)
3601 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3602 }
3603 if (ret)
3604 goto err_free_vma;
3605 } else {
3606 if (flags & PIN_HIGH) {
3607 search_flag = DRM_MM_SEARCH_BELOW;
3608 alloc_flag = DRM_MM_CREATE_TOP;
3609 } else {
3610 search_flag = DRM_MM_SEARCH_DEFAULT;
3611 alloc_flag = DRM_MM_CREATE_DEFAULT;
3612 }
3613
3614search_free:
3615 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3616 size, alignment,
3617 obj->cache_level,
3618 start, end,
3619 search_flag,
3620 alloc_flag);
3621 if (ret) {
3622 ret = i915_gem_evict_something(dev, vm, size, alignment,
3623 obj->cache_level,
3624 start, end,
3625 flags);
3626 if (ret == 0)
3627 goto search_free;
3628
3629 goto err_free_vma;
3630 }
3631 }
3632 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3633 ret = -EINVAL;
3634 goto err_remove_node;
3635 }
3636
3637 trace_i915_vma_bind(vma, flags);
3638 ret = i915_vma_bind(vma, obj->cache_level, flags);
3639 if (ret)
3640 goto err_remove_node;
3641
3642 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3643 list_add_tail(&vma->vm_link, &vm->inactive_list);
3644
3645 return vma;
3646
3647err_remove_node:
3648 drm_mm_remove_node(&vma->node);
3649err_free_vma:
3650 i915_gem_vma_destroy(vma);
3651 vma = ERR_PTR(ret);
3652err_unpin:
3653 i915_gem_object_unpin_pages(obj);
3654 return vma;
3655}
3656
3657bool
3658i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3659 bool force)
3660{
3661
3662
3663
3664
3665 if (obj->pages == NULL)
3666 return false;
3667
3668
3669
3670
3671
3672 if (obj->stolen || obj->phys_handle)
3673 return false;
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3684 obj->cache_dirty = true;
3685 return false;
3686 }
3687
3688 trace_i915_gem_object_clflush(obj);
3689 drm_clflush_sg(obj->pages);
3690 obj->cache_dirty = false;
3691
3692 return true;
3693}
3694
3695
3696static void
3697i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3698{
3699 uint32_t old_write_domain;
3700
3701 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3702 return;
3703
3704
3705
3706
3707
3708
3709
3710
3711
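	/*
	 * No chipset flush is required for the GTT write domain, but we must
	 * order any pending CPU writes through the GTT ahead of subsequent
	 * device accesses, hence the write memory barrier.
	 */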
3712 wmb();
3713
3714 old_write_domain = obj->base.write_domain;
3715 obj->base.write_domain = 0;
3716
3717 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3718
3719 trace_i915_gem_object_change_domain(obj,
3720 obj->base.read_domains,
3721 old_write_domain);
3722}
3723
3724
3725static void
3726i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3727{
3728 uint32_t old_write_domain;
3729
3730 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3731 return;
3732
3733 if (i915_gem_clflush_object(obj, obj->pin_display))
3734 i915_gem_chipset_flush(obj->base.dev);
3735
3736 old_write_domain = obj->base.write_domain;
3737 obj->base.write_domain = 0;
3738
3739 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3740
3741 trace_i915_gem_object_change_domain(obj,
3742 obj->base.read_domains,
3743 old_write_domain);
3744}
3745
/**
 * Moves a single object to the GTT read, and possibly write, domain.
 *
 * This function returns when the move is complete, including waiting on any
 * pending flushes.
 */
3752int
3753i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3754{
3755 struct drm_device *dev = obj->base.dev;
3756 struct drm_i915_private *dev_priv = to_i915(dev);
3757 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3758 uint32_t old_write_domain, old_read_domains;
3759 struct i915_vma *vma;
3760 int ret;
3761
3762 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3763 return 0;
3764
3765 ret = i915_gem_object_wait_rendering(obj, !write);
3766 if (ret)
3767 return ret;
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777 ret = i915_gem_object_get_pages(obj);
3778 if (ret)
3779 return ret;
3780
3781 i915_gem_object_flush_cpu_write_domain(obj);
3782
3783
3784
3785
3786
3787 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3788 mb();
3789
3790 old_write_domain = obj->base.write_domain;
3791 old_read_domains = obj->base.read_domains;
3792
3793
3794
3795
3796 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3797 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3798 if (write) {
3799 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3800 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3801 obj->dirty = 1;
3802 }
3803
3804 trace_i915_gem_object_change_domain(obj,
3805 old_read_domains,
3806 old_write_domain);
3807
3808
3809 vma = i915_gem_obj_to_ggtt(obj);
3810 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3811 list_move_tail(&vma->vm_link,
3812 &ggtt->base.inactive_list);
3813
3814 return 0;
3815}
3816
/**
 * Changes the cache-level of an object across all of its VMA.
 *
 * After this function returns the object is in the new cache level across
 * every address space it is bound into, and the backing storage is coherent
 * with respect to that level. Only a single cache level may be in effect for
 * the whole object at any time; any VMA whose placement would be invalid for
 * the new level is unbound first, and the remaining bindings are updated in
 * place.
 */
3830int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3831 enum i915_cache_level cache_level)
3832{
3833 struct drm_device *dev = obj->base.dev;
3834 struct i915_vma *vma, *next;
3835 bool bound = false;
3836 int ret = 0;
3837
3838 if (obj->cache_level == cache_level)
3839 goto out;
3840
3841
3842
3843
3844
3845
3846 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
3847 if (!drm_mm_node_allocated(&vma->node))
3848 continue;
3849
3850 if (vma->pin_count) {
3851 DRM_DEBUG("can not change the cache level of pinned objects\n");
3852 return -EBUSY;
3853 }
3854
3855 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3856 ret = i915_vma_unbind(vma);
3857 if (ret)
3858 return ret;
3859 } else
3860 bound = true;
3861 }
3862
3863
3864
3865
3866
3867
3868
3869
3870 if (bound) {
3871
3872
3873
3874
3875 ret = i915_gem_object_wait_rendering(obj, false);
3876 if (ret)
3877 return ret;
3878
3879 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
3880
3881
3882
3883
3884
3885
3886
3887 i915_gem_release_mmap(obj);
3888
3889
3890
3891
3892
3893
3894
3895
3896 ret = i915_gem_object_put_fence(obj);
3897 if (ret)
3898 return ret;
3899 } else {
3900
3901
3902
3903
3904
3905
3906
3907 }
3908
3909 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3910 if (!drm_mm_node_allocated(&vma->node))
3911 continue;
3912
3913 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3914 if (ret)
3915 return ret;
3916 }
3917 }
3918
3919 list_for_each_entry(vma, &obj->vma_list, obj_link)
3920 vma->node.color = cache_level;
3921 obj->cache_level = cache_level;
3922
3923out:
3924
3925
3926
3927
3928 if (obj->cache_dirty &&
3929 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3930 cpu_write_needs_clflush(obj)) {
3931 if (i915_gem_clflush_object(obj, true))
3932 i915_gem_chipset_flush(obj->base.dev);
3933 }
3934
3935 return 0;
3936}
3937
3938int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3939 struct drm_file *file)
3940{
3941 struct drm_i915_gem_caching *args = data;
3942 struct drm_i915_gem_object *obj;
3943
3944 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
3945 if (&obj->base == NULL)
3946 return -ENOENT;
3947
3948 switch (obj->cache_level) {
3949 case I915_CACHE_LLC:
3950 case I915_CACHE_L3_LLC:
3951 args->caching = I915_CACHING_CACHED;
3952 break;
3953
3954 case I915_CACHE_WT:
3955 args->caching = I915_CACHING_DISPLAY;
3956 break;
3957
3958 default:
3959 args->caching = I915_CACHING_NONE;
3960 break;
3961 }
3962
3963 drm_gem_object_unreference_unlocked(&obj->base);
3964 return 0;
3965}
3966
3967int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3968 struct drm_file *file)
3969{
3970 struct drm_i915_private *dev_priv = dev->dev_private;
3971 struct drm_i915_gem_caching *args = data;
3972 struct drm_i915_gem_object *obj;
3973 enum i915_cache_level level;
3974 int ret;
3975
3976 switch (args->caching) {
3977 case I915_CACHING_NONE:
3978 level = I915_CACHE_NONE;
3979 break;
3980 case I915_CACHING_CACHED:
3981
3982
3983
3984
3985
3986
3987 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3988 return -ENODEV;
3989
3990 level = I915_CACHE_LLC;
3991 break;
3992 case I915_CACHING_DISPLAY:
3993 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3994 break;
3995 default:
3996 return -EINVAL;
3997 }
3998
3999 intel_runtime_pm_get(dev_priv);
4000
4001 ret = i915_mutex_lock_interruptible(dev);
4002 if (ret)
4003 goto rpm_put;
4004
4005 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4006 if (&obj->base == NULL) {
4007 ret = -ENOENT;
4008 goto unlock;
4009 }
4010
4011 ret = i915_gem_object_set_cache_level(obj, level);
4012
4013 drm_gem_object_unreference(&obj->base);
4014unlock:
4015 mutex_unlock(&dev->struct_mutex);
4016rpm_put:
4017 intel_runtime_pm_put(dev_priv);
4018
4019 return ret;
4020}
4021
/*
 * Prepare a buffer for use as a display plane (scanout, cursor, etc). May be
 * called from an uninterruptible phase (modesetting) and allows any flushes
 * to be pipelined (for pageflips).
 */
4027int
4028i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4029 u32 alignment,
4030 const struct i915_ggtt_view *view)
4031{
4032 u32 old_read_domains, old_write_domain;
4033 int ret;
4034
4035
4036
4037
4038 obj->pin_display++;
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
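	/*
	 * The display engine does not snoop the LLC, so move the object to an
	 * uncached (or write-through, where available) cache level before it
	 * is scanned out.
	 */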
4049 ret = i915_gem_object_set_cache_level(obj,
4050 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4051 if (ret)
4052 goto err_unpin_display;
4053
4054
4055
4056
4057
4058 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4059 view->type == I915_GGTT_VIEW_NORMAL ?
4060 PIN_MAPPABLE : 0);
4061 if (ret)
4062 goto err_unpin_display;
4063
4064 i915_gem_object_flush_cpu_write_domain(obj);
4065
4066 old_write_domain = obj->base.write_domain;
4067 old_read_domains = obj->base.read_domains;
4068
4069
4070
4071
4072 obj->base.write_domain = 0;
4073 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4074
4075 trace_i915_gem_object_change_domain(obj,
4076 old_read_domains,
4077 old_write_domain);
4078
4079 return 0;
4080
4081err_unpin_display:
4082 obj->pin_display--;
4083 return ret;
4084}
4085
4086void
4087i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4088 const struct i915_ggtt_view *view)
4089{
4090 if (WARN_ON(obj->pin_display == 0))
4091 return;
4092
4093 i915_gem_object_ggtt_unpin_view(obj, view);
4094
4095 obj->pin_display--;
4096}
4097
/**
 * Moves a single object to the CPU read, and possibly write, domain.
 *
 * This function returns when the move is complete, including waiting on any
 * pending flushes.
 */
4104int
4105i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4106{
4107 uint32_t old_write_domain, old_read_domains;
4108 int ret;
4109
4110 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4111 return 0;
4112
4113 ret = i915_gem_object_wait_rendering(obj, !write);
4114 if (ret)
4115 return ret;
4116
4117 i915_gem_object_flush_gtt_write_domain(obj);
4118
4119 old_write_domain = obj->base.write_domain;
4120 old_read_domains = obj->base.read_domains;
4121
4122
4123 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4124 i915_gem_clflush_object(obj, false);
4125
4126 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4127 }
4128
4129
4130
4131
4132 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4133
4134
4135
4136
4137 if (write) {
4138 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4139 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4140 }
4141
4142 trace_i915_gem_object_change_domain(obj,
4143 old_read_domains,
4144 old_write_domain);
4145
4146 return 0;
4147}
4148
/*
 * Throttle our rendering by waiting until the ring has completed the requests
 * this client emitted more than DRM_I915_THROTTLE_JIFFIES ago. This gives
 * reasonable parallelism between CPU and GPU while keeping the latency of
 * blocking on any one request low.
 */
4159static int
4160i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4161{
4162 struct drm_i915_private *dev_priv = dev->dev_private;
4163 struct drm_i915_file_private *file_priv = file->driver_priv;
4164 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4165 struct drm_i915_gem_request *request, *target = NULL;
4166 int ret;
4167
4168 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4169 if (ret)
4170 return ret;
4171
4172
4173 if (i915_terminally_wedged(&dev_priv->gpu_error))
4174 return -EIO;
4175
4176 spin_lock(&file_priv->mm.lock);
4177 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4178 if (time_after_eq(request->emitted_jiffies, recent_enough))
4179 break;
4180
4181
4182
4183
4184
4185 if (!request->emitted_jiffies)
4186 continue;
4187
4188 target = request;
4189 }
4190 if (target)
4191 i915_gem_request_reference(target);
4192 spin_unlock(&file_priv->mm.lock);
4193
4194 if (target == NULL)
4195 return 0;
4196
4197 ret = __i915_wait_request(target, true, NULL, NULL);
4198 if (ret == 0)
4199 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4200
4201 i915_gem_request_unreference__unlocked(target);
4202
4203 return ret;
4204}
4205
4206static bool
4207i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4208{
4209 struct drm_i915_gem_object *obj = vma->obj;
4210
4211 if (alignment &&
4212 vma->node.start & (alignment - 1))
4213 return true;
4214
4215 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4216 return true;
4217
4218 if (flags & PIN_OFFSET_BIAS &&
4219 vma->node.start < (flags & PIN_OFFSET_MASK))
4220 return true;
4221
4222 if (flags & PIN_OFFSET_FIXED &&
4223 vma->node.start != (flags & PIN_OFFSET_MASK))
4224 return true;
4225
4226 return false;
4227}
4228
4229void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4230{
4231 struct drm_i915_gem_object *obj = vma->obj;
4232 bool mappable, fenceable;
4233 u32 fence_size, fence_alignment;
4234
4235 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4236 obj->base.size,
4237 obj->tiling_mode);
4238 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4239 obj->base.size,
4240 obj->tiling_mode,
4241 true);
4242
4243 fenceable = (vma->node.size == fence_size &&
4244 (vma->node.start & (fence_alignment - 1)) == 0);
4245
4246 mappable = (vma->node.start + fence_size <=
4247 to_i915(obj->base.dev)->ggtt.mappable_end);
4248
4249 obj->map_and_fenceable = mappable && fenceable;
4250}
4251
4252static int
4253i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4254 struct i915_address_space *vm,
4255 const struct i915_ggtt_view *ggtt_view,
4256 uint32_t alignment,
4257 uint64_t flags)
4258{
4259 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4260 struct i915_vma *vma;
4261 unsigned bound;
4262 int ret;
4263
4264 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4265 return -ENODEV;
4266
4267 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4268 return -EINVAL;
4269
4270 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4271 return -EINVAL;
4272
4273 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4274 return -EINVAL;
4275
4276 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4277 i915_gem_obj_to_vma(obj, vm);
4278
4279 if (vma) {
4280 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4281 return -EBUSY;
4282
4283 if (i915_vma_misplaced(vma, alignment, flags)) {
4284 WARN(vma->pin_count,
4285 "bo is already pinned in %s with incorrect alignment:"
4286 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4287 " obj->map_and_fenceable=%d\n",
4288 ggtt_view ? "ggtt" : "ppgtt",
4289 upper_32_bits(vma->node.start),
4290 lower_32_bits(vma->node.start),
4291 alignment,
4292 !!(flags & PIN_MAPPABLE),
4293 obj->map_and_fenceable);
4294 ret = i915_vma_unbind(vma);
4295 if (ret)
4296 return ret;
4297
4298 vma = NULL;
4299 }
4300 }
4301
4302 bound = vma ? vma->bound : 0;
4303 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4304 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4305 flags);
4306 if (IS_ERR(vma))
4307 return PTR_ERR(vma);
4308 } else {
4309 ret = i915_vma_bind(vma, obj->cache_level, flags);
4310 if (ret)
4311 return ret;
4312 }
4313
4314 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4315 (bound ^ vma->bound) & GLOBAL_BIND) {
4316 __i915_vma_set_map_and_fenceable(vma);
4317 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4318 }
4319
4320 vma->pin_count++;
4321 return 0;
4322}
4323
4324int
4325i915_gem_object_pin(struct drm_i915_gem_object *obj,
4326 struct i915_address_space *vm,
4327 uint32_t alignment,
4328 uint64_t flags)
4329{
4330 return i915_gem_object_do_pin(obj, vm,
4331 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4332 alignment, flags);
4333}
4334
4335int
4336i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4337 const struct i915_ggtt_view *view,
4338 uint32_t alignment,
4339 uint64_t flags)
4340{
4341 struct drm_device *dev = obj->base.dev;
4342 struct drm_i915_private *dev_priv = to_i915(dev);
4343 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4344
4345 BUG_ON(!view);
4346
4347 return i915_gem_object_do_pin(obj, &ggtt->base, view,
4348 alignment, flags | PIN_GLOBAL);
4349}
4350
4351void
4352i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4353 const struct i915_ggtt_view *view)
4354{
4355 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4356
4357 WARN_ON(vma->pin_count == 0);
4358 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4359
4360 --vma->pin_count;
4361}
4362
4363int
4364i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4365 struct drm_file *file)
4366{
4367 struct drm_i915_gem_busy *args = data;
4368 struct drm_i915_gem_object *obj;
4369 int ret;
4370
4371 ret = i915_mutex_lock_interruptible(dev);
4372 if (ret)
4373 return ret;
4374
4375 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4376 if (&obj->base == NULL) {
4377 ret = -ENOENT;
4378 goto unlock;
4379 }
4380
4381
4382
4383
4384
4385
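	/*
	 * Userspace expects a busy object to become idle without further
	 * action on its part, so retire any completed work here before
	 * sampling the busy state.
	 */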
4386 ret = i915_gem_object_flush_active(obj);
4387 if (ret)
4388 goto unref;
4389
4390 args->busy = 0;
4391 if (obj->active) {
4392 int i;
4393
4394 for (i = 0; i < I915_NUM_ENGINES; i++) {
4395 struct drm_i915_gem_request *req;
4396
4397 req = obj->last_read_req[i];
4398 if (req)
4399 args->busy |= 1 << (16 + req->engine->exec_id);
4400 }
4401 if (obj->last_write_req)
4402 args->busy |= obj->last_write_req->engine->exec_id;
4403 }
4404
4405unref:
4406 drm_gem_object_unreference(&obj->base);
4407unlock:
4408 mutex_unlock(&dev->struct_mutex);
4409 return ret;
4410}
4411
4412int
4413i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4414 struct drm_file *file_priv)
4415{
4416 return i915_gem_ring_throttle(dev, file_priv);
4417}
4418
4419int
4420i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4421 struct drm_file *file_priv)
4422{
4423 struct drm_i915_private *dev_priv = dev->dev_private;
4424 struct drm_i915_gem_madvise *args = data;
4425 struct drm_i915_gem_object *obj;
4426 int ret;
4427
4428 switch (args->madv) {
4429 case I915_MADV_DONTNEED:
4430 case I915_MADV_WILLNEED:
4431 break;
4432 default:
4433 return -EINVAL;
4434 }
4435
4436 ret = i915_mutex_lock_interruptible(dev);
4437 if (ret)
4438 return ret;
4439
4440 obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
4441 if (&obj->base == NULL) {
4442 ret = -ENOENT;
4443 goto unlock;
4444 }
4445
4446 if (i915_gem_obj_is_pinned(obj)) {
4447 ret = -EINVAL;
4448 goto out;
4449 }
4450
4451 if (obj->pages &&
4452 obj->tiling_mode != I915_TILING_NONE &&
4453 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4454 if (obj->madv == I915_MADV_WILLNEED)
4455 i915_gem_object_unpin_pages(obj);
4456 if (args->madv == I915_MADV_WILLNEED)
4457 i915_gem_object_pin_pages(obj);
4458 }
4459
4460 if (obj->madv != __I915_MADV_PURGED)
4461 obj->madv = args->madv;
4462
4463
4464 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4465 i915_gem_object_truncate(obj);
4466
4467 args->retained = obj->madv != __I915_MADV_PURGED;
4468
4469out:
4470 drm_gem_object_unreference(&obj->base);
4471unlock:
4472 mutex_unlock(&dev->struct_mutex);
4473 return ret;
4474}
4475
4476void i915_gem_object_init(struct drm_i915_gem_object *obj,
4477 const struct drm_i915_gem_object_ops *ops)
4478{
4479 int i;
4480
4481 INIT_LIST_HEAD(&obj->global_list);
4482 for (i = 0; i < I915_NUM_ENGINES; i++)
4483 INIT_LIST_HEAD(&obj->engine_list[i]);
4484 INIT_LIST_HEAD(&obj->obj_exec_link);
4485 INIT_LIST_HEAD(&obj->vma_list);
4486 INIT_LIST_HEAD(&obj->batch_pool_link);
4487
4488 obj->ops = ops;
4489
4490 obj->fence_reg = I915_FENCE_REG_NONE;
4491 obj->madv = I915_MADV_WILLNEED;
4492
4493 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4494}
4495
4496static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4497 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4498 .get_pages = i915_gem_object_get_pages_gtt,
4499 .put_pages = i915_gem_object_put_pages_gtt,
4500};
4501
4502struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4503 size_t size)
4504{
4505 struct drm_i915_gem_object *obj;
4506 struct address_space *mapping;
4507 gfp_t mask;
4508
4509 obj = i915_gem_object_alloc(dev);
4510 if (obj == NULL)
4511 return NULL;
4512
4513 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4514 i915_gem_object_free(obj);
4515 return NULL;
4516 }
4517
4518 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4519 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4520
4521 mask &= ~__GFP_HIGHMEM;
4522 mask |= __GFP_DMA32;
4523 }
4524
4525 mapping = file_inode(obj->base.filp)->i_mapping;
4526 mapping_set_gfp_mask(mapping, mask);
4527
4528 i915_gem_object_init(obj, &i915_gem_object_ops);
4529
4530 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4531 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4532
4533 if (HAS_LLC(dev)) {
		/*
		 * With an LLC the GPU snoops the CPU cache, so GPU access
		 * (other than display scanout) is coherent with the CPU
		 * without manual clflushing; default such objects to
		 * I915_CACHE_LLC. Scanout buffers are still moved to an
		 * uncached or write-through level when pinned to a display
		 * plane.
		 */
4546 obj->cache_level = I915_CACHE_LLC;
4547 } else
4548 obj->cache_level = I915_CACHE_NONE;
4549
4550 trace_i915_gem_object_create(obj);
4551
4552 return obj;
4553}
4554
4555static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4556{
4557
4558
4559
4560
4561
4562
4563 if (obj->madv != I915_MADV_WILLNEED)
4564 return false;
4565
4566 if (obj->base.filp == NULL)
4567 return true;
4568
4569
4570
4571
4572
4573
4574
4575
4576 return atomic_long_read(&obj->base.filp->f_count) == 1;
4577}
4578
4579void i915_gem_free_object(struct drm_gem_object *gem_obj)
4580{
4581 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4582 struct drm_device *dev = obj->base.dev;
4583 struct drm_i915_private *dev_priv = dev->dev_private;
4584 struct i915_vma *vma, *next;
4585
4586 intel_runtime_pm_get(dev_priv);
4587
4588 trace_i915_gem_object_destroy(obj);
4589
4590 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4591 int ret;
4592
4593 vma->pin_count = 0;
4594 ret = i915_vma_unbind(vma);
4595 if (WARN_ON(ret == -ERESTARTSYS)) {
4596 bool was_interruptible;
4597
4598 was_interruptible = dev_priv->mm.interruptible;
4599 dev_priv->mm.interruptible = false;
4600
4601 WARN_ON(i915_vma_unbind(vma));
4602
4603 dev_priv->mm.interruptible = was_interruptible;
4604 }
4605 }
4606
4607
4608
4609 if (obj->stolen)
4610 i915_gem_object_unpin_pages(obj);
4611
4612 WARN_ON(obj->frontbuffer_bits);
4613
4614 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4615 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4616 obj->tiling_mode != I915_TILING_NONE)
4617 i915_gem_object_unpin_pages(obj);
4618
4619 if (WARN_ON(obj->pages_pin_count))
4620 obj->pages_pin_count = 0;
4621 if (discard_backing_storage(obj))
4622 obj->madv = I915_MADV_DONTNEED;
4623 i915_gem_object_put_pages(obj);
4624 i915_gem_object_free_mmap_offset(obj);
4625
4626 BUG_ON(obj->pages);
4627
4628 if (obj->base.import_attach)
4629 drm_prime_gem_destroy(&obj->base, NULL);
4630
4631 if (obj->ops->release)
4632 obj->ops->release(obj);
4633
4634 drm_gem_object_release(&obj->base);
4635 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4636
4637 kfree(obj->bit_17);
4638 i915_gem_object_free(obj);
4639
4640 intel_runtime_pm_put(dev_priv);
4641}
4642
4643struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4644 struct i915_address_space *vm)
4645{
4646 struct i915_vma *vma;
4647 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4648 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4649 vma->vm == vm)
4650 return vma;
4651 }
4652 return NULL;
4653}
4654
4655struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4656 const struct i915_ggtt_view *view)
4657{
4658 struct drm_device *dev = obj->base.dev;
4659 struct drm_i915_private *dev_priv = to_i915(dev);
4660 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4661 struct i915_vma *vma;
4662
4663 BUG_ON(!view);
4664
4665 list_for_each_entry(vma, &obj->vma_list, obj_link)
4666 if (vma->vm == &ggtt->base &&
4667 i915_ggtt_view_equal(&vma->ggtt_view, view))
4668 return vma;
4669 return NULL;
4670}
4671
4672void i915_gem_vma_destroy(struct i915_vma *vma)
4673{
4674 WARN_ON(vma->node.allocated);
4675
4676
4677 if (!list_empty(&vma->exec_list))
4678 return;
4679
4680 if (!vma->is_ggtt)
4681 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
4682
4683 list_del(&vma->obj_link);
4684
4685 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4686}
4687
4688static void
4689i915_gem_stop_engines(struct drm_device *dev)
4690{
4691 struct drm_i915_private *dev_priv = dev->dev_private;
4692 struct intel_engine_cs *engine;
4693
4694 for_each_engine(engine, dev_priv)
4695 dev_priv->gt.stop_engine(engine);
4696}
4697
4698int
4699i915_gem_suspend(struct drm_device *dev)
4700{
4701 struct drm_i915_private *dev_priv = dev->dev_private;
4702 int ret = 0;
4703
4704 mutex_lock(&dev->struct_mutex);
4705 ret = i915_gpu_idle(dev);
4706 if (ret)
4707 goto err;
4708
4709 i915_gem_retire_requests(dev);
4710
4711 i915_gem_stop_engines(dev);
4712 mutex_unlock(&dev->struct_mutex);
4713
4714 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4715 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4716 flush_delayed_work(&dev_priv->mm.idle_work);
4717
4718
4719
4720
4721 WARN_ON(dev_priv->mm.busy);
4722
4723 return 0;
4724
4725err:
4726 mutex_unlock(&dev->struct_mutex);
4727 return ret;
4728}
4729
4730int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4731{
4732 struct intel_engine_cs *engine = req->engine;
4733 struct drm_device *dev = engine->dev;
4734 struct drm_i915_private *dev_priv = dev->dev_private;
4735 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4736 int i, ret;
4737
4738 if (!HAS_L3_DPF(dev) || !remap_info)
4739 return 0;
4740
4741 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
4742 if (ret)
4743 return ret;
4744
4745
4746
4747
4748
4749
4750 for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
4751 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
4752 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
4753 intel_ring_emit(engine, remap_info[i]);
4754 }
4755
4756 intel_ring_advance(engine);
4757
4758 return ret;
4759}
4760
4761void i915_gem_init_swizzling(struct drm_device *dev)
4762{
4763 struct drm_i915_private *dev_priv = dev->dev_private;
4764
4765 if (INTEL_INFO(dev)->gen < 5 ||
4766 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4767 return;
4768
4769 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4770 DISP_TILE_SURFACE_SWIZZLING);
4771
4772 if (IS_GEN5(dev))
4773 return;
4774
4775 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4776 if (IS_GEN6(dev))
4777 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4778 else if (IS_GEN7(dev))
4779 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4780 else if (IS_GEN8(dev))
4781 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4782 else
4783 BUG();
4784}
4785
4786static void init_unused_ring(struct drm_device *dev, u32 base)
4787{
4788 struct drm_i915_private *dev_priv = dev->dev_private;
4789
4790 I915_WRITE(RING_CTL(base), 0);
4791 I915_WRITE(RING_HEAD(base), 0);
4792 I915_WRITE(RING_TAIL(base), 0);
4793 I915_WRITE(RING_START(base), 0);
4794}
4795
4796static void init_unused_rings(struct drm_device *dev)
4797{
4798 if (IS_I830(dev)) {
4799 init_unused_ring(dev, PRB1_BASE);
4800 init_unused_ring(dev, SRB0_BASE);
4801 init_unused_ring(dev, SRB1_BASE);
4802 init_unused_ring(dev, SRB2_BASE);
4803 init_unused_ring(dev, SRB3_BASE);
4804 } else if (IS_GEN2(dev)) {
4805 init_unused_ring(dev, SRB0_BASE);
4806 init_unused_ring(dev, SRB1_BASE);
4807 } else if (IS_GEN3(dev)) {
4808 init_unused_ring(dev, PRB1_BASE);
4809 init_unused_ring(dev, PRB2_BASE);
4810 }
4811}
4812
4813int i915_gem_init_engines(struct drm_device *dev)
4814{
4815 struct drm_i915_private *dev_priv = dev->dev_private;
4816 int ret;
4817
4818 ret = intel_init_render_ring_buffer(dev);
4819 if (ret)
4820 return ret;
4821
4822 if (HAS_BSD(dev)) {
4823 ret = intel_init_bsd_ring_buffer(dev);
4824 if (ret)
4825 goto cleanup_render_ring;
4826 }
4827
4828 if (HAS_BLT(dev)) {
4829 ret = intel_init_blt_ring_buffer(dev);
4830 if (ret)
4831 goto cleanup_bsd_ring;
4832 }
4833
4834 if (HAS_VEBOX(dev)) {
4835 ret = intel_init_vebox_ring_buffer(dev);
4836 if (ret)
4837 goto cleanup_blt_ring;
4838 }
4839
4840 if (HAS_BSD2(dev)) {
4841 ret = intel_init_bsd2_ring_buffer(dev);
4842 if (ret)
4843 goto cleanup_vebox_ring;
4844 }
4845
4846 return 0;
4847
4848cleanup_vebox_ring:
4849 intel_cleanup_engine(&dev_priv->engine[VECS]);
4850cleanup_blt_ring:
4851 intel_cleanup_engine(&dev_priv->engine[BCS]);
4852cleanup_bsd_ring:
4853 intel_cleanup_engine(&dev_priv->engine[VCS]);
4854cleanup_render_ring:
4855 intel_cleanup_engine(&dev_priv->engine[RCS]);
4856
4857 return ret;
4858}
4859
4860int
4861i915_gem_init_hw(struct drm_device *dev)
4862{
4863 struct drm_i915_private *dev_priv = dev->dev_private;
4864 struct intel_engine_cs *engine;
4865 int ret, j;
4866
4867
4868 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4869
4870 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4871 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4872
4873 if (IS_HASWELL(dev))
4874 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4875 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4876
4877 if (HAS_PCH_NOP(dev)) {
4878 if (IS_IVYBRIDGE(dev)) {
4879 u32 temp = I915_READ(GEN7_MSG_CTL);
4880 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4881 I915_WRITE(GEN7_MSG_CTL, temp);
4882 } else if (INTEL_INFO(dev)->gen >= 7) {
4883 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4884 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4885 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4886 }
4887 }
4888
4889 i915_gem_init_swizzling(dev);
4890
4891
4892
4893
4894
4895
4896
4897 init_unused_rings(dev);
4898
4899 BUG_ON(!dev_priv->kernel_context);
4900
4901 ret = i915_ppgtt_init_hw(dev);
4902 if (ret) {
4903 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4904 goto out;
4905 }
4906
4907
4908 for_each_engine(engine, dev_priv) {
4909 ret = engine->init_hw(engine);
4910 if (ret)
4911 goto out;
4912 }
4913
4914 intel_mocs_init_l3cc_table(dev);
4915
4916
4917 if (HAS_GUC_UCODE(dev)) {
4918 ret = intel_guc_ucode_load(dev);
4919 if (ret) {
4920 DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
4921 ret = -EIO;
4922 goto out;
4923 }
4924 }
4925
4926
4927
4928
4929
4930 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
4931 if (ret)
4932 goto out;
4933
4934
4935 for_each_engine(engine, dev_priv) {
4936 struct drm_i915_gem_request *req;
4937
4938 req = i915_gem_request_alloc(engine, NULL);
4939 if (IS_ERR(req)) {
4940 ret = PTR_ERR(req);
4941 break;
4942 }
4943
4944 if (engine->id == RCS) {
4945 for (j = 0; j < NUM_L3_SLICES(dev); j++) {
4946 ret = i915_gem_l3_remap(req, j);
4947 if (ret)
4948 goto err_request;
4949 }
4950 }
4951
4952 ret = i915_ppgtt_init_ring(req);
4953 if (ret)
4954 goto err_request;
4955
4956 ret = i915_gem_context_enable(req);
4957 if (ret)
4958 goto err_request;
4959
4960err_request:
4961 i915_add_request_no_flush(req);
4962 if (ret) {
4963 DRM_ERROR("Failed to enable %s, error=%d\n",
4964 engine->name, ret);
4965 i915_gem_cleanup_engines(dev);
4966 break;
4967 }
4968 }
4969
4970out:
4971 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4972 return ret;
4973}
4974
4975int i915_gem_init(struct drm_device *dev)
4976{
4977 struct drm_i915_private *dev_priv = dev->dev_private;
4978 int ret;
4979
4980 i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4981 i915.enable_execlists);
4982
4983 mutex_lock(&dev->struct_mutex);
4984
4985 if (!i915.enable_execlists) {
4986 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
4987 dev_priv->gt.init_engines = i915_gem_init_engines;
4988 dev_priv->gt.cleanup_engine = intel_cleanup_engine;
4989 dev_priv->gt.stop_engine = intel_stop_engine;
4990 } else {
4991 dev_priv->gt.execbuf_submit = intel_execlists_submission;
4992 dev_priv->gt.init_engines = intel_logical_rings_init;
4993 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4994 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4995 }
4996
4997
4998
4999
5000
5001
5002
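	/*
	 * Hold forcewake (and hence keep the device awake) across the whole
	 * of the hardware initialisation below; it is released again at
	 * out_unlock.
	 */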
5003 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5004
5005 ret = i915_gem_init_userptr(dev);
5006 if (ret)
5007 goto out_unlock;
5008
5009 i915_gem_init_ggtt(dev);
5010
5011 ret = i915_gem_context_init(dev);
5012 if (ret)
5013 goto out_unlock;
5014
5015 ret = dev_priv->gt.init_engines(dev);
5016 if (ret)
5017 goto out_unlock;
5018
5019 ret = i915_gem_init_hw(dev);
5020 if (ret == -EIO) {
5021
5022
5023
5024
5025 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5026 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
5027 ret = 0;
5028 }
5029
5030out_unlock:
5031 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5032 mutex_unlock(&dev->struct_mutex);
5033
5034 return ret;
5035}
5036
5037void
5038i915_gem_cleanup_engines(struct drm_device *dev)
5039{
5040 struct drm_i915_private *dev_priv = dev->dev_private;
5041 struct intel_engine_cs *engine;
5042
5043 for_each_engine(engine, dev_priv)
5044 dev_priv->gt.cleanup_engine(engine);
5045
5046 if (i915.enable_execlists)
5047
5048
5049
5050
5051
5052 intel_gpu_reset(dev, ALL_ENGINES);
5053}
5054
5055static void
5056init_engine_lists(struct intel_engine_cs *engine)
5057{
5058 INIT_LIST_HEAD(&engine->active_list);
5059 INIT_LIST_HEAD(&engine->request_list);
5060}
5061
5062void
5063i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5064{
5065 struct drm_device *dev = dev_priv->dev;
5066
5067 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5068 !IS_CHERRYVIEW(dev_priv))
5069 dev_priv->num_fence_regs = 32;
5070 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
5071 IS_I945GM(dev_priv) || IS_G33(dev_priv))
5072 dev_priv->num_fence_regs = 16;
5073 else
5074 dev_priv->num_fence_regs = 8;
5075
5076 if (intel_vgpu_active(dev))
5077 dev_priv->num_fence_regs =
5078 I915_READ(vgtif_reg(avail_rs.fence_num));
5079
5080
5081 i915_gem_restore_fences(dev);
5082
5083 i915_gem_detect_bit_6_swizzle(dev);
5084}
5085
void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

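	/*
	 * Start the request seqno space just below the 32-bit wrap point so
	 * that the wraparound path is exercised early in the driver's life
	 * rather than only after billions of requests.
	 */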
	dev_priv->next_seqno = ((u32)~0 - 0x1100);
	dev_priv->last_seqno = ((u32)~0 - 0x1101);

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

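	/*
	 * Sever the link between this file and its outstanding requests, so
	 * that a later retire_requests cannot dereference the about-to-be
	 * freed file_priv.
	 */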
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

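/*
 * i915_gem_open - allocate per-client GEM state
 *
 * Sets up the per-file bookkeeping (request list, RPS boost link, BSD engine
 * selection) and opens a GEM context for the new client. On failure the
 * partially initialised state is freed again.
 */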
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

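/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots to track
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing
 * them from @old and setting them in @new. Both @old and @new can be NULL.
 */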
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}

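/* Helpers for querying how an object is bound into an address space */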
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->vm == &ggtt->base &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->vm == &ggtt->base &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}

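/*
 * Like i915_gem_object_get_page(), but also marks the returned page dirty so
 * the data written through it is not lost when the pages are released.
 */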
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only objects backed by struct pages have per-page dirty tracking */
	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

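/*
 * Allocate a new GEM object and fill it with the supplied data. The object
 * is returned in the CPU write domain with its backing pages marked dirty.
 */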
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR_OR_NULL(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
