#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable,
						    bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61
62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63{
64 if (obj->tiling_mode)
65 i915_gem_release_mmap(obj);
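	/* Forget any cached fence state; the object no longer owns a fence register. */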
70 obj->fence_dirty = false;
71 obj->fence_reg = I915_FENCE_REG_NONE;
72}
73
74
75static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76 size_t size)
77{
78 dev_priv->mm.object_count++;
79 dev_priv->mm.object_memory += size;
80}
81
82static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 size_t size)
84{
85 dev_priv->mm.object_count--;
86 dev_priv->mm.object_memory -= size;
87}
88
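/*
 * Wait for a pending GPU reset to complete (or for the GPU to be declared
 * terminally wedged) before allowing further GEM activity; gives up with
 * -EIO after ten seconds.
 */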
89static int
90i915_gem_wait_for_error(struct i915_gpu_error *error)
91{
92 int ret;
93
94#define EXIT_COND (!i915_reset_in_progress(error) || \
95 i915_terminally_wedged(error))
96 if (EXIT_COND)
97 return 0;
104 ret = wait_event_interruptible_timeout(error->reset_queue,
105 EXIT_COND,
106 10*HZ);
107 if (ret == 0) {
108 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
109 return -EIO;
110 } else if (ret < 0) {
111 return ret;
112 }
113#undef EXIT_COND
114
115 return 0;
116}
117
118int i915_mutex_lock_interruptible(struct drm_device *dev)
119{
120 struct drm_i915_private *dev_priv = dev->dev_private;
121 int ret;
122
123 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
124 if (ret)
125 return ret;
126
127 ret = mutex_lock_interruptible(&dev->struct_mutex);
128 if (ret)
129 return ret;
130
131 WARN_ON(i915_verify_lists(dev));
132 return 0;
133}
134
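/* An object is inactive when it is bound into the GTT but not in use by the GPU. */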
135static inline bool
136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
137{
138 return obj->gtt_space && !obj->active;
139}
140
141int
142i915_gem_init_ioctl(struct drm_device *dev, void *data,
143 struct drm_file *file)
144{
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct drm_i915_gem_init *args = data;
147
148 if (drm_core_check_feature(dev, DRIVER_MODESET))
149 return -ENODEV;
150
151 if (args->gtt_start >= args->gtt_end ||
152 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
153 return -EINVAL;
154
155
156 if (INTEL_INFO(dev)->gen >= 5)
157 return -ENODEV;
158
159 mutex_lock(&dev->struct_mutex);
160 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
161 args->gtt_end);
162 dev_priv->gtt.mappable_end = args->gtt_end;
163 mutex_unlock(&dev->struct_mutex);
164
165 return 0;
166}
167
168int
169i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
170 struct drm_file *file)
171{
172 struct drm_i915_private *dev_priv = dev->dev_private;
173 struct drm_i915_gem_get_aperture *args = data;
174 struct drm_i915_gem_object *obj;
175 size_t pinned;
176
177 pinned = 0;
178 mutex_lock(&dev->struct_mutex);
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
180 if (obj->pin_count)
181 pinned += obj->gtt_space->size;
182 mutex_unlock(&dev->struct_mutex);
183
184 args->aper_size = dev_priv->gtt.total;
185 args->aper_available_size = args->aper_size - pinned;
186
187 return 0;
188}
189
190void *i915_gem_object_alloc(struct drm_device *dev)
191{
192 struct drm_i915_private *dev_priv = dev->dev_private;
193 return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
194}
195
196void i915_gem_object_free(struct drm_i915_gem_object *obj)
197{
198 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
199 kmem_cache_free(dev_priv->slab, obj);
200}
201
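/*
 * Allocate a new GEM object of @size bytes (rounded up to a page) and
 * create a userspace handle for it; the handle then owns the only
 * reference, so the local reference is dropped before returning.
 */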
202static int
203i915_gem_create(struct drm_file *file,
204 struct drm_device *dev,
205 uint64_t size,
206 uint32_t *handle_p)
207{
208 struct drm_i915_gem_object *obj;
209 int ret;
210 u32 handle;
211
212 size = roundup(size, PAGE_SIZE);
213 if (size == 0)
214 return -EINVAL;
215
216
217 obj = i915_gem_alloc_object(dev, size);
218 if (obj == NULL)
219 return -ENOMEM;
220
221 ret = drm_gem_handle_create(file, &obj->base, &handle);
222 if (ret) {
223 drm_gem_object_release(&obj->base);
224 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
225 i915_gem_object_free(obj);
226 return ret;
227 }
228
229
230 drm_gem_object_unreference(&obj->base);
231 trace_i915_gem_object_create(obj);
232
233 *handle_p = handle;
234 return 0;
235}
236
237int
238i915_gem_dumb_create(struct drm_file *file,
239 struct drm_device *dev,
240 struct drm_mode_create_dumb *args)
241{
242
243 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
244 args->size = args->pitch * args->height;
245 return i915_gem_create(file, dev,
246 args->size, &args->handle);
247}
248
249int i915_gem_dumb_destroy(struct drm_file *file,
250 struct drm_device *dev,
251 uint32_t handle)
252{
253 return drm_gem_handle_delete(file, handle);
254}
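/* Creates a new GEM object and returns a handle to it. */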
259int
260i915_gem_create_ioctl(struct drm_device *dev, void *data,
261 struct drm_file *file)
262{
263 struct drm_i915_gem_create *args = data;
264
265 return i915_gem_create(file, dev,
266 args->size, &args->handle);
267}
268
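/*
 * Copy helpers for bit-17 swizzled objects: within every 128-byte span the
 * two 64-byte halves are swapped (gpu_offset ^ 64), so the copy is done in
 * at most cacheline-sized chunks with the offset un-swizzled each time.
 */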
269static inline int
270__copy_to_user_swizzled(char __user *cpu_vaddr,
271 const char *gpu_vaddr, int gpu_offset,
272 int length)
273{
274 int ret, cpu_offset = 0;
275
276 while (length > 0) {
277 int cacheline_end = ALIGN(gpu_offset + 1, 64);
278 int this_length = min(cacheline_end - gpu_offset, length);
279 int swizzled_gpu_offset = gpu_offset ^ 64;
280
281 ret = __copy_to_user(cpu_vaddr + cpu_offset,
282 gpu_vaddr + swizzled_gpu_offset,
283 this_length);
284 if (ret)
285 return ret + length;
286
287 cpu_offset += this_length;
288 gpu_offset += this_length;
289 length -= this_length;
290 }
291
292 return 0;
293}
294
295static inline int
296__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
297 const char __user *cpu_vaddr,
298 int length)
299{
300 int ret, cpu_offset = 0;
301
302 while (length > 0) {
303 int cacheline_end = ALIGN(gpu_offset + 1, 64);
304 int this_length = min(cacheline_end - gpu_offset, length);
305 int swizzled_gpu_offset = gpu_offset ^ 64;
306
307 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
308 cpu_vaddr + cpu_offset,
309 this_length);
310 if (ret)
311 return ret + length;
312
313 cpu_offset += this_length;
314 gpu_offset += this_length;
315 length -= this_length;
316 }
317
318 return 0;
319}
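/*
 * Per-page pread fast path: runs under the struct_mutex using an atomic
 * kmap, so it cannot fault on the user address and refuses pages that
 * need bit-17 swizzle fixups.
 */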
324static int
325shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
326 char __user *user_data,
327 bool page_do_bit17_swizzling, bool needs_clflush)
328{
329 char *vaddr;
330 int ret;
331
332 if (unlikely(page_do_bit17_swizzling))
333 return -EINVAL;
334
335 vaddr = kmap_atomic(page);
336 if (needs_clflush)
337 drm_clflush_virt_range(vaddr + shmem_page_offset,
338 page_length);
339 ret = __copy_to_user_inatomic(user_data,
340 vaddr + shmem_page_offset,
341 page_length);
342 kunmap_atomic(vaddr);
343
344 return ret ? -EFAULT : 0;
345}
346
347static void
348shmem_clflush_swizzled_range(char *addr, unsigned long length,
349 bool swizzled)
350{
351 if (unlikely(swizzled)) {
352 unsigned long start = (unsigned long) addr;
353 unsigned long end = (unsigned long) addr + length;
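		/*
		 * Bit-17 swizzling mixes data between adjacent 64-byte lines,
		 * so flush whole aligned 128-byte blocks to be safe.
		 */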
359 start = round_down(start, 128);
360 end = round_up(end, 128);
361
362 drm_clflush_virt_range((void *)start, end - start);
363 } else {
364 drm_clflush_virt_range(addr, length);
365 }
366
367}
368
369
370
371static int
372shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
373 char __user *user_data,
374 bool page_do_bit17_swizzling, bool needs_clflush)
375{
376 char *vaddr;
377 int ret;
378
379 vaddr = kmap(page);
380 if (needs_clflush)
381 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
382 page_length,
383 page_do_bit17_swizzling);
384
385 if (page_do_bit17_swizzling)
386 ret = __copy_to_user_swizzled(user_data,
387 vaddr, shmem_page_offset,
388 page_length);
389 else
390 ret = __copy_to_user(user_data,
391 vaddr + shmem_page_offset,
392 page_length);
393 kunmap(page);
394
	return ret ? -EFAULT : 0;
396}
397
398static int
399i915_gem_shmem_pread(struct drm_device *dev,
400 struct drm_i915_gem_object *obj,
401 struct drm_i915_gem_pread *args,
402 struct drm_file *file)
403{
404 char __user *user_data;
405 ssize_t remain;
406 loff_t offset;
407 int shmem_page_offset, page_length, ret = 0;
408 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
409 int prefaulted = 0;
410 int needs_clflush = 0;
411 struct sg_page_iter sg_iter;
412
413 user_data = to_user_ptr(args->data_ptr);
414 remain = args->size;
415
416 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
417
418 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
419
420
421
422
423 if (obj->cache_level == I915_CACHE_NONE)
424 needs_clflush = 1;
425 if (obj->gtt_space) {
426 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret)
428 return ret;
429 }
430 }
431
432 ret = i915_gem_object_get_pages(obj);
433 if (ret)
434 return ret;
435
436 i915_gem_object_pin_pages(obj);
437
438 offset = args->offset;
439
440 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
441 offset >> PAGE_SHIFT) {
442 struct page *page = sg_page_iter_page(&sg_iter);
443
444 if (remain <= 0)
445 break;
446
447
448
449
450
451
452 shmem_page_offset = offset_in_page(offset);
453 page_length = remain;
454 if ((shmem_page_offset + page_length) > PAGE_SIZE)
455 page_length = PAGE_SIZE - shmem_page_offset;
456
457 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
458 (page_to_phys(page) & (1 << 17)) != 0;
459
460 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
461 user_data, page_do_bit17_swizzling,
462 needs_clflush);
463 if (ret == 0)
464 goto next_page;
465
466 mutex_unlock(&dev->struct_mutex);
467
468 if (!prefaulted) {
469 ret = fault_in_multipages_writeable(user_data, remain);
470
471
472
473
474 (void)ret;
475 prefaulted = 1;
476 }
477
478 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
479 user_data, page_do_bit17_swizzling,
480 needs_clflush);
481
482 mutex_lock(&dev->struct_mutex);
483
484next_page:
485 mark_page_accessed(page);
486
487 if (ret)
488 goto out;
489
490 remain -= page_length;
491 user_data += page_length;
492 offset += page_length;
493 }
494
495out:
496 i915_gem_object_unpin_pages(obj);
497
498 return ret;
499}
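/*
 * Reads data from the object referenced by handle into a user buffer.
 * On error, the contents of the destination buffer are undefined.
 */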
506int
507i915_gem_pread_ioctl(struct drm_device *dev, void *data,
508 struct drm_file *file)
509{
510 struct drm_i915_gem_pread *args = data;
511 struct drm_i915_gem_object *obj;
512 int ret = 0;
513
514 if (args->size == 0)
515 return 0;
516
517 if (!access_ok(VERIFY_WRITE,
518 to_user_ptr(args->data_ptr),
519 args->size))
520 return -EFAULT;
521
522 ret = i915_mutex_lock_interruptible(dev);
523 if (ret)
524 return ret;
525
526 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
527 if (&obj->base == NULL) {
528 ret = -ENOENT;
529 goto unlock;
530 }
531
532
533 if (args->offset > obj->base.size ||
534 args->size > obj->base.size - args->offset) {
535 ret = -EINVAL;
536 goto out;
537 }
538
539
540
541
542 if (!obj->base.filp) {
543 ret = -EINVAL;
544 goto out;
545 }
546
547 trace_i915_gem_object_pread(obj, args->offset, args->size);
548
549 ret = i915_gem_shmem_pread(dev, obj, args, file);
550
551out:
552 drm_gem_object_unreference(&obj->base);
553unlock:
554 mutex_unlock(&dev->struct_mutex);
555 return ret;
556}
557
558
559
560
561
562static inline int
563fast_user_write(struct io_mapping *mapping,
564 loff_t page_base, int page_offset,
565 char __user *user_data,
566 int length)
567{
568 void __iomem *vaddr_atomic;
569 void *vaddr;
570 unsigned long unwritten;
571
572 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
573
574 vaddr = (void __force*)vaddr_atomic + page_offset;
575 unwritten = __copy_from_user_inatomic_nocache(vaddr,
576 user_data, length);
577 io_mapping_unmap_atomic(vaddr_atomic);
578 return unwritten;
579}
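/*
 * Fast pwrite path: pin the object into the mappable aperture, flip it to
 * the GTT write domain, drop its fence and copy the user data page by page
 * through a write-combining mapping.  A faulting user address is reported
 * as -EFAULT so the caller can fall back to the shmem path.
 */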
585static int
586i915_gem_gtt_pwrite_fast(struct drm_device *dev,
587 struct drm_i915_gem_object *obj,
588 struct drm_i915_gem_pwrite *args,
589 struct drm_file *file)
590{
591 drm_i915_private_t *dev_priv = dev->dev_private;
592 ssize_t remain;
593 loff_t offset, page_base;
594 char __user *user_data;
595 int page_offset, page_length, ret;
596
597 ret = i915_gem_object_pin(obj, 0, true, true);
598 if (ret)
599 goto out;
600
601 ret = i915_gem_object_set_to_gtt_domain(obj, true);
602 if (ret)
603 goto out_unpin;
604
605 ret = i915_gem_object_put_fence(obj);
606 if (ret)
607 goto out_unpin;
608
609 user_data = to_user_ptr(args->data_ptr);
610 remain = args->size;
611
612 offset = obj->gtt_offset + args->offset;
613
614 while (remain > 0) {
615
616
617
618
619
620
621 page_base = offset & PAGE_MASK;
622 page_offset = offset_in_page(offset);
623 page_length = remain;
624 if ((page_offset + remain) > PAGE_SIZE)
625 page_length = PAGE_SIZE - page_offset;
626
627
628
629
630
631 if (fast_user_write(dev_priv->gtt.mappable, page_base,
632 page_offset, user_data, page_length)) {
633 ret = -EFAULT;
634 goto out_unpin;
635 }
636
637 remain -= page_length;
638 user_data += page_length;
639 offset += page_length;
640 }
641
642out_unpin:
643 i915_gem_object_unpin(obj);
644out:
645 return ret;
646}
647
648
649
650
651
652static int
653shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
654 char __user *user_data,
655 bool page_do_bit17_swizzling,
656 bool needs_clflush_before,
657 bool needs_clflush_after)
658{
659 char *vaddr;
660 int ret;
661
662 if (unlikely(page_do_bit17_swizzling))
663 return -EINVAL;
664
665 vaddr = kmap_atomic(page);
666 if (needs_clflush_before)
667 drm_clflush_virt_range(vaddr + shmem_page_offset,
668 page_length);
669 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
670 user_data,
671 page_length);
672 if (needs_clflush_after)
673 drm_clflush_virt_range(vaddr + shmem_page_offset,
674 page_length);
675 kunmap_atomic(vaddr);
676
677 return ret ? -EFAULT : 0;
678}
679
680
681
682static int
683shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
684 char __user *user_data,
685 bool page_do_bit17_swizzling,
686 bool needs_clflush_before,
687 bool needs_clflush_after)
688{
689 char *vaddr;
690 int ret;
691
692 vaddr = kmap(page);
693 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
694 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
695 page_length,
696 page_do_bit17_swizzling);
697 if (page_do_bit17_swizzling)
698 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
699 user_data,
700 page_length);
701 else
702 ret = __copy_from_user(vaddr + shmem_page_offset,
703 user_data,
704 page_length);
705 if (needs_clflush_after)
706 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
707 page_length,
708 page_do_bit17_swizzling);
709 kunmap(page);
710
711 return ret ? -EFAULT : 0;
712}
713
714static int
715i915_gem_shmem_pwrite(struct drm_device *dev,
716 struct drm_i915_gem_object *obj,
717 struct drm_i915_gem_pwrite *args,
718 struct drm_file *file)
719{
720 ssize_t remain;
721 loff_t offset;
722 char __user *user_data;
723 int shmem_page_offset, page_length, ret = 0;
724 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
725 int hit_slowpath = 0;
726 int needs_clflush_after = 0;
727 int needs_clflush_before = 0;
728 struct sg_page_iter sg_iter;
729
730 user_data = to_user_ptr(args->data_ptr);
731 remain = args->size;
732
733 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
734
735 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
736
737
738
739
740 if (obj->cache_level == I915_CACHE_NONE)
741 needs_clflush_after = 1;
742 if (obj->gtt_space) {
743 ret = i915_gem_object_set_to_gtt_domain(obj, true);
744 if (ret)
745 return ret;
746 }
747 }
748
749
750 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
751 && obj->cache_level == I915_CACHE_NONE)
752 needs_clflush_before = 1;
753
754 ret = i915_gem_object_get_pages(obj);
755 if (ret)
756 return ret;
757
758 i915_gem_object_pin_pages(obj);
759
760 offset = args->offset;
761 obj->dirty = 1;
762
763 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
764 offset >> PAGE_SHIFT) {
765 struct page *page = sg_page_iter_page(&sg_iter);
766 int partial_cacheline_write;
767
768 if (remain <= 0)
769 break;
770
771
772
773
774
775
776 shmem_page_offset = offset_in_page(offset);
777
778 page_length = remain;
779 if ((shmem_page_offset + page_length) > PAGE_SIZE)
780 page_length = PAGE_SIZE - shmem_page_offset;
781
782
783
784
785 partial_cacheline_write = needs_clflush_before &&
786 ((shmem_page_offset | page_length)
787 & (boot_cpu_data.x86_clflush_size - 1));
788
789 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
790 (page_to_phys(page) & (1 << 17)) != 0;
791
792 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
793 user_data, page_do_bit17_swizzling,
794 partial_cacheline_write,
795 needs_clflush_after);
796 if (ret == 0)
797 goto next_page;
798
799 hit_slowpath = 1;
800 mutex_unlock(&dev->struct_mutex);
801 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
802 user_data, page_do_bit17_swizzling,
803 partial_cacheline_write,
804 needs_clflush_after);
805
806 mutex_lock(&dev->struct_mutex);
807
808next_page:
809 set_page_dirty(page);
810 mark_page_accessed(page);
811
812 if (ret)
813 goto out;
814
815 remain -= page_length;
816 user_data += page_length;
817 offset += page_length;
818 }
819
820out:
821 i915_gem_object_unpin_pages(obj);
822
823 if (hit_slowpath) {
824
825
826
827
828
829 if (!needs_clflush_after &&
830 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
831 i915_gem_clflush_object(obj);
832 i915_gem_chipset_flush(dev);
833 }
834 }
835
836 if (needs_clflush_after)
837 i915_gem_chipset_flush(dev);
838
839 return ret;
840}
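/*
 * Writes data to the object referenced by handle.  On error, the contents
 * of the destination object are undefined.
 */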
847int
848i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
849 struct drm_file *file)
850{
851 struct drm_i915_gem_pwrite *args = data;
852 struct drm_i915_gem_object *obj;
853 int ret;
854
855 if (args->size == 0)
856 return 0;
857
858 if (!access_ok(VERIFY_READ,
859 to_user_ptr(args->data_ptr),
860 args->size))
861 return -EFAULT;
862
863 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
864 args->size);
865 if (ret)
866 return -EFAULT;
867
868 ret = i915_mutex_lock_interruptible(dev);
869 if (ret)
870 return ret;
871
872 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
873 if (&obj->base == NULL) {
874 ret = -ENOENT;
875 goto unlock;
876 }
877
878
879 if (args->offset > obj->base.size ||
880 args->size > obj->base.size - args->offset) {
881 ret = -EINVAL;
882 goto out;
883 }
884
885
886
887
888 if (!obj->base.filp) {
889 ret = -EINVAL;
890 goto out;
891 }
892
893 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
894
895 ret = -EFAULT;
896
897
898
899
900
901
902 if (obj->phys_obj) {
903 ret = i915_gem_phys_pwrite(dev, obj, args, file);
904 goto out;
905 }
906
907 if (obj->cache_level == I915_CACHE_NONE &&
908 obj->tiling_mode == I915_TILING_NONE &&
909 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
910 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
911
912
913
914 }
915
916 if (ret == -EFAULT || ret == -ENOSPC)
917 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
918
919out:
920 drm_gem_object_unreference(&obj->base);
921unlock:
922 mutex_unlock(&dev->struct_mutex);
923 return ret;
924}
925
926int
927i915_gem_check_wedge(struct i915_gpu_error *error,
928 bool interruptible)
929{
930 if (i915_reset_in_progress(error)) {
931
932
933 if (!interruptible)
934 return -EIO;
935
936
937 if (i915_terminally_wedged(error))
938 return -EIO;
939
940 return -EAGAIN;
941 }
942
943 return 0;
944}
945
946
947
948
949
950static int
951i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
952{
953 int ret;
954
955 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
956
957 ret = 0;
958 if (seqno == ring->outstanding_lazy_request)
959 ret = i915_add_request(ring, NULL, NULL);
960
961 return ret;
962}
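/*
 * __wait_seqno - wait until execution of @seqno on @ring completes.
 * @reset_counter is the value sampled before any locks were dropped; if a
 * GPU reset happens while sleeping, the wait aborts with -EAGAIN.  A NULL
 * @timeout waits forever, otherwise the remaining time is written back and
 * -ETIME is returned on expiry.
 */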
982static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
983 unsigned reset_counter,
984 bool interruptible, struct timespec *timeout)
985{
986 drm_i915_private_t *dev_priv = ring->dev->dev_private;
987 struct timespec before, now, wait_time={1,0};
988 unsigned long timeout_jiffies;
989 long end;
990 bool wait_forever = true;
991 int ret;
992
993 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
994 return 0;
995
996 trace_i915_gem_request_wait_begin(ring, seqno);
997
998 if (timeout != NULL) {
999 wait_time = *timeout;
1000 wait_forever = false;
1001 }
1002
1003 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1004
1005 if (WARN_ON(!ring->irq_get(ring)))
1006 return -ENODEV;
1007
1008
1009 getrawmonotonic(&before);
1010
1011#define EXIT_COND \
1012 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1013 i915_reset_in_progress(&dev_priv->gpu_error) || \
1014 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1015 do {
1016 if (interruptible)
1017 end = wait_event_interruptible_timeout(ring->irq_queue,
1018 EXIT_COND,
1019 timeout_jiffies);
1020 else
1021 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1022 timeout_jiffies);
1023
1024
1025
1026 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1027 end = -EAGAIN;
1028
1029
1030
1031 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1032 if (ret)
1033 end = ret;
1034 } while (end == 0 && wait_forever);
1035
1036 getrawmonotonic(&now);
1037
1038 ring->irq_put(ring);
1039 trace_i915_gem_request_wait_end(ring, seqno);
1040#undef EXIT_COND
1041
1042 if (timeout) {
1043 struct timespec sleep_time = timespec_sub(now, before);
1044 *timeout = timespec_sub(*timeout, sleep_time);
1045 if (!timespec_valid(timeout))
1046 set_normalized_timespec(timeout, 0, 0);
1047 }
1048
1049 switch (end) {
1050 case -EIO:
1051 case -EAGAIN:
1052 case -ERESTARTSYS:
1053 return (int)end;
1054 case 0:
1055 return -ETIME;
1056 default:
1057 WARN_ON(end < 0);
1058 return 0;
1059 }
1060}
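/*
 * Wait for the given sequence number to be signalled on a ring, flushing
 * any outstanding lazy request for it first.
 */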
1066int
1067i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1068{
1069 struct drm_device *dev = ring->dev;
1070 struct drm_i915_private *dev_priv = dev->dev_private;
1071 bool interruptible = dev_priv->mm.interruptible;
1072 int ret;
1073
1074 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1075 BUG_ON(seqno == 0);
1076
1077 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1078 if (ret)
1079 return ret;
1080
1081 ret = i915_gem_check_olr(ring, seqno);
1082 if (ret)
1083 return ret;
1084
1085 return __wait_seqno(ring, seqno,
1086 atomic_read(&dev_priv->gpu_error.reset_counter),
1087 interruptible, NULL);
1088}
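/*
 * Ensure that all rendering to the object has completed and it is safe to
 * access from the CPU; a read-only wait only waits for the last write.
 */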
1094static __must_check int
1095i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1096 bool readonly)
1097{
1098 struct intel_ring_buffer *ring = obj->ring;
1099 u32 seqno;
1100 int ret;
1101
1102 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1103 if (seqno == 0)
1104 return 0;
1105
1106 ret = i915_wait_seqno(ring, seqno);
1107 if (ret)
1108 return ret;
1109
1110 i915_gem_retire_requests_ring(ring);
1111
1112
1113
1114
1115 if (obj->last_write_seqno &&
1116 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1117 obj->last_write_seqno = 0;
1118 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1119 }
1120
1121 return 0;
1122}
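/*
 * Variant of the wait above that drops the struct_mutex while sleeping, so
 * the object state may have changed by the time it returns.
 */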
1127static __must_check int
1128i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1129 bool readonly)
1130{
1131 struct drm_device *dev = obj->base.dev;
1132 struct drm_i915_private *dev_priv = dev->dev_private;
1133 struct intel_ring_buffer *ring = obj->ring;
1134 unsigned reset_counter;
1135 u32 seqno;
1136 int ret;
1137
1138 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1139 BUG_ON(!dev_priv->mm.interruptible);
1140
1141 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1142 if (seqno == 0)
1143 return 0;
1144
1145 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1146 if (ret)
1147 return ret;
1148
1149 ret = i915_gem_check_olr(ring, seqno);
1150 if (ret)
1151 return ret;
1152
1153 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1154 mutex_unlock(&dev->struct_mutex);
1155 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1156 mutex_lock(&dev->struct_mutex);
1157
1158 i915_gem_retire_requests_ring(ring);
1159
1160
1161
1162
1163 if (obj->last_write_seqno &&
1164 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1165 obj->last_write_seqno = 0;
1166 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1167 }
1168
1169 return ret;
1170}
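/*
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */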
1176int
1177i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1178 struct drm_file *file)
1179{
1180 struct drm_i915_gem_set_domain *args = data;
1181 struct drm_i915_gem_object *obj;
1182 uint32_t read_domains = args->read_domains;
1183 uint32_t write_domain = args->write_domain;
1184 int ret;
1185
1186
1187 if (write_domain & I915_GEM_GPU_DOMAINS)
1188 return -EINVAL;
1189
1190 if (read_domains & I915_GEM_GPU_DOMAINS)
1191 return -EINVAL;
1192
1193
1194
1195
1196 if (write_domain != 0 && read_domains != write_domain)
1197 return -EINVAL;
1198
1199 ret = i915_mutex_lock_interruptible(dev);
1200 if (ret)
1201 return ret;
1202
1203 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1204 if (&obj->base == NULL) {
1205 ret = -ENOENT;
1206 goto unlock;
1207 }
1208
1209
1210
1211
1212
1213 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1214 if (ret)
1215 goto unref;
1216
1217 if (read_domains & I915_GEM_DOMAIN_GTT) {
1218 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1219
1220
1221
1222
1223
1224 if (ret == -EINVAL)
1225 ret = 0;
1226 } else {
1227 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1228 }
1229
1230unref:
1231 drm_gem_object_unreference(&obj->base);
1232unlock:
1233 mutex_unlock(&dev->struct_mutex);
1234 return ret;
1235}
1236
1237
1238
1239
1240int
1241i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1242 struct drm_file *file)
1243{
1244 struct drm_i915_gem_sw_finish *args = data;
1245 struct drm_i915_gem_object *obj;
1246 int ret = 0;
1247
1248 ret = i915_mutex_lock_interruptible(dev);
1249 if (ret)
1250 return ret;
1251
1252 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1253 if (&obj->base == NULL) {
1254 ret = -ENOENT;
1255 goto unlock;
1256 }
1257
1258
1259 if (obj->pin_count)
1260 i915_gem_object_flush_cpu_write_domain(obj);
1261
1262 drm_gem_object_unreference(&obj->base);
1263unlock:
1264 mutex_unlock(&dev->struct_mutex);
1265 return ret;
1266}
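/*
 * Maps the contents of an object into userspace and returns the address.
 * The mapping references the shmem backing store directly and does not
 * hold a reference on the GEM object itself.
 */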
1275int
1276i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1277 struct drm_file *file)
1278{
1279 struct drm_i915_gem_mmap *args = data;
1280 struct drm_gem_object *obj;
1281 unsigned long addr;
1282
1283 obj = drm_gem_object_lookup(dev, file, args->handle);
1284 if (obj == NULL)
1285 return -ENOENT;
1286
1287
1288
1289
1290 if (!obj->filp) {
1291 drm_gem_object_unreference_unlocked(obj);
1292 return -EINVAL;
1293 }
1294
1295 addr = vm_mmap(obj->filp, 0, args->size,
1296 PROT_READ | PROT_WRITE, MAP_SHARED,
1297 args->offset);
1298 drm_gem_object_unreference_unlocked(obj);
1299 if (IS_ERR((void *)addr))
1300 return addr;
1301
1302 args->addr_ptr = (uint64_t) addr;
1303
1304 return 0;
1305}
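/*
 * i915_gem_fault - fault handler for GTT mmaps.  Pins the object into the
 * mappable aperture, moves it to the GTT domain, grabs a fence register if
 * needed and inserts the faulting PFN into the process address space.
 */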
1323int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1324{
1325 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1326 struct drm_device *dev = obj->base.dev;
1327 drm_i915_private_t *dev_priv = dev->dev_private;
1328 pgoff_t page_offset;
1329 unsigned long pfn;
1330 int ret = 0;
1331 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1332
1333
1334 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1335 PAGE_SHIFT;
1336
1337 ret = i915_mutex_lock_interruptible(dev);
1338 if (ret)
1339 goto out;
1340
1341 trace_i915_gem_object_fault(obj, page_offset, true, write);
1342
1343
1344 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1345 ret = -EINVAL;
1346 goto unlock;
1347 }
1348
1349
1350 ret = i915_gem_object_pin(obj, 0, true, false);
1351 if (ret)
1352 goto unlock;
1353
1354 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1355 if (ret)
1356 goto unpin;
1357
1358 ret = i915_gem_object_get_fence(obj);
1359 if (ret)
1360 goto unpin;
1361
1362 obj->fault_mappable = true;
1363
1364 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
1365 page_offset;
1366
1367
1368 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1369unpin:
1370 i915_gem_object_unpin(obj);
1371unlock:
1372 mutex_unlock(&dev->struct_mutex);
1373out:
1374 switch (ret) {
1375 case -EIO:
1376
1377
1378
1379 if (i915_terminally_wedged(&dev_priv->gpu_error))
1380 return VM_FAULT_SIGBUS;
1381 case -EAGAIN:
1382
1383
1384
1385
1386
1387
1388
1389 set_need_resched();
1390 case 0:
1391 case -ERESTARTSYS:
1392 case -EINTR:
1393 case -EBUSY:
1394
1395
1396
1397
1398 return VM_FAULT_NOPAGE;
1399 case -ENOMEM:
1400 return VM_FAULT_OOM;
1401 case -ENOSPC:
1402 return VM_FAULT_SIGBUS;
1403 default:
1404 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1405 return VM_FAULT_SIGBUS;
1406 }
1407}
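/*
 * i915_gem_release_mmap - zap any userspace PTEs pointing at the object's
 * GTT mapping so that the next access faults back into i915_gem_fault and
 * revalidates the mapping.
 */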
1423void
1424i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1425{
1426 if (!obj->fault_mappable)
1427 return;
1428
1429 if (obj->base.dev->dev_mapping)
1430 unmap_mapping_range(obj->base.dev->dev_mapping,
1431 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1432 obj->base.size, 1);
1433
1434 obj->fault_mappable = false;
1435}
1436
1437uint32_t
1438i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1439{
1440 uint32_t gtt_size;
1441
1442 if (INTEL_INFO(dev)->gen >= 4 ||
1443 tiling_mode == I915_TILING_NONE)
1444 return size;
1445
1446
1447 if (INTEL_INFO(dev)->gen == 3)
1448 gtt_size = 1024*1024;
1449 else
1450 gtt_size = 512*1024;
1451
1452 while (gtt_size < size)
1453 gtt_size <<= 1;
1454
1455 return gtt_size;
1456}
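/*
 * i915_gem_get_gtt_alignment - return the GTT alignment required for an
 * object: 4096 suffices on gen4+, on G33 when unfenced, and for untiled
 * objects; older tiled objects must be aligned to their fence size.
 */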
1465uint32_t
1466i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1467 int tiling_mode, bool fenced)
1468{
1469
1470
1471
1472
1473 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1474 tiling_mode == I915_TILING_NONE)
1475 return 4096;
1476
1477
1478
1479
1480
1481 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1482}
1483
1484static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1485{
1486 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1487 int ret;
1488
1489 if (obj->base.map_list.map)
1490 return 0;
1491
1492 dev_priv->mm.shrinker_no_lock_stealing = true;
1493
1494 ret = drm_gem_create_mmap_offset(&obj->base);
1495 if (ret != -ENOSPC)
1496 goto out;
1497
1498
1499
1500
1501
1502
1503
1504
1505 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1506 ret = drm_gem_create_mmap_offset(&obj->base);
1507 if (ret != -ENOSPC)
1508 goto out;
1509
1510 i915_gem_shrink_all(dev_priv);
1511 ret = drm_gem_create_mmap_offset(&obj->base);
1512out:
1513 dev_priv->mm.shrinker_no_lock_stealing = false;
1514
1515 return ret;
1516}
1517
1518static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1519{
1520 if (!obj->base.map_list.map)
1521 return;
1522
1523 drm_gem_free_mmap_offset(&obj->base);
1524}
1525
1526int
1527i915_gem_mmap_gtt(struct drm_file *file,
1528 struct drm_device *dev,
1529 uint32_t handle,
1530 uint64_t *offset)
1531{
1532 struct drm_i915_private *dev_priv = dev->dev_private;
1533 struct drm_i915_gem_object *obj;
1534 int ret;
1535
1536 ret = i915_mutex_lock_interruptible(dev);
1537 if (ret)
1538 return ret;
1539
1540 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1541 if (&obj->base == NULL) {
1542 ret = -ENOENT;
1543 goto unlock;
1544 }
1545
1546 if (obj->base.size > dev_priv->gtt.mappable_end) {
1547 ret = -E2BIG;
1548 goto out;
1549 }
1550
1551 if (obj->madv != I915_MADV_WILLNEED) {
1552 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1553 ret = -EINVAL;
1554 goto out;
1555 }
1556
1557 ret = i915_gem_object_create_mmap_offset(obj);
1558 if (ret)
1559 goto out;
1560
1561 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1562
1563out:
1564 drm_gem_object_unreference(&obj->base);
1565unlock:
1566 mutex_unlock(&dev->struct_mutex);
1567 return ret;
1568}
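/*
 * i915_gem_mmap_gtt_ioctl - prepare an object for mapping through the GTT
 * aperture by creating its fake mmap offset and returning that offset for
 * use with mmap(2) on the DRM fd.
 */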
1585int
1586i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1587 struct drm_file *file)
1588{
1589 struct drm_i915_gem_mmap_gtt *args = data;
1590
1591 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1592}
1593
1594
1595static void
1596i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1597{
1598 struct inode *inode;
1599
1600 i915_gem_object_free_mmap_offset(obj);
1601
1602 if (obj->base.filp == NULL)
1603 return;
1604
1605
1606
1607
1608
1609
1610 inode = file_inode(obj->base.filp);
1611 shmem_truncate_range(inode, 0, (loff_t)-1);
1612
1613 obj->madv = __I915_MADV_PURGED;
1614}
1615
1616static inline int
1617i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1618{
1619 return obj->madv == I915_MADV_DONTNEED;
1620}
1621
1622static void
1623i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1624{
1625 struct sg_page_iter sg_iter;
1626 int ret;
1627
1628 BUG_ON(obj->madv == __I915_MADV_PURGED);
1629
1630 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1631 if (ret) {
1632
1633
1634
1635 WARN_ON(ret != -EIO);
1636 i915_gem_clflush_object(obj);
1637 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1638 }
1639
1640 if (i915_gem_object_needs_bit17_swizzle(obj))
1641 i915_gem_object_save_bit_17_swizzle(obj);
1642
1643 if (obj->madv == I915_MADV_DONTNEED)
1644 obj->dirty = 0;
1645
1646 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1647 struct page *page = sg_page_iter_page(&sg_iter);
1648
1649 if (obj->dirty)
1650 set_page_dirty(page);
1651
1652 if (obj->madv == I915_MADV_WILLNEED)
1653 mark_page_accessed(page);
1654
1655 page_cache_release(page);
1656 }
1657 obj->dirty = 0;
1658
1659 sg_free_table(obj->pages);
1660 kfree(obj->pages);
1661}
1662
1663int
1664i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1665{
1666 const struct drm_i915_gem_object_ops *ops = obj->ops;
1667
1668 if (obj->pages == NULL)
1669 return 0;
1670
1671 BUG_ON(obj->gtt_space);
1672
1673 if (obj->pages_pin_count)
1674 return -EBUSY;
1675
1676
1677
1678
1679 list_del(&obj->gtt_list);
1680
1681 ops->put_pages(obj);
1682 obj->pages = NULL;
1683
1684 if (i915_gem_object_is_purgeable(obj))
1685 i915_gem_object_truncate(obj);
1686
1687 return 0;
1688}
1689
1690static long
1691__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1692 bool purgeable_only)
1693{
1694 struct drm_i915_gem_object *obj, *next;
1695 long count = 0;
1696
1697 list_for_each_entry_safe(obj, next,
1698 &dev_priv->mm.unbound_list,
1699 gtt_list) {
1700 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1701 i915_gem_object_put_pages(obj) == 0) {
1702 count += obj->base.size >> PAGE_SHIFT;
1703 if (count >= target)
1704 return count;
1705 }
1706 }
1707
1708 list_for_each_entry_safe(obj, next,
1709 &dev_priv->mm.inactive_list,
1710 mm_list) {
1711 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1712 i915_gem_object_unbind(obj) == 0 &&
1713 i915_gem_object_put_pages(obj) == 0) {
1714 count += obj->base.size >> PAGE_SHIFT;
1715 if (count >= target)
1716 return count;
1717 }
1718 }
1719
1720 return count;
1721}
1722
1723static long
1724i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1725{
1726 return __i915_gem_shrink(dev_priv, target, true);
1727}
1728
1729static void
1730i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1731{
1732 struct drm_i915_gem_object *obj, *next;
1733
1734 i915_gem_evict_everything(dev_priv->dev);
1735
1736 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1737 i915_gem_object_put_pages(obj);
1738}
1739
1740static int
1741i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1742{
1743 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1744 int page_count, i;
1745 struct address_space *mapping;
1746 struct sg_table *st;
1747 struct scatterlist *sg;
1748 struct sg_page_iter sg_iter;
1749 struct page *page;
1750 unsigned long last_pfn = 0;
1751 gfp_t gfp;
1752
1753
1754
1755
1756
1757 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1758 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1759
1760 st = kmalloc(sizeof(*st), GFP_KERNEL);
1761 if (st == NULL)
1762 return -ENOMEM;
1763
1764 page_count = obj->base.size / PAGE_SIZE;
1765 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1766 sg_free_table(st);
1767 kfree(st);
1768 return -ENOMEM;
1769 }
1770
1771
1772
1773
1774
1775
1776 mapping = file_inode(obj->base.filp)->i_mapping;
1777 gfp = mapping_gfp_mask(mapping);
1778 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1779 gfp &= ~(__GFP_IO | __GFP_WAIT);
1780 sg = st->sgl;
1781 st->nents = 0;
1782 for (i = 0; i < page_count; i++) {
1783 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1784 if (IS_ERR(page)) {
1785 i915_gem_purge(dev_priv, page_count);
1786 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1787 }
1788 if (IS_ERR(page)) {
1789
1790
1791
1792
1793 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1794 gfp |= __GFP_IO | __GFP_WAIT;
1795
1796 i915_gem_shrink_all(dev_priv);
1797 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1798 if (IS_ERR(page))
1799 goto err_pages;
1800
1801 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1802 gfp &= ~(__GFP_IO | __GFP_WAIT);
1803 }
1804#ifdef CONFIG_SWIOTLB
1805 if (swiotlb_nr_tbl()) {
1806 st->nents++;
1807 sg_set_page(sg, page, PAGE_SIZE, 0);
1808 sg = sg_next(sg);
1809 continue;
1810 }
1811#endif
1812 if (!i || page_to_pfn(page) != last_pfn + 1) {
1813 if (i)
1814 sg = sg_next(sg);
1815 st->nents++;
1816 sg_set_page(sg, page, PAGE_SIZE, 0);
1817 } else {
1818 sg->length += PAGE_SIZE;
1819 }
1820 last_pfn = page_to_pfn(page);
1821 }
1822#ifdef CONFIG_SWIOTLB
1823 if (!swiotlb_nr_tbl())
1824#endif
1825 sg_mark_end(sg);
1826 obj->pages = st;
1827
1828 if (i915_gem_object_needs_bit17_swizzle(obj))
1829 i915_gem_object_do_bit_17_swizzle(obj);
1830
1831 return 0;
1832
1833err_pages:
1834 sg_mark_end(sg);
1835 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1836 page_cache_release(sg_page_iter_page(&sg_iter));
1837 sg_free_table(st);
1838 kfree(st);
1839 return PTR_ERR(page);
1840}
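/*
 * Ensure the object's backing pages are gathered from shmem and placed on
 * the unbound list; callers that need the pages to remain resident must
 * also pin them with i915_gem_object_pin_pages().
 */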
1849int
1850i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1851{
1852 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1853 const struct drm_i915_gem_object_ops *ops = obj->ops;
1854 int ret;
1855
1856 if (obj->pages)
1857 return 0;
1858
1859 if (obj->madv != I915_MADV_WILLNEED) {
1860 DRM_ERROR("Attempting to obtain a purgeable object\n");
1861 return -EINVAL;
1862 }
1863
1864 BUG_ON(obj->pages_pin_count);
1865
1866 ret = ops->get_pages(obj);
1867 if (ret)
1868 return ret;
1869
1870 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1871 return 0;
1872}
1873
1874void
1875i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1876 struct intel_ring_buffer *ring)
1877{
1878 struct drm_device *dev = obj->base.dev;
1879 struct drm_i915_private *dev_priv = dev->dev_private;
1880 u32 seqno = intel_ring_get_seqno(ring);
1881
1882 BUG_ON(ring == NULL);
1883 obj->ring = ring;
1884
1885
1886 if (!obj->active) {
1887 drm_gem_object_reference(&obj->base);
1888 obj->active = 1;
1889 }
1890
1891
1892 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1893 list_move_tail(&obj->ring_list, &ring->active_list);
1894
1895 obj->last_read_seqno = seqno;
1896
1897 if (obj->fenced_gpu_access) {
1898 obj->last_fenced_seqno = seqno;
1899
1900
1901 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1902 struct drm_i915_fence_reg *reg;
1903
1904 reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
1907 }
1908 }
1909}
1910
1911static void
1912i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1913{
1914 struct drm_device *dev = obj->base.dev;
1915 struct drm_i915_private *dev_priv = dev->dev_private;
1916
1917 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1918 BUG_ON(!obj->active);
1919
1920 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1921
1922 list_del_init(&obj->ring_list);
1923 obj->ring = NULL;
1924
1925 obj->last_read_seqno = 0;
1926 obj->last_write_seqno = 0;
1927 obj->base.write_domain = 0;
1928
1929 obj->last_fenced_seqno = 0;
1930 obj->fenced_gpu_access = false;
1931
1932 obj->active = 0;
1933 drm_gem_object_unreference(&obj->base);
1934
1935 WARN_ON(i915_verify_lists(dev));
1936}
1937
1938static int
1939i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1940{
1941 struct drm_i915_private *dev_priv = dev->dev_private;
1942 struct intel_ring_buffer *ring;
1943 int ret, i, j;
1944
1945
1946 for_each_ring(ring, dev_priv, i) {
1947 ret = intel_ring_idle(ring);
1948 if (ret)
1949 return ret;
1950 }
1951 i915_gem_retire_requests(dev);
1952
1953
1954 for_each_ring(ring, dev_priv, i) {
1955 intel_ring_init_seqno(ring, seqno);
1956
1957 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1958 ring->sync_seqno[j] = 0;
1959 }
1960
1961 return 0;
1962}
1963
1964int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1965{
1966 struct drm_i915_private *dev_priv = dev->dev_private;
1967 int ret;
1968
1969 if (seqno == 0)
1970 return -EINVAL;
1971
1972
1973
1974
1975 ret = i915_gem_init_seqno(dev, seqno - 1);
1976 if (ret)
1977 return ret;
1978
1979
1980
1981
1982 dev_priv->next_seqno = seqno;
1983 dev_priv->last_seqno = seqno - 1;
1984 if (dev_priv->last_seqno == 0)
1985 dev_priv->last_seqno--;
1986
1987 return 0;
1988}
1989
1990int
1991i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1992{
1993 struct drm_i915_private *dev_priv = dev->dev_private;
1994
1995
1996 if (dev_priv->next_seqno == 0) {
1997 int ret = i915_gem_init_seqno(dev, 0);
1998 if (ret)
1999 return ret;
2000
2001 dev_priv->next_seqno = 1;
2002 }
2003
2004 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2005 return 0;
2006}
2007
2008int
2009i915_add_request(struct intel_ring_buffer *ring,
2010 struct drm_file *file,
2011 u32 *out_seqno)
2012{
2013 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2014 struct drm_i915_gem_request *request;
2015 u32 request_ring_position;
2016 int was_empty;
2017 int ret;
2018
2019
2020
2021
2022
2023
2024
2025
2026 ret = intel_ring_flush_all_caches(ring);
2027 if (ret)
2028 return ret;
2029
2030 request = kmalloc(sizeof(*request), GFP_KERNEL);
2031 if (request == NULL)
2032 return -ENOMEM;
2033
2034
2035
2036
2037
2038
2039
2040 request_ring_position = intel_ring_get_tail(ring);
2041
2042 ret = ring->add_request(ring);
2043 if (ret) {
2044 kfree(request);
2045 return ret;
2046 }
2047
2048 request->seqno = intel_ring_get_seqno(ring);
2049 request->ring = ring;
2050 request->tail = request_ring_position;
2051 request->emitted_jiffies = jiffies;
2052 was_empty = list_empty(&ring->request_list);
2053 list_add_tail(&request->list, &ring->request_list);
2054 request->file_priv = NULL;
2055
2056 if (file) {
2057 struct drm_i915_file_private *file_priv = file->driver_priv;
2058
2059 spin_lock(&file_priv->mm.lock);
2060 request->file_priv = file_priv;
2061 list_add_tail(&request->client_list,
2062 &file_priv->mm.request_list);
2063 spin_unlock(&file_priv->mm.lock);
2064 }
2065
2066 trace_i915_gem_request_add(ring, request->seqno);
2067 ring->outstanding_lazy_request = 0;
2068
2069 if (!dev_priv->mm.suspended) {
2070 if (i915_enable_hangcheck) {
2071 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2072 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2073 }
2074 if (was_empty) {
2075 queue_delayed_work(dev_priv->wq,
2076 &dev_priv->mm.retire_work,
2077 round_jiffies_up_relative(HZ));
2078 intel_mark_busy(dev_priv->dev);
2079 }
2080 }
2081
2082 if (out_seqno)
2083 *out_seqno = request->seqno;
2084 return 0;
2085}
2086
2087static inline void
2088i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2089{
2090 struct drm_i915_file_private *file_priv = request->file_priv;
2091
2092 if (!file_priv)
2093 return;
2094
2095 spin_lock(&file_priv->mm.lock);
2096 if (request->file_priv) {
2097 list_del(&request->client_list);
2098 request->file_priv = NULL;
2099 }
2100 spin_unlock(&file_priv->mm.lock);
2101}
2102
2103static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2104 struct intel_ring_buffer *ring)
2105{
2106 while (!list_empty(&ring->request_list)) {
2107 struct drm_i915_gem_request *request;
2108
2109 request = list_first_entry(&ring->request_list,
2110 struct drm_i915_gem_request,
2111 list);
2112
2113 list_del(&request->list);
2114 i915_gem_request_remove_from_client(request);
2115 kfree(request);
2116 }
2117
2118 while (!list_empty(&ring->active_list)) {
2119 struct drm_i915_gem_object *obj;
2120
2121 obj = list_first_entry(&ring->active_list,
2122 struct drm_i915_gem_object,
2123 ring_list);
2124
2125 i915_gem_object_move_to_inactive(obj);
2126 }
2127}
2128
2129void i915_gem_restore_fences(struct drm_device *dev)
2130{
2131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 int i;
2133
2134 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2135 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2136 i915_gem_write_fence(dev, i, reg->obj);
2137 }
2138}
2139
2140void i915_gem_reset(struct drm_device *dev)
2141{
2142 struct drm_i915_private *dev_priv = dev->dev_private;
2143 struct drm_i915_gem_object *obj;
2144 struct intel_ring_buffer *ring;
2145 int i;
2146
2147 for_each_ring(ring, dev_priv, i)
2148 i915_gem_reset_ring_lists(dev_priv, ring);
2149
2150
2151
2152
2153 list_for_each_entry(obj,
2154 &dev_priv->mm.inactive_list,
2155 mm_list)
2156 {
2157 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2158 }
2159
2160 i915_gem_restore_fences(dev);
2161}
2162
2163
2164
2165
2166void
2167i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2168{
2169 uint32_t seqno;
2170
2171 if (list_empty(&ring->request_list))
2172 return;
2173
2174 WARN_ON(i915_verify_lists(ring->dev));
2175
2176 seqno = ring->get_seqno(ring, true);
2177
2178 while (!list_empty(&ring->request_list)) {
2179 struct drm_i915_gem_request *request;
2180
2181 request = list_first_entry(&ring->request_list,
2182 struct drm_i915_gem_request,
2183 list);
2184
2185 if (!i915_seqno_passed(seqno, request->seqno))
2186 break;
2187
2188 trace_i915_gem_request_retire(ring, request->seqno);
2189
2190
2191
2192
2193
2194 ring->last_retired_head = request->tail;
2195
2196 list_del(&request->list);
2197 i915_gem_request_remove_from_client(request);
2198 kfree(request);
2199 }
2200
2201
2202
2203
2204 while (!list_empty(&ring->active_list)) {
2205 struct drm_i915_gem_object *obj;
2206
2207 obj = list_first_entry(&ring->active_list,
2208 struct drm_i915_gem_object,
2209 ring_list);
2210
2211 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2212 break;
2213
2214 i915_gem_object_move_to_inactive(obj);
2215 }
2216
2217 if (unlikely(ring->trace_irq_seqno &&
2218 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2219 ring->irq_put(ring);
2220 ring->trace_irq_seqno = 0;
2221 }
2222
2223 WARN_ON(i915_verify_lists(ring->dev));
2224}
2225
2226void
2227i915_gem_retire_requests(struct drm_device *dev)
2228{
2229 drm_i915_private_t *dev_priv = dev->dev_private;
2230 struct intel_ring_buffer *ring;
2231 int i;
2232
2233 for_each_ring(ring, dev_priv, i)
2234 i915_gem_retire_requests_ring(ring);
2235}
2236
2237static void
2238i915_gem_retire_work_handler(struct work_struct *work)
2239{
2240 drm_i915_private_t *dev_priv;
2241 struct drm_device *dev;
2242 struct intel_ring_buffer *ring;
2243 bool idle;
2244 int i;
2245
2246 dev_priv = container_of(work, drm_i915_private_t,
2247 mm.retire_work.work);
2248 dev = dev_priv->dev;
2249
2250
2251 if (!mutex_trylock(&dev->struct_mutex)) {
2252 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2253 round_jiffies_up_relative(HZ));
2254 return;
2255 }
2256
2257 i915_gem_retire_requests(dev);
2258
2259
2260
2261
2262 idle = true;
2263 for_each_ring(ring, dev_priv, i) {
2264 if (ring->gpu_caches_dirty)
2265 i915_add_request(ring, NULL, NULL);
2266
2267 idle &= list_empty(&ring->request_list);
2268 }
2269
2270 if (!dev_priv->mm.suspended && !idle)
2271 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2272 round_jiffies_up_relative(HZ));
2273 if (idle)
2274 intel_mark_idle(dev);
2275
2276 mutex_unlock(&dev->struct_mutex);
2277}
2278
2279
2280
2281
2282
2283
2284static int
2285i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2286{
2287 int ret;
2288
2289 if (obj->active) {
2290 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2291 if (ret)
2292 return ret;
2293
2294 i915_gem_retire_requests_ring(obj->ring);
2295 }
2296
2297 return 0;
2298}
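/*
 * i915_gem_wait_ioctl - wait, with a user-supplied timeout in nanoseconds,
 * for the last rendering to an object to complete.  The remaining time is
 * written back to userspace and -ETIME is returned if the wait expires.
 */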
2322int
2323i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2324{
2325 drm_i915_private_t *dev_priv = dev->dev_private;
2326 struct drm_i915_gem_wait *args = data;
2327 struct drm_i915_gem_object *obj;
2328 struct intel_ring_buffer *ring = NULL;
2329 struct timespec timeout_stack, *timeout = NULL;
2330 unsigned reset_counter;
2331 u32 seqno = 0;
2332 int ret = 0;
2333
2334 if (args->timeout_ns >= 0) {
2335 timeout_stack = ns_to_timespec(args->timeout_ns);
2336 timeout = &timeout_stack;
2337 }
2338
2339 ret = i915_mutex_lock_interruptible(dev);
2340 if (ret)
2341 return ret;
2342
2343 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2344 if (&obj->base == NULL) {
2345 mutex_unlock(&dev->struct_mutex);
2346 return -ENOENT;
2347 }
2348
2349
2350 ret = i915_gem_object_flush_active(obj);
2351 if (ret)
2352 goto out;
2353
2354 if (obj->active) {
2355 seqno = obj->last_read_seqno;
2356 ring = obj->ring;
2357 }
2358
2359 if (seqno == 0)
2360 goto out;
2361
2362
2363
2364
2365 if (!args->timeout_ns) {
2366 ret = -ETIME;
2367 goto out;
2368 }
2369
2370 drm_gem_object_unreference(&obj->base);
2371 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2372 mutex_unlock(&dev->struct_mutex);
2373
2374 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2375 if (timeout)
2376 args->timeout_ns = timespec_to_ns(timeout);
2377 return ret;
2378
2379out:
2380 drm_gem_object_unreference(&obj->base);
2381 mutex_unlock(&dev->struct_mutex);
2382 return ret;
2383}
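/*
 * i915_gem_object_sync - make rendering to the object on its current ring
 * visible to the @to ring, using a semaphore wait when available and
 * otherwise waiting for rendering to complete on the CPU.
 */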
2397int
2398i915_gem_object_sync(struct drm_i915_gem_object *obj,
2399 struct intel_ring_buffer *to)
2400{
2401 struct intel_ring_buffer *from = obj->ring;
2402 u32 seqno;
2403 int ret, idx;
2404
2405 if (from == NULL || to == from)
2406 return 0;
2407
2408 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2409 return i915_gem_object_wait_rendering(obj, false);
2410
2411 idx = intel_ring_sync_index(from, to);
2412
2413 seqno = obj->last_read_seqno;
2414 if (seqno <= from->sync_seqno[idx])
2415 return 0;
2416
2417 ret = i915_gem_check_olr(obj->ring, seqno);
2418 if (ret)
2419 return ret;
2420
2421 ret = to->sync_to(to, from, seqno);
2422 if (!ret)
2423
2424
2425
2426
2427 from->sync_seqno[idx] = obj->last_read_seqno;
2428
2429 return ret;
2430}
2431
2432static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2433{
2434 u32 old_write_domain, old_read_domains;
2435
2436
2437 i915_gem_release_mmap(obj);
2438
2439 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2440 return;
2441
2442
2443 mb();
2444
2445 old_read_domains = obj->base.read_domains;
2446 old_write_domain = obj->base.write_domain;
2447
2448 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2449 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2450
2451 trace_i915_gem_object_change_domain(obj,
2452 old_read_domains,
2453 old_write_domain);
2454}
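/* Unbinds an object from the GTT aperture, dropping its fence and mappings. */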
2459int
2460i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2461{
2462 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2463 int ret;
2464
2465 if (obj->gtt_space == NULL)
2466 return 0;
2467
2468 if (obj->pin_count)
2469 return -EBUSY;
2470
2471 BUG_ON(obj->pages == NULL);
2472
2473 ret = i915_gem_object_finish_gpu(obj);
2474 if (ret)
2475 return ret;
2476
2477
2478
2479
2480
2481 i915_gem_object_finish_gtt(obj);
2482
2483
2484 ret = i915_gem_object_put_fence(obj);
2485 if (ret)
2486 return ret;
2487
2488 trace_i915_gem_object_unbind(obj);
2489
2490 if (obj->has_global_gtt_mapping)
2491 i915_gem_gtt_unbind_object(obj);
2492 if (obj->has_aliasing_ppgtt_mapping) {
2493 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2494 obj->has_aliasing_ppgtt_mapping = 0;
2495 }
2496 i915_gem_gtt_finish_object(obj);
2497
2498 list_del(&obj->mm_list);
2499 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2500
2501 obj->map_and_fenceable = true;
2502
2503 drm_mm_put_block(obj->gtt_space);
2504 obj->gtt_space = NULL;
2505 obj->gtt_offset = 0;
2506
2507 return 0;
2508}
2509
2510int i915_gpu_idle(struct drm_device *dev)
2511{
2512 drm_i915_private_t *dev_priv = dev->dev_private;
2513 struct intel_ring_buffer *ring;
2514 int ret, i;
2515
2516
2517 for_each_ring(ring, dev_priv, i) {
2518 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2519 if (ret)
2520 return ret;
2521
2522 ret = intel_ring_idle(ring);
2523 if (ret)
2524 return ret;
2525 }
2526
2527 return 0;
2528}
2529
2530static void i965_write_fence_reg(struct drm_device *dev, int reg,
2531 struct drm_i915_gem_object *obj)
2532{
2533 drm_i915_private_t *dev_priv = dev->dev_private;
2534 int fence_reg;
2535 int fence_pitch_shift;
2536 uint64_t val;
2537
2538 if (INTEL_INFO(dev)->gen >= 6) {
2539 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2540 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2541 } else {
2542 fence_reg = FENCE_REG_965_0;
2543 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2544 }
2545
2546 if (obj) {
2547 u32 size = obj->gtt_space->size;
2548
2549 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2550 0xfffff000) << 32;
2551 val |= obj->gtt_offset & 0xfffff000;
2552 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2553 if (obj->tiling_mode == I915_TILING_Y)
2554 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2555 val |= I965_FENCE_REG_VALID;
2556 } else
2557 val = 0;
2558
2559 fence_reg += reg * 8;
2560 I915_WRITE64(fence_reg, val);
2561 POSTING_READ(fence_reg);
2562}
2563
2564static void i915_write_fence_reg(struct drm_device *dev, int reg,
2565 struct drm_i915_gem_object *obj)
2566{
2567 drm_i915_private_t *dev_priv = dev->dev_private;
2568 u32 val;
2569
2570 if (obj) {
2571 u32 size = obj->gtt_space->size;
2572 int pitch_val;
2573 int tile_width;
2574
2575 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2576 (size & -size) != size ||
2577 (obj->gtt_offset & (size - 1)),
2578 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2579 obj->gtt_offset, obj->map_and_fenceable, size);
2580
2581 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2582 tile_width = 128;
2583 else
2584 tile_width = 512;
2585
2586
2587 pitch_val = obj->stride / tile_width;
2588 pitch_val = ffs(pitch_val) - 1;
2589
2590 val = obj->gtt_offset;
2591 if (obj->tiling_mode == I915_TILING_Y)
2592 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2593 val |= I915_FENCE_SIZE_BITS(size);
2594 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2595 val |= I830_FENCE_REG_VALID;
2596 } else
2597 val = 0;
2598
2599 if (reg < 8)
2600 reg = FENCE_REG_830_0 + reg * 4;
2601 else
2602 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2603
2604 I915_WRITE(reg, val);
2605 POSTING_READ(reg);
2606}
2607
2608static void i830_write_fence_reg(struct drm_device *dev, int reg,
2609 struct drm_i915_gem_object *obj)
2610{
2611 drm_i915_private_t *dev_priv = dev->dev_private;
2612 uint32_t val;
2613
2614 if (obj) {
2615 u32 size = obj->gtt_space->size;
2616 uint32_t pitch_val;
2617
2618 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2619 (size & -size) != size ||
2620 (obj->gtt_offset & (size - 1)),
2621 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2622 obj->gtt_offset, size);
2623
2624 pitch_val = obj->stride / 128;
2625 pitch_val = ffs(pitch_val) - 1;
2626
2627 val = obj->gtt_offset;
2628 if (obj->tiling_mode == I915_TILING_Y)
2629 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2630 val |= I830_FENCE_SIZE_BITS(size);
2631 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2632 val |= I830_FENCE_REG_VALID;
2633 } else
2634 val = 0;
2635
2636 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2637 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2638}
2639
2640 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2641{
2642 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2643}
2644
2645static void i915_gem_write_fence(struct drm_device *dev, int reg,
2646 struct drm_i915_gem_object *obj)
2647{
2648 struct drm_i915_private *dev_priv = dev->dev_private;
2649
2650
2651
2652
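	/* Ensure that all CPU access through the old fence has completed
	 * before the register is rewritten.
	 */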
2653 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2654 mb();
2655
2656 switch (INTEL_INFO(dev)->gen) {
2657 case 7:
2658 case 6:
2659 case 5:
2660 case 4: i965_write_fence_reg(dev, reg, obj); break;
2661 case 3: i915_write_fence_reg(dev, reg, obj); break;
2662 case 2: i830_write_fence_reg(dev, reg, obj); break;
2663 default: BUG();
2664 }
2665
2666
2667
2668
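	/* And order the register write before any access through the new
	 * fence.
	 */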
2669 if (i915_gem_object_needs_mb(obj))
2670 mb();
2671}
2672
2673static inline int fence_number(struct drm_i915_private *dev_priv,
2674 struct drm_i915_fence_reg *fence)
2675{
2676 return fence - dev_priv->fence_regs;
2677}
2678
2679static void i915_gem_write_fence__ipi(void *data)
2680{
2681 wbinvd();
2682}
2683
2684static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2685 struct drm_i915_fence_reg *fence,
2686 bool enable)
2687{
2688 struct drm_device *dev = obj->base.dev;
2689 struct drm_i915_private *dev_priv = dev->dev_private;
2690 int fence_reg = fence_number(dev_priv, fence);
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
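	/* On LLC platforms, flush every CPU's caches (an IPI running wbinvd)
	 * before touching the fence register, so that no cachelines covering
	 * the fenced range remain stale across the update.
	 */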
2702 if (HAS_LLC(obj->base.dev))
2703 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2704 i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2705
2706 if (enable) {
2707 obj->fence_reg = fence_reg;
2708 fence->obj = obj;
2709 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2710 } else {
2711 obj->fence_reg = I915_FENCE_REG_NONE;
2712 fence->obj = NULL;
2713 list_del_init(&fence->lru_list);
2714 }
2715}
2716
2717static int
2718i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2719{
2720 if (obj->last_fenced_seqno) {
2721 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2722 if (ret)
2723 return ret;
2724
2725 obj->last_fenced_seqno = 0;
2726 }
2727
2728 obj->fenced_gpu_access = false;
2729 return 0;
2730}
2731
2732int
2733i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2734{
2735 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2736 struct drm_i915_fence_reg *fence;
2737 int ret;
2738
2739 ret = i915_gem_object_wait_fence(obj);
2740 if (ret)
2741 return ret;
2742
2743 if (obj->fence_reg == I915_FENCE_REG_NONE)
2744 return 0;
2745
2746 fence = &dev_priv->fence_regs[obj->fence_reg];
2747
2748 i915_gem_object_fence_lost(obj);
2749 i915_gem_object_update_fence(obj, fence, false);
2750
2751 return 0;
2752}
2753
2754static struct drm_i915_fence_reg *
2755i915_find_fence_reg(struct drm_device *dev)
2756{
2757 struct drm_i915_private *dev_priv = dev->dev_private;
2758 struct drm_i915_fence_reg *reg, *avail;
2759 int i;
2760
2761
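	/* First try to find a completely free fence register. */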
2762 avail = NULL;
2763 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2764 reg = &dev_priv->fence_regs[i];
2765 if (!reg->obj)
2766 return reg;
2767
2768 if (!reg->pin_count)
2769 avail = reg;
2770 }
2771
2772 if (avail == NULL)
2773 return NULL;
2774
2775
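	/* None free: steal the least-recently-used register that is not
	 * currently pinned.
	 */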
2776 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2777 if (reg->pin_count)
2778 continue;
2779
2780 return reg;
2781 }
2782
2783 return NULL;
2784}
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
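/*
 * i915_gem_object_get_fence - set up fencing for an object
 *
 * Reuses the object's current fence register when it already has one,
 * otherwise finds (or steals) a free register and programs it from the
 * object's offset, stride and tiling mode.  For untiled objects this
 * simply drops any existing fence.
 */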
2800int
2801i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2802{
2803 struct drm_device *dev = obj->base.dev;
2804 struct drm_i915_private *dev_priv = dev->dev_private;
2805 bool enable = obj->tiling_mode != I915_TILING_NONE;
2806 struct drm_i915_fence_reg *reg;
2807 int ret;
2808
2809
2810
2811
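	/* If the fence parameters have changed, wait for any access through
	 * the old setup before reprogramming the register.
	 */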
2812 if (obj->fence_dirty) {
2813 ret = i915_gem_object_wait_fence(obj);
2814 if (ret)
2815 return ret;
2816 }
2817
2818
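	/* Just update our place in the LRU if our fence is getting reused. */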
2819 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2820 reg = &dev_priv->fence_regs[obj->fence_reg];
2821 if (!obj->fence_dirty) {
2822 			list_move_tail(&reg->lru_list,
2823 &dev_priv->mm.fence_list);
2824 return 0;
2825 }
2826 } else if (enable) {
2827 reg = i915_find_fence_reg(dev);
2828 if (reg == NULL)
2829 return -EDEADLK;
2830
2831 if (reg->obj) {
2832 struct drm_i915_gem_object *old = reg->obj;
2833
2834 ret = i915_gem_object_wait_fence(old);
2835 if (ret)
2836 return ret;
2837
2838 i915_gem_object_fence_lost(old);
2839 }
2840 } else
2841 return 0;
2842
2843 i915_gem_object_update_fence(obj, reg, enable);
2844 obj->fence_dirty = false;
2845
2846 return 0;
2847}
2848
2849static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2850 struct drm_mm_node *gtt_space,
2851 unsigned long cache_level)
2852{
2853 struct drm_mm_node *other;
2854
2855
2856
2857
2858
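	/* On non-LLC machines, neighbouring objects of differing cache levels
	 * must not sit directly next to each other; a guard hole is required
	 * between them, hence the colouring checks below.
	 */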
2859 if (HAS_LLC(dev))
2860 return true;
2861
2862 if (gtt_space == NULL)
2863 return true;
2864
2865 	if (list_empty(&gtt_space->node_list))
2866 return true;
2867
2868 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2869 if (other->allocated && !other->hole_follows && other->color != cache_level)
2870 return false;
2871
2872 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2873 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2874 return false;
2875
2876 return true;
2877}
2878
2879static void i915_gem_verify_gtt(struct drm_device *dev)
2880{
2881#if WATCH_GTT
2882 struct drm_i915_private *dev_priv = dev->dev_private;
2883 struct drm_i915_gem_object *obj;
2884 int err = 0;
2885
2886 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2887 if (obj->gtt_space == NULL) {
2888 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2889 err++;
2890 continue;
2891 }
2892
2893 if (obj->cache_level != obj->gtt_space->color) {
2894 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2895 obj->gtt_space->start,
2896 obj->gtt_space->start + obj->gtt_space->size,
2897 obj->cache_level,
2898 obj->gtt_space->color);
2899 err++;
2900 continue;
2901 }
2902
2903 if (!i915_gem_valid_gtt_space(dev,
2904 obj->gtt_space,
2905 obj->cache_level)) {
2906 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2907 obj->gtt_space->start,
2908 obj->gtt_space->start + obj->gtt_space->size,
2909 obj->cache_level);
2910 err++;
2911 continue;
2912 }
2913 }
2914
2915 WARN_ON(err);
2916#endif
2917}
2918
2919
2920
2921
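/*
 * Find free space in the GTT aperture for the object and bind it there,
 * evicting other objects if necessary.
 */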
2922static int
2923i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2924 unsigned alignment,
2925 bool map_and_fenceable,
2926 bool nonblocking)
2927{
2928 struct drm_device *dev = obj->base.dev;
2929 drm_i915_private_t *dev_priv = dev->dev_private;
2930 struct drm_mm_node *node;
2931 u32 size, fence_size, fence_alignment, unfenced_alignment;
2932 bool mappable, fenceable;
2933 int ret;
2934
2935 fence_size = i915_gem_get_gtt_size(dev,
2936 obj->base.size,
2937 obj->tiling_mode);
2938 fence_alignment = i915_gem_get_gtt_alignment(dev,
2939 obj->base.size,
2940 obj->tiling_mode, true);
2941 unfenced_alignment =
2942 i915_gem_get_gtt_alignment(dev,
2943 obj->base.size,
2944 obj->tiling_mode, false);
2945
2946 if (alignment == 0)
2947 alignment = map_and_fenceable ? fence_alignment :
2948 unfenced_alignment;
2949 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2950 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2951 return -EINVAL;
2952 }
2953
2954 size = map_and_fenceable ? fence_size : obj->base.size;
2955
2956
2957
2958
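	/* Reject objects larger than the (mappable) aperture up front rather
	 * than evicting everything in a futile search for space.
	 */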
2959 if (obj->base.size >
2960 (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
2961 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2962 return -E2BIG;
2963 }
2964
2965 ret = i915_gem_object_get_pages(obj);
2966 if (ret)
2967 return ret;
2968
2969 i915_gem_object_pin_pages(obj);
2970
2971 node = kzalloc(sizeof(*node), GFP_KERNEL);
2972 if (node == NULL) {
2973 i915_gem_object_unpin_pages(obj);
2974 return -ENOMEM;
2975 }
2976
2977 search_free:
2978 if (map_and_fenceable)
2979 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2980 size, alignment, obj->cache_level,
2981 0, dev_priv->gtt.mappable_end);
2982 else
2983 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2984 size, alignment, obj->cache_level);
2985 if (ret) {
2986 ret = i915_gem_evict_something(dev, size, alignment,
2987 obj->cache_level,
2988 map_and_fenceable,
2989 nonblocking);
2990 if (ret == 0)
2991 goto search_free;
2992
2993 i915_gem_object_unpin_pages(obj);
2994 kfree(node);
2995 return ret;
2996 }
2997 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
2998 i915_gem_object_unpin_pages(obj);
2999 drm_mm_put_block(node);
3000 return -EINVAL;
3001 }
3002
3003 ret = i915_gem_gtt_prepare_object(obj);
3004 if (ret) {
3005 i915_gem_object_unpin_pages(obj);
3006 drm_mm_put_block(node);
3007 return ret;
3008 }
3009
3010 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3011 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3012
3013 obj->gtt_space = node;
3014 obj->gtt_offset = node->start;
3015
3016 fenceable =
3017 node->size == fence_size &&
3018 (node->start & (fence_alignment - 1)) == 0;
3019
3020 mappable =
3021 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
3022
3023 obj->map_and_fenceable = mappable && fenceable;
3024
3025 i915_gem_object_unpin_pages(obj);
3026 trace_i915_gem_object_bind(obj, map_and_fenceable);
3027 i915_gem_verify_gtt(dev);
3028 return 0;
3029}
3030
3031void
3032i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3033{
3034
3035
3036
3037
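	/* Without backing pages there is nothing to flush yet. */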
3038 if (obj->pages == NULL)
3039 return;
3040
3041
3042
3043
3044
3045 if (obj->stolen)
3046 return;
3047
3048
3049
3050
3051
3052
3053
3054
3055
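	/* Snooped (cached) objects are kept coherent by the GPU, so only
	 * uncached objects need an explicit clflush.
	 */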
3056 if (obj->cache_level != I915_CACHE_NONE)
3057 return;
3058
3059 trace_i915_gem_object_clflush(obj);
3060
3061 drm_clflush_sg(obj->pages);
3062}
3063
3064
3065static void
3066i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3067{
3068 uint32_t old_write_domain;
3069
3070 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3071 return;
3072
3073
3074
3075
3076
3077
3078
3079
3080
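	/* GTT writes land in main memory directly, so no cache flush is
	 * required; a write barrier is still needed to order them before any
	 * subsequent writes to the device.
	 */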
3081 wmb();
3082
3083 old_write_domain = obj->base.write_domain;
3084 obj->base.write_domain = 0;
3085
3086 trace_i915_gem_object_change_domain(obj,
3087 obj->base.read_domains,
3088 old_write_domain);
3089}
3090
3091
3092static void
3093i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3094{
3095 uint32_t old_write_domain;
3096
3097 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3098 return;
3099
3100 i915_gem_clflush_object(obj);
3101 i915_gem_chipset_flush(obj->base.dev);
3102 old_write_domain = obj->base.write_domain;
3103 obj->base.write_domain = 0;
3104
3105 trace_i915_gem_object_change_domain(obj,
3106 obj->base.read_domains,
3107 old_write_domain);
3108}
3109
3110
3111
3112
3113
3114
3115
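/*
 * Moves a single object to the GTT read, and possibly write, domain.
 * Returns once the move is complete, including waiting for any
 * outstanding rendering.
 */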
3116int
3117i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3118{
3119 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3120 uint32_t old_write_domain, old_read_domains;
3121 int ret;
3122
3123
3124 if (obj->gtt_space == NULL)
3125 return -EINVAL;
3126
3127 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3128 return 0;
3129
3130 ret = i915_gem_object_wait_rendering(obj, !write);
3131 if (ret)
3132 return ret;
3133
3134 i915_gem_object_flush_cpu_write_domain(obj);
3135
3136
3137
3138
3139
3140 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3141 mb();
3142
3143 old_write_domain = obj->base.write_domain;
3144 old_read_domains = obj->base.read_domains;
3145
3146
3147
3148
3149 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3150 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3151 if (write) {
3152 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3153 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3154 obj->dirty = 1;
3155 }
3156
3157 trace_i915_gem_object_change_domain(obj,
3158 old_read_domains,
3159 old_write_domain);
3160
3161
3162 if (i915_gem_object_is_inactive(obj))
3163 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3164
3165 return 0;
3166}
3167
3168int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3169 enum i915_cache_level cache_level)
3170{
3171 struct drm_device *dev = obj->base.dev;
3172 drm_i915_private_t *dev_priv = dev->dev_private;
3173 int ret;
3174
3175 if (obj->cache_level == cache_level)
3176 return 0;
3177
3178 if (obj->pin_count) {
3179 DRM_DEBUG("can not change the cache level of pinned objects\n");
3180 return -EBUSY;
3181 }
3182
3183 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3184 ret = i915_gem_object_unbind(obj);
3185 if (ret)
3186 return ret;
3187 }
3188
3189 if (obj->gtt_space) {
3190 ret = i915_gem_object_finish_gpu(obj);
3191 if (ret)
3192 return ret;
3193
3194 i915_gem_object_finish_gtt(obj);
3195
3196
3197
3198
3199
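		/* Before SandyBridge, fence registers cannot be used with
		 * snooped memory, so drop any fence covering this object
		 * when its cache level changes.
		 */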
3200 if (INTEL_INFO(dev)->gen < 6) {
3201 ret = i915_gem_object_put_fence(obj);
3202 if (ret)
3203 return ret;
3204 }
3205
3206 if (obj->has_global_gtt_mapping)
3207 i915_gem_gtt_bind_object(obj, cache_level);
3208 if (obj->has_aliasing_ppgtt_mapping)
3209 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3210 obj, cache_level);
3211
3212 obj->gtt_space->color = cache_level;
3213 }
3214
3215 if (cache_level == I915_CACHE_NONE) {
3216 u32 old_read_domains, old_write_domain;
3217
3218
3219
3220
3221
3222
3223
3224 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3225 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3226
3227 old_read_domains = obj->base.read_domains;
3228 old_write_domain = obj->base.write_domain;
3229
3230 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3231 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3232
3233 trace_i915_gem_object_change_domain(obj,
3234 old_read_domains,
3235 old_write_domain);
3236 }
3237
3238 obj->cache_level = cache_level;
3239 i915_gem_verify_gtt(dev);
3240 return 0;
3241}
3242
3243int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3244 struct drm_file *file)
3245{
3246 struct drm_i915_gem_caching *args = data;
3247 struct drm_i915_gem_object *obj;
3248 int ret;
3249
3250 ret = i915_mutex_lock_interruptible(dev);
3251 if (ret)
3252 return ret;
3253
3254 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3255 if (&obj->base == NULL) {
3256 ret = -ENOENT;
3257 goto unlock;
3258 }
3259
3260 args->caching = obj->cache_level != I915_CACHE_NONE;
3261
3262 drm_gem_object_unreference(&obj->base);
3263unlock:
3264 mutex_unlock(&dev->struct_mutex);
3265 return ret;
3266}
3267
3268int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3269 struct drm_file *file)
3270{
3271 struct drm_i915_gem_caching *args = data;
3272 struct drm_i915_gem_object *obj;
3273 enum i915_cache_level level;
3274 int ret;
3275
3276 switch (args->caching) {
3277 case I915_CACHING_NONE:
3278 level = I915_CACHE_NONE;
3279 break;
3280 case I915_CACHING_CACHED:
3281 level = I915_CACHE_LLC;
3282 break;
3283 default:
3284 return -EINVAL;
3285 }
3286
3287 ret = i915_mutex_lock_interruptible(dev);
3288 if (ret)
3289 return ret;
3290
3291 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3292 if (&obj->base == NULL) {
3293 ret = -ENOENT;
3294 goto unlock;
3295 }
3296
3297 ret = i915_gem_object_set_cache_level(obj, level);
3298
3299 drm_gem_object_unreference(&obj->base);
3300unlock:
3301 mutex_unlock(&dev->struct_mutex);
3302 return ret;
3303}
3304
3305
3306
3307
3308
3309
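/*
 * Prepare a buffer for use as a display plane (scanout, cursor, overlay):
 * synchronises against the given pipelined ring, forces an uncached
 * mapping and pins the object into the mappable aperture.
 */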
3310int
3311i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3312 u32 alignment,
3313 struct intel_ring_buffer *pipelined)
3314{
3315 u32 old_read_domains, old_write_domain;
3316 int ret;
3317
3318 if (pipelined != obj->ring) {
3319 ret = i915_gem_object_sync(obj, pipelined);
3320 if (ret)
3321 return ret;
3322 }
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
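	/* The display engine is not coherent with the LLC, so pin scanout
	 * buffers with uncached PTEs as the lowest common denominator across
	 * platforms.
	 */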
3333 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3334 if (ret)
3335 return ret;
3336
3337
3338
3339
3340
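	/* Userspace may mmap the buffer once it is pinned for scanout, so it
	 * must always be bound map_and_fenceable.
	 */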
3341 ret = i915_gem_object_pin(obj, alignment, true, false);
3342 if (ret)
3343 return ret;
3344
3345 i915_gem_object_flush_cpu_write_domain(obj);
3346
3347 old_write_domain = obj->base.write_domain;
3348 old_read_domains = obj->base.read_domains;
3349
3350
3351
3352
3353 obj->base.write_domain = 0;
3354 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3355
3356 trace_i915_gem_object_change_domain(obj,
3357 old_read_domains,
3358 old_write_domain);
3359
3360 return 0;
3361}
3362
3363int
3364i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3365{
3366 int ret;
3367
3368 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3369 return 0;
3370
3371 ret = i915_gem_object_wait_rendering(obj, false);
3372 if (ret)
3373 return ret;
3374
3375
3376 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3377 return 0;
3378}
3379
3380
3381
3382
3383
3384
3385
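/*
 * Moves a single object to the CPU read, and possibly write, domain.
 * Returns once the move is complete, including waiting for any
 * outstanding rendering.
 */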
3386int
3387i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3388{
3389 uint32_t old_write_domain, old_read_domains;
3390 int ret;
3391
3392 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3393 return 0;
3394
3395 ret = i915_gem_object_wait_rendering(obj, !write);
3396 if (ret)
3397 return ret;
3398
3399 i915_gem_object_flush_gtt_write_domain(obj);
3400
3401 old_write_domain = obj->base.write_domain;
3402 old_read_domains = obj->base.read_domains;
3403
3404
3405 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3406 i915_gem_clflush_object(obj);
3407
3408 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3409 }
3410
3411
3412
3413
3414 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3415
3416
3417
3418
3419 if (write) {
3420 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3421 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3422 }
3423
3424 trace_i915_gem_object_change_domain(obj,
3425 old_read_domains,
3426 old_write_domain);
3427
3428 return 0;
3429}
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
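/*
 * Throttle rendering by waiting for requests emitted more than ~20ms ago
 * to complete.  This gives reasonable parallelism between CPU and GPU
 * while keeping the latency of blocking on any one request low.
 */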
3441static int
3442i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3443{
3444 struct drm_i915_private *dev_priv = dev->dev_private;
3445 struct drm_i915_file_private *file_priv = file->driver_priv;
3446 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3447 struct drm_i915_gem_request *request;
3448 struct intel_ring_buffer *ring = NULL;
3449 unsigned reset_counter;
3450 u32 seqno = 0;
3451 int ret;
3452
3453 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3454 if (ret)
3455 return ret;
3456
3457 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3458 if (ret)
3459 return ret;
3460
3461 spin_lock(&file_priv->mm.lock);
3462 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3463 if (time_after_eq(request->emitted_jiffies, recent_enough))
3464 break;
3465
3466 ring = request->ring;
3467 seqno = request->seqno;
3468 }
3469 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3470 spin_unlock(&file_priv->mm.lock);
3471
3472 if (seqno == 0)
3473 return 0;
3474
3475 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3476 if (ret == 0)
3477 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3478
3479 return ret;
3480}
3481
3482int
3483i915_gem_object_pin(struct drm_i915_gem_object *obj,
3484 uint32_t alignment,
3485 bool map_and_fenceable,
3486 bool nonblocking)
3487{
3488 int ret;
3489
3490 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3491 return -EBUSY;
3492
3493 if (obj->gtt_space != NULL) {
3494 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3495 (map_and_fenceable && !obj->map_and_fenceable)) {
3496 WARN(obj->pin_count,
3497 "bo is already pinned with incorrect alignment:"
3498 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3499 " obj->map_and_fenceable=%d\n",
3500 obj->gtt_offset, alignment,
3501 map_and_fenceable,
3502 obj->map_and_fenceable);
3503 ret = i915_gem_object_unbind(obj);
3504 if (ret)
3505 return ret;
3506 }
3507 }
3508
3509 if (obj->gtt_space == NULL) {
3510 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3511
3512 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3513 map_and_fenceable,
3514 nonblocking);
3515 if (ret)
3516 return ret;
3517
3518 if (!dev_priv->mm.aliasing_ppgtt)
3519 i915_gem_gtt_bind_object(obj, obj->cache_level);
3520 }
3521
3522 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3523 i915_gem_gtt_bind_object(obj, obj->cache_level);
3524
3525 obj->pin_count++;
3526 obj->pin_mappable |= map_and_fenceable;
3527
3528 return 0;
3529}
3530
3531void
3532i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3533{
3534 BUG_ON(obj->pin_count == 0);
3535 BUG_ON(obj->gtt_space == NULL);
3536
3537 if (--obj->pin_count == 0)
3538 obj->pin_mappable = false;
3539}
3540
3541int
3542i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3543 struct drm_file *file)
3544{
3545 struct drm_i915_gem_pin *args = data;
3546 struct drm_i915_gem_object *obj;
3547 int ret;
3548
3549 ret = i915_mutex_lock_interruptible(dev);
3550 if (ret)
3551 return ret;
3552
3553 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3554 if (&obj->base == NULL) {
3555 ret = -ENOENT;
3556 goto unlock;
3557 }
3558
3559 if (obj->madv != I915_MADV_WILLNEED) {
3560 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3561 ret = -EINVAL;
3562 goto out;
3563 }
3564
3565 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3566 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3567 args->handle);
3568 ret = -EINVAL;
3569 goto out;
3570 }
3571
3572 if (obj->user_pin_count == 0) {
3573 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3574 if (ret)
3575 goto out;
3576 }
3577
3578 obj->user_pin_count++;
3579 obj->pin_filp = file;
3580
3581
3582
3583
3584 i915_gem_object_flush_cpu_write_domain(obj);
3585 args->offset = obj->gtt_offset;
3586out:
3587 drm_gem_object_unreference(&obj->base);
3588unlock:
3589 mutex_unlock(&dev->struct_mutex);
3590 return ret;
3591}
3592
3593int
3594i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3595 struct drm_file *file)
3596{
3597 struct drm_i915_gem_pin *args = data;
3598 struct drm_i915_gem_object *obj;
3599 int ret;
3600
3601 ret = i915_mutex_lock_interruptible(dev);
3602 if (ret)
3603 return ret;
3604
3605 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3606 if (&obj->base == NULL) {
3607 ret = -ENOENT;
3608 goto unlock;
3609 }
3610
3611 if (obj->pin_filp != file) {
3612 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3613 args->handle);
3614 ret = -EINVAL;
3615 goto out;
3616 }
3617 obj->user_pin_count--;
3618 if (obj->user_pin_count == 0) {
3619 obj->pin_filp = NULL;
3620 i915_gem_object_unpin(obj);
3621 }
3622
3623out:
3624 drm_gem_object_unreference(&obj->base);
3625unlock:
3626 mutex_unlock(&dev->struct_mutex);
3627 return ret;
3628}
3629
3630int
3631i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3632 struct drm_file *file)
3633{
3634 struct drm_i915_gem_busy *args = data;
3635 struct drm_i915_gem_object *obj;
3636 int ret;
3637
3638 ret = i915_mutex_lock_interruptible(dev);
3639 if (ret)
3640 return ret;
3641
3642 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3643 if (&obj->base == NULL) {
3644 ret = -ENOENT;
3645 goto unlock;
3646 }
3647
3648
3649
3650
3651
3652
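	/* Count a still-active object as busy, but flush its outstanding
	 * work first so that it eventually becomes idle without further
	 * intervention from userspace.
	 */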
3653 ret = i915_gem_object_flush_active(obj);
3654
3655 args->busy = obj->active;
3656 if (obj->ring) {
3657 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3658 args->busy |= intel_ring_flag(obj->ring) << 16;
3659 }
3660
3661 drm_gem_object_unreference(&obj->base);
3662unlock:
3663 mutex_unlock(&dev->struct_mutex);
3664 return ret;
3665}
3666
3667int
3668i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3669 struct drm_file *file_priv)
3670{
3671 return i915_gem_ring_throttle(dev, file_priv);
3672}
3673
3674int
3675i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3676 struct drm_file *file_priv)
3677{
3678 struct drm_i915_gem_madvise *args = data;
3679 struct drm_i915_gem_object *obj;
3680 int ret;
3681
3682 switch (args->madv) {
3683 case I915_MADV_DONTNEED:
3684 case I915_MADV_WILLNEED:
3685 break;
3686 default:
3687 return -EINVAL;
3688 }
3689
3690 ret = i915_mutex_lock_interruptible(dev);
3691 if (ret)
3692 return ret;
3693
3694 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3695 if (&obj->base == NULL) {
3696 ret = -ENOENT;
3697 goto unlock;
3698 }
3699
3700 if (obj->pin_count) {
3701 ret = -EINVAL;
3702 goto out;
3703 }
3704
3705 if (obj->madv != __I915_MADV_PURGED)
3706 obj->madv = args->madv;
3707
3708
3709 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3710 i915_gem_object_truncate(obj);
3711
3712 args->retained = obj->madv != __I915_MADV_PURGED;
3713
3714out:
3715 drm_gem_object_unreference(&obj->base);
3716unlock:
3717 mutex_unlock(&dev->struct_mutex);
3718 return ret;
3719}
3720
3721void i915_gem_object_init(struct drm_i915_gem_object *obj,
3722 const struct drm_i915_gem_object_ops *ops)
3723{
3724 INIT_LIST_HEAD(&obj->mm_list);
3725 INIT_LIST_HEAD(&obj->gtt_list);
3726 INIT_LIST_HEAD(&obj->ring_list);
3727 INIT_LIST_HEAD(&obj->exec_list);
3728
3729 obj->ops = ops;
3730
3731 obj->fence_reg = I915_FENCE_REG_NONE;
3732 obj->madv = I915_MADV_WILLNEED;
3733
3734 obj->map_and_fenceable = true;
3735
3736 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3737}
3738
3739static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3740 .get_pages = i915_gem_object_get_pages_gtt,
3741 .put_pages = i915_gem_object_put_pages_gtt,
3742};
3743
3744struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3745 size_t size)
3746{
3747 struct drm_i915_gem_object *obj;
3748 struct address_space *mapping;
3749 gfp_t mask;
3750
3751 obj = i915_gem_object_alloc(dev);
3752 if (obj == NULL)
3753 return NULL;
3754
3755 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3756 i915_gem_object_free(obj);
3757 return NULL;
3758 }
3759
3760 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3761 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3762
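		/* 965G/GM cannot relocate objects above 4GiB, so restrict the
		 * backing pages to the DMA32 zone.
		 */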
3763 mask &= ~__GFP_HIGHMEM;
3764 mask |= __GFP_DMA32;
3765 }
3766
3767 mapping = file_inode(obj->base.filp)->i_mapping;
3768 mapping_set_gfp_mask(mapping, mask);
3769
3770 i915_gem_object_init(obj, &i915_gem_object_ops);
3771
3772 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3773 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3774
3775 if (HAS_LLC(dev)) {
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
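		/* On LLC platforms the GPU snoops the CPU cache by default,
		 * avoiding clflushes for everything except scanout, which is
		 * rebound as uncached when first used for display.
		 */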
3788 obj->cache_level = I915_CACHE_LLC;
3789 } else
3790 obj->cache_level = I915_CACHE_NONE;
3791
3792 return obj;
3793}
3794
3795int i915_gem_init_object(struct drm_gem_object *obj)
3796{
3797 BUG();
3798
3799 return 0;
3800}
3801
3802void i915_gem_free_object(struct drm_gem_object *gem_obj)
3803{
3804 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3805 struct drm_device *dev = obj->base.dev;
3806 drm_i915_private_t *dev_priv = dev->dev_private;
3807
3808 trace_i915_gem_object_destroy(obj);
3809
3810 if (obj->phys_obj)
3811 i915_gem_detach_phys_object(dev, obj);
3812
3813 obj->pin_count = 0;
3814 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3815 bool was_interruptible;
3816
3817 was_interruptible = dev_priv->mm.interruptible;
3818 dev_priv->mm.interruptible = false;
3819
3820 WARN_ON(i915_gem_object_unbind(obj));
3821
3822 dev_priv->mm.interruptible = was_interruptible;
3823 }
3824
3825 obj->pages_pin_count = 0;
3826 i915_gem_object_put_pages(obj);
3827 i915_gem_object_free_mmap_offset(obj);
3828 i915_gem_object_release_stolen(obj);
3829
3830 BUG_ON(obj->pages);
3831
3832 if (obj->base.import_attach)
3833 drm_prime_gem_destroy(&obj->base, NULL);
3834
3835 drm_gem_object_release(&obj->base);
3836 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3837
3838 kfree(obj->bit_17);
3839 i915_gem_object_free(obj);
3840}
3841
3842int
3843i915_gem_idle(struct drm_device *dev)
3844{
3845 drm_i915_private_t *dev_priv = dev->dev_private;
3846 int ret;
3847
3848 mutex_lock(&dev->struct_mutex);
3849
3850 if (dev_priv->mm.suspended) {
3851 mutex_unlock(&dev->struct_mutex);
3852 return 0;
3853 }
3854
3855 ret = i915_gpu_idle(dev);
3856 if (ret) {
3857 mutex_unlock(&dev->struct_mutex);
3858 return ret;
3859 }
3860 i915_gem_retire_requests(dev);
3861
3862
3863 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3864 i915_gem_evict_everything(dev);
3865
3866
3867
3868
3869
3870 dev_priv->mm.suspended = 1;
3871 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3872
3873 i915_kernel_lost_context(dev);
3874 i915_gem_cleanup_ringbuffer(dev);
3875
3876 mutex_unlock(&dev->struct_mutex);
3877
3878
3879 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3880
3881 return 0;
3882}
3883
3884void i915_gem_l3_remap(struct drm_device *dev)
3885{
3886 drm_i915_private_t *dev_priv = dev->dev_private;
3887 u32 misccpctl;
3888 int i;
3889
3890 if (!HAS_L3_GPU_CACHE(dev))
3891 return;
3892
3893 if (!dev_priv->l3_parity.remap_info)
3894 return;
3895
3896 misccpctl = I915_READ(GEN7_MISCCPCTL);
3897 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3898 POSTING_READ(GEN7_MISCCPCTL);
3899
3900 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3901 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3902 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3903 DRM_DEBUG("0x%x was already programmed to %x\n",
3904 GEN7_L3LOG_BASE + i, remap);
3905 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3906 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3907 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3908 }
3909
3910
3911 POSTING_READ(GEN7_L3LOG_BASE);
3912
3913 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3914}
3915
3916void i915_gem_init_swizzling(struct drm_device *dev)
3917{
3918 drm_i915_private_t *dev_priv = dev->dev_private;
3919
3920 if (INTEL_INFO(dev)->gen < 5 ||
3921 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3922 return;
3923
3924 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3925 DISP_TILE_SURFACE_SWIZZLING);
3926
3927 if (IS_GEN5(dev))
3928 return;
3929
3930 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3931 if (IS_GEN6(dev))
3932 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3933 else if (IS_GEN7(dev))
3934 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3935 else
3936 BUG();
3937}
3938
3939static bool
3940intel_enable_blt(struct drm_device *dev)
3941{
3942 if (!HAS_BLT(dev))
3943 return false;
3944
3945
3946 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3947 DRM_INFO("BLT not supported on this pre-production hardware;"
3948 " graphics performance will be degraded.\n");
3949 return false;
3950 }
3951
3952 return true;
3953}
3954
3955static int i915_gem_init_rings(struct drm_device *dev)
3956{
3957 struct drm_i915_private *dev_priv = dev->dev_private;
3958 int ret;
3959
3960 ret = intel_init_render_ring_buffer(dev);
3961 if (ret)
3962 return ret;
3963
3964 if (HAS_BSD(dev)) {
3965 ret = intel_init_bsd_ring_buffer(dev);
3966 if (ret)
3967 goto cleanup_render_ring;
3968 }
3969
3970 if (intel_enable_blt(dev)) {
3971 ret = intel_init_blt_ring_buffer(dev);
3972 if (ret)
3973 goto cleanup_bsd_ring;
3974 }
3975
3976 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
3977 if (ret)
3978 goto cleanup_blt_ring;
3979
3980 return 0;
3981
3982cleanup_blt_ring:
3983 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
3984cleanup_bsd_ring:
3985 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3986cleanup_render_ring:
3987 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3988
3989 return ret;
3990}
3991
3992int
3993i915_gem_init_hw(struct drm_device *dev)
3994{
3995 drm_i915_private_t *dev_priv = dev->dev_private;
3996 int ret;
3997
3998 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3999 return -EIO;
4000
4001 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4002 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4003
4004 if (HAS_PCH_NOP(dev)) {
4005 u32 temp = I915_READ(GEN7_MSG_CTL);
4006 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4007 I915_WRITE(GEN7_MSG_CTL, temp);
4008 }
4009
4010 i915_gem_l3_remap(dev);
4011
4012 i915_gem_init_swizzling(dev);
4013
4014 ret = i915_gem_init_rings(dev);
4015 if (ret)
4016 return ret;
4017
4018
4019
4020
4021
4022 i915_gem_context_init(dev);
4023 if (dev_priv->mm.aliasing_ppgtt) {
4024 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4025 if (ret) {
4026 i915_gem_cleanup_aliasing_ppgtt(dev);
4027 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4028 }
4029 }
4030
4031 return 0;
4032}
4033
4034int i915_gem_init(struct drm_device *dev)
4035{
4036 struct drm_i915_private *dev_priv = dev->dev_private;
4037 int ret;
4038
4039 mutex_lock(&dev->struct_mutex);
4040
4041 if (IS_VALLEYVIEW(dev)) {
4042
4043 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4044 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4045 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4046 }
4047
4048 i915_gem_init_global_gtt(dev);
4049
4050 ret = i915_gem_init_hw(dev);
4051 mutex_unlock(&dev->struct_mutex);
4052 if (ret) {
4053 i915_gem_cleanup_aliasing_ppgtt(dev);
4054 return ret;
4055 }
4056
4057
4058 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4059 dev_priv->dri1.allow_batchbuffer = 1;
4060 return 0;
4061}
4062
4063void
4064i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4065{
4066 drm_i915_private_t *dev_priv = dev->dev_private;
4067 struct intel_ring_buffer *ring;
4068 int i;
4069
4070 for_each_ring(ring, dev_priv, i)
4071 intel_cleanup_ring_buffer(ring);
4072}
4073
4074int
4075i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4076 struct drm_file *file_priv)
4077{
4078 drm_i915_private_t *dev_priv = dev->dev_private;
4079 int ret;
4080
4081 if (drm_core_check_feature(dev, DRIVER_MODESET))
4082 return 0;
4083
4084 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4085 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4086 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4087 }
4088
4089 mutex_lock(&dev->struct_mutex);
4090 dev_priv->mm.suspended = 0;
4091
4092 ret = i915_gem_init_hw(dev);
4093 if (ret != 0) {
4094 mutex_unlock(&dev->struct_mutex);
4095 return ret;
4096 }
4097
4098 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4099 mutex_unlock(&dev->struct_mutex);
4100
4101 ret = drm_irq_install(dev);
4102 if (ret)
4103 goto cleanup_ringbuffer;
4104
4105 return 0;
4106
4107cleanup_ringbuffer:
4108 mutex_lock(&dev->struct_mutex);
4109 i915_gem_cleanup_ringbuffer(dev);
4110 dev_priv->mm.suspended = 1;
4111 mutex_unlock(&dev->struct_mutex);
4112
4113 return ret;
4114}
4115
4116int
4117i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4118 struct drm_file *file_priv)
4119{
4120 if (drm_core_check_feature(dev, DRIVER_MODESET))
4121 return 0;
4122
4123 drm_irq_uninstall(dev);
4124 return i915_gem_idle(dev);
4125}
4126
4127void
4128i915_gem_lastclose(struct drm_device *dev)
4129{
4130 int ret;
4131
4132 if (drm_core_check_feature(dev, DRIVER_MODESET))
4133 return;
4134
4135 ret = i915_gem_idle(dev);
4136 if (ret)
4137 DRM_ERROR("failed to idle hardware: %d\n", ret);
4138}
4139
4140static void
4141init_ring_lists(struct intel_ring_buffer *ring)
4142{
4143 INIT_LIST_HEAD(&ring->active_list);
4144 INIT_LIST_HEAD(&ring->request_list);
4145}
4146
4147void
4148i915_gem_load(struct drm_device *dev)
4149{
4150 drm_i915_private_t *dev_priv = dev->dev_private;
4151 int i;
4152
4153 dev_priv->slab =
4154 kmem_cache_create("i915_gem_object",
4155 sizeof(struct drm_i915_gem_object), 0,
4156 SLAB_HWCACHE_ALIGN,
4157 NULL);
4158
4159 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4160 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4161 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4162 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4163 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4164 for (i = 0; i < I915_NUM_RINGS; i++)
4165 init_ring_lists(&dev_priv->ring[i]);
4166 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4167 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4168 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4169 i915_gem_retire_work_handler);
4170 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4171
4172
4173 if (IS_GEN3(dev)) {
4174 I915_WRITE(MI_ARB_STATE,
4175 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4176 }
4177
4178 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4179
4180
4181 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4182 dev_priv->fence_reg_start = 3;
4183
4184 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4185 dev_priv->num_fence_regs = 32;
4186 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4187 dev_priv->num_fence_regs = 16;
4188 else
4189 dev_priv->num_fence_regs = 8;
4190
4191
4192 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4193 i915_gem_restore_fences(dev);
4194
4195 i915_gem_detect_bit_6_swizzle(dev);
4196 init_waitqueue_head(&dev_priv->pending_flip_queue);
4197
4198 dev_priv->mm.interruptible = true;
4199
4200 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4201 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4202 register_shrinker(&dev_priv->mm.inactive_shrinker);
4203}
4204
4205
4206
4207
4208
4209static int i915_gem_init_phys_object(struct drm_device *dev,
4210 int id, int size, int align)
4211{
4212 drm_i915_private_t *dev_priv = dev->dev_private;
4213 struct drm_i915_gem_phys_object *phys_obj;
4214 int ret;
4215
4216 if (dev_priv->mm.phys_objs[id - 1] || !size)
4217 return 0;
4218
4219 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4220 if (!phys_obj)
4221 return -ENOMEM;
4222
4223 phys_obj->id = id;
4224
4225 phys_obj->handle = drm_pci_alloc(dev, size, align);
4226 if (!phys_obj->handle) {
4227 ret = -ENOMEM;
4228 goto kfree_obj;
4229 }
4230#ifdef CONFIG_X86
4231 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4232#endif
4233
4234 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4235
4236 return 0;
4237kfree_obj:
4238 kfree(phys_obj);
4239 return ret;
4240}
4241
4242static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4243{
4244 drm_i915_private_t *dev_priv = dev->dev_private;
4245 struct drm_i915_gem_phys_object *phys_obj;
4246
4247 if (!dev_priv->mm.phys_objs[id - 1])
4248 return;
4249
4250 phys_obj = dev_priv->mm.phys_objs[id - 1];
4251 if (phys_obj->cur_obj) {
4252 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4253 }
4254
4255#ifdef CONFIG_X86
4256 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4257#endif
4258 drm_pci_free(dev, phys_obj->handle);
4259 kfree(phys_obj);
4260 dev_priv->mm.phys_objs[id - 1] = NULL;
4261}
4262
4263void i915_gem_free_all_phys_object(struct drm_device *dev)
4264{
4265 int i;
4266
4267 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4268 i915_gem_free_phys_object(dev, i);
4269}
4270
4271void i915_gem_detach_phys_object(struct drm_device *dev,
4272 struct drm_i915_gem_object *obj)
4273{
4274 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4275 char *vaddr;
4276 int i;
4277 int page_count;
4278
4279 if (!obj->phys_obj)
4280 return;
4281 vaddr = obj->phys_obj->handle->vaddr;
4282
4283 page_count = obj->base.size / PAGE_SIZE;
4284 for (i = 0; i < page_count; i++) {
4285 struct page *page = shmem_read_mapping_page(mapping, i);
4286 if (!IS_ERR(page)) {
4287 char *dst = kmap_atomic(page);
4288 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4289 kunmap_atomic(dst);
4290
4291 drm_clflush_pages(&page, 1);
4292
4293 set_page_dirty(page);
4294 mark_page_accessed(page);
4295 page_cache_release(page);
4296 }
4297 }
4298 i915_gem_chipset_flush(dev);
4299
4300 obj->phys_obj->cur_obj = NULL;
4301 obj->phys_obj = NULL;
4302}
4303
4304int
4305i915_gem_attach_phys_object(struct drm_device *dev,
4306 struct drm_i915_gem_object *obj,
4307 int id,
4308 int align)
4309{
4310 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4311 drm_i915_private_t *dev_priv = dev->dev_private;
4312 int ret = 0;
4313 int page_count;
4314 int i;
4315
4316 if (id > I915_MAX_PHYS_OBJECT)
4317 return -EINVAL;
4318
4319 if (obj->phys_obj) {
4320 if (obj->phys_obj->id == id)
4321 return 0;
4322 i915_gem_detach_phys_object(dev, obj);
4323 }
4324
4325
4326 if (!dev_priv->mm.phys_objs[id - 1]) {
4327 ret = i915_gem_init_phys_object(dev, id,
4328 obj->base.size, align);
4329 if (ret) {
4330 DRM_ERROR("failed to init phys object %d size: %zu\n",
4331 id, obj->base.size);
4332 return ret;
4333 }
4334 }
4335
4336
4337 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4338 obj->phys_obj->cur_obj = obj;
4339
4340 page_count = obj->base.size / PAGE_SIZE;
4341
4342 for (i = 0; i < page_count; i++) {
4343 struct page *page;
4344 char *dst, *src;
4345
4346 page = shmem_read_mapping_page(mapping, i);
4347 if (IS_ERR(page))
4348 return PTR_ERR(page);
4349
4350 src = kmap_atomic(page);
4351 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4352 memcpy(dst, src, PAGE_SIZE);
4353 kunmap_atomic(src);
4354
4355 mark_page_accessed(page);
4356 page_cache_release(page);
4357 }
4358
4359 return 0;
4360}
4361
4362static int
4363i915_gem_phys_pwrite(struct drm_device *dev,
4364 struct drm_i915_gem_object *obj,
4365 struct drm_i915_gem_pwrite *args,
4366 struct drm_file *file_priv)
4367{
4368 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4369 char __user *user_data = to_user_ptr(args->data_ptr);
4370
4371 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4372 unsigned long unwritten;
4373
4374
4375
4376
4377
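		/* The assigned phys object is fixed for the lifetime of the
		 * GEM object, so struct_mutex can safely be dropped around
		 * the potentially faulting copy_from_user().
		 */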
4378 mutex_unlock(&dev->struct_mutex);
4379 unwritten = copy_from_user(vaddr, user_data, args->size);
4380 mutex_lock(&dev->struct_mutex);
4381 if (unwritten)
4382 return -EFAULT;
4383 }
4384
4385 i915_gem_chipset_flush(dev);
4386 return 0;
4387}
4388
4389void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4390{
4391 struct drm_i915_file_private *file_priv = file->driver_priv;
4392
4393
4394
4395
4396
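	/* Clean up any requests still owned by this file so that later
	 * request retirement does not dereference a stale file_priv.
	 */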
4397 spin_lock(&file_priv->mm.lock);
4398 while (!list_empty(&file_priv->mm.request_list)) {
4399 struct drm_i915_gem_request *request;
4400
4401 request = list_first_entry(&file_priv->mm.request_list,
4402 struct drm_i915_gem_request,
4403 client_list);
4404 list_del(&request->client_list);
4405 request->file_priv = NULL;
4406 }
4407 spin_unlock(&file_priv->mm.lock);
4408}
4409
4410static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4411{
4412 if (!mutex_is_locked(mutex))
4413 return false;
4414
4415#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4416 return mutex->owner == task;
4417#else
4418
4419 return false;
4420#endif
4421}
4422
4423static int
4424i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4425{
4426 struct drm_i915_private *dev_priv =
4427 container_of(shrinker,
4428 struct drm_i915_private,
4429 mm.inactive_shrinker);
4430 struct drm_device *dev = dev_priv->dev;
4431 struct drm_i915_gem_object *obj;
4432 int nr_to_scan = sc->nr_to_scan;
4433 bool unlock = true;
4434 int cnt;
4435
4436 if (!mutex_trylock(&dev->struct_mutex)) {
4437 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4438 return 0;
4439
4440 if (dev_priv->mm.shrinker_no_lock_stealing)
4441 return 0;
4442
4443 unlock = false;
4444 }
4445
4446 if (nr_to_scan) {
4447 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4448 if (nr_to_scan > 0)
4449 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4450 false);
4451 if (nr_to_scan > 0)
4452 i915_gem_shrink_all(dev_priv);
4453 }
4454
4455 cnt = 0;
4456 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4457 if (obj->pages_pin_count == 0)
4458 cnt += obj->base.size >> PAGE_SHIFT;
4459 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
4460 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4461 cnt += obj->base.size >> PAGE_SHIFT;
4462
4463 if (unlock)
4464 mutex_unlock(&dev->struct_mutex);
4465 return cnt;
4466}
4467