#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

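/*
 * Scratch state used by the PIPE_CONTROL-based request paths: a pinned
 * 4K GEM object whose first page is kept kmapped so the CPU can read the
 * seqno that the GPU writes into it.
 */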
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

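/*
 * Hand out seqnos in order.  Zero is reserved to mean "no seqno", so skip
 * it when the counter wraps.
 */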
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

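	/*
	 * MI_FLUSH with MI_NO_WRITE_FLUSH set only invalidates the read
	 * caches.  Drop MI_NO_WRITE_FLUSH when the render cache has to be
	 * flushed, and OR in the read-cache invalidation bits requested by
	 * the invalidate domains below.
	 */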
	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
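		/*
		 * Pre-965 parts only invalidate the sampler cache when
		 * MI_READ_FLUSH is set explicitly.
		 */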
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

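/*
 * Gen6 PIPE_CONTROL work-around: before the real flush, emit a CS-stall
 * PIPE_CONTROL followed by one that does a post-sync qword write to
 * scratch space (hence "post-sync non-zero").  The stall plus dummy write
 * is reportedly required for the flush that follows to take effect.
 */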
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

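	/* Apply the stalling post-sync flush work-around before the real flush. */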
	intel_emit_post_sync_nonzero_flush(ring);

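	/*
	 * Just flush and invalidate everything.  The flags below are not
	 * derived from the caller's domains; flushing all caches keeps
	 * this path simple.
	 */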
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

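	/*
	 * On some chipsets (G45 in particular) the head does not always
	 * reset to zero; try once more by hand before declaring failure.
	 */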
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_REPORT_64K | RING_VALID);

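	/* If the head is still not zero after enabling the ring, it is dead. */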
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

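/*
 * Allocate and pin the 4K scratch object backing struct pipe_control and
 * keep its first page kmapped so the seqno written by the GPU can be read
 * directly by the CPU.
 */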
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

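/**
 * update_mboxes - emit a semaphore mailbox update
 * @ring: ring to write the update from
 * @seqno: seqno to write into the mailbox
 * @mmio_offset: mailbox register of the ring being signalled
 *
 * Writes @seqno into another ring's semaphore mailbox so that ring can
 * later wait on it with MI_SEMAPHORE_MBOX.
 */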
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

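/**
 * gen6_add_request - emit a request on a gen6+ ring
 * @ring: ring the request is being emitted on
 * @seqno: returns the newly allocated seqno
 *
 * Updates both of the other rings' semaphore mailboxes with the new seqno,
 * writes it to the hardware status page and finishes with a user interrupt.
 */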
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

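/**
 * intel_ring_sync - make one ring wait for another
 * @waiter: ring that will stall
 * @signaller: ring that must reach @seqno first
 * @ring: index of the waiting ring in the signaller's semaphore table
 * @seqno: seqno to wait for
 *
 * Emits an MI_SEMAPHORE_MBOX compare on @waiter against the mailbox that
 * @signaller updates for it, so @waiter does not run past @seqno.
 */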
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

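/*
 * The *_sync_to helpers below are installed as the ->sync_to hooks of the
 * ring named in each function: they make that ring (the waiter) block
 * until @signaller has passed @seqno.
 */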
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}

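/* Emit a depth-stalling PIPE_CONTROL qword write to addr__ (scratch space). */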
#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

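	/*
	 * On Ironlake MI_USER_INTERRUPT is reportedly unreliable with respect
	 * to memory writes, so the interrupt is raised with
	 * PIPE_CONTROL_NOTIFY instead.  The chain of scratch-space
	 * PIPE_CONTROL_FLUSH writes below pushes the seqno write out to
	 * memory before the notify interrupt fires.
	 */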
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

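	/*
	 * As of gen7 the status page registers are no longer grouped with
	 * the other per-ring registers, so pick the right one per ring.
	 */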
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

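	/*
	 * Work around an i830 erratum: a hang can reportedly occur if the
	 * tail pointer ends up too close to the end of the buffer, so keep
	 * the last couple of cachelines unused.
	 */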
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

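	/* Disable the ring buffer; the ring should be idle at this point. */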
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

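	/*
	 * First check the head position auto-reported in the status page
	 * (dword 4); if that has not advanced far enough, fall back to
	 * polling the HEAD register below.
	 */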
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = render_ring_get_irq,
	.irq_put = render_ring_put_irq,
	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
	.cleanup = render_ring_cleanup,
	.sync_to = render_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_RV,
			       MI_SEMAPHORE_SYNC_RB},
	.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

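/* Ring buffer for the bit-stream (video) decoder on pre-gen6 parts. */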
static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = bsd_ring_get_irq,
	.irq_put = bsd_ring_put_irq,
	.dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

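	/*
	 * Every tail move on the gen6 BSD ring follows this sequence:
	 * disable the ring's power-saving sleep message, wait for the idle
	 * indicator, write the tail, then re-enable the sleep message.
	 */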
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_bsd_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
			       MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_VB},
	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

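/*
 * Work-around for some SNB steppings: whenever the BLT ring tail moves,
 * the first command parsed reportedly has to be MI_BATCH_BUFFER_START, so
 * blt_ring_begin() prefixes every submission with a dummy batch containing
 * just MI_BATCH_BUFFER_END (set up in blt_ring_init()).
 */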
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = RING_BLT,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = blt_ring_init,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = blt_ring_get_irq,
	.irq_put = blt_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.cleanup = blt_ring_cleanup,
	.sync_to = gen6_blt_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
			       MI_SEMAPHORE_SYNC_BV,
			       MI_SEMAPHORE_SYNC_INVALID},
	.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}