/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_ring.h"
#include "intel_timeline.h"

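/*
 * Recompute the free space in the ring from the current HEAD and emit
 * offsets, and cache the result in ring->space. __intel_ring_space()
 * keeps a cacheline of slack between the two, as the hardware does not
 * allow HEAD to pass TAIL within the same cacheline.
 */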
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

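/*
 * Map the ring into the GGTT (and the CPU's address space) for emission.
 * Only the first pin does the work; nested pins merely bump pin_count.
 * Rings backed by stolen memory must be reached through the mappable
 * aperture; everything else is pinned high to keep the aperture free.
 */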
int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No other ring offset does. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
					       i915_coherent_map_type(vma->vm->i915));
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

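/*
 * Rewind the ring to @tail (modulo the ring size): HEAD, TAIL and the
 * software emit offset are collapsed to the same location, discarding
 * any unsubmitted contents, and the cached free space is recomputed.
 */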
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

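/*
 * Release a pin; the final unpin drops the CPU mapping, marks the
 * backing store purgeable again and unpins the vma from the GGTT.
 */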
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

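/*
 * Allocate the ring's backing store and wrap it in a GGTT vma. Stolen
 * memory is preferred where the mappable aperture exists, falling back
 * to internal (non-shmem) pages otherwise.
 */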
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray
	 * overwrites) before we actually enable them.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

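/*
 * Create a ring of the given power-of-two @size for @engine. The ring is
 * refcounted (see intel_ring_free()); its backing vma is allocated here
 * but not pinned until intel_ring_pin().
 */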
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

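/* Final kref release for a ring: drop the reference on the backing vma. */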
void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

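/*
 * The ring is too full for @bytes: find the oldest request on this
 * timeline whose retirement frees enough space, wait for it to complete,
 * and retire up to and including it to reclaim the space it used.
 */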
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

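/*
 * Reserve @num_dwords dwords (plus the request's emergency reserve) in
 * the ring, wrapping past the end with MI_NOOP padding and/or waiting
 * for old requests to retire as needed. Returns a pointer into the ring
 * to be filled with commands and then passed to intel_ring_advance().
 */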
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
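
/*
 * Typical emission pattern (illustrative sketch only, not lifted from a
 * real caller; note that dword counts must be even):
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */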

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif