// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

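/*
 * On debug builds, poison the kernel context image with CONTEXT_REDZONE
 * whenever we lose control of it (e.g. across idling or suspend), so that
 * any stale state left behind is never mistaken for valid.
 */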
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

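/*
 * First reference to a parked engine: take a GT wakeref, then scrub and
 * reset the kernel context image before handing control back to the
 * engine backend via engine->unpark().
 */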
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)

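/*
 * We cannot take ce->timeline->mutex while parking (we may already be
 * beneath it, see switch_to_kernel_context()), so these helpers only mark
 * the mutex as held for lockdep's benefit, keeping the annotations on the
 * request-construction path satisfied.
 */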
static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static void __timeline_mark_unlock(struct intel_context *ce,
				   unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static void __timeline_mark_unlock(struct intel_context *ce,
				   unsigned long flags)
{
}

#endif

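/*
 * Fence callback: measure how long the parking context switch took (from
 * emission until the fence timestamp) and feed it into the engine's
 * latency estimate (an exponentially weighted moving average).
 */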
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot,
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park() that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking
	 * the list of active timelines looking for completions. Meanwhile
	 * as soon as we call __i915_request_queue(), the GPU may complete
	 * our request. Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	flags = __timeline_mark_lock(ce);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	__timeline_mark_unlock(ce, flags);
	return result;
}

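/*
 * Now that the engine is parked, run the idle barriers: each entry is a
 * dma_fence_cb whose callback is invoked with -EAGAIN in place of a fence,
 * as no request will ever signal it.
 */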
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * we want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* flush irq handler */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	engine->execlists.no_priolist = false;

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

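/*
 * Rough usage sketch: callers bracket work with the engine-pm helpers from
 * intel_engine_pm.h, which drive this wakeref:
 *
 *	intel_engine_pm_get(engine);	(first get -> __engine_unpark())
 *	... construct and submit requests ...
 *	intel_engine_pm_put(engine);	(last put -> __engine_park())
 */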
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif