/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

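/*
 * Engine power management: an engine is powered up (unparked) on first use
 * and powered down (parked) once its last request has been retired. The
 * callbacks below are invoked from the engine's intel_wakeref as it
 * transitions between those two states.
 */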
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Poison the image first so we never fully trust stale state */
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
			struct drm_i915_gem_object *obj = ce->state->obj;
			int type = i915_coherent_map_type(engine->i915);
			void *map;

			map = i915_gem_object_pin_map(obj, type);
			if (!IS_ERR(map)) {
				memset(map, CONTEXT_REDZONE, obj->base.size);
				i915_gem_object_flush_map(obj);
				i915_gem_object_unpin_map(obj);
			}
		}

		ce->ops->reset(ce);
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_unpark_heartbeat(engine);
	return 0;
}

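/*
 * Lockdep-only annotations: while parking we act as if we hold the kernel
 * context timeline->mutex (we have exclusive access via __engine_park()),
 * but we cannot actually take it as we may already be underneath it while
 * retiring. These helpers keep lockdep informed without taking the lock.
 */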
#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif

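/*
 * Fence callback for the parking request: feed the time from emitting the
 * request to its completion into the engine's latency estimate (EWMA).
 */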
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue(rq, NULL);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

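/*
 * Before powering down, flush the last user context off the hardware by
 * submitting one final request on the engine's kernel context. Returns true
 * when it is safe to park immediately; false when a barrier request has been
 * queued and the park is deferred (via __queue_and_release_pm()) until that
 * request is retired.
 */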
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * take it as we may be called while retiring the kernel context and
	 * so already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park() that prevents anyone else
	 * from creating a request on this engine. This also requires that
	 * the ring is empty and we avoid any waits while constructing the
	 * context, as they assume protection by the timeline->mutex. This
	 * should hold true as we can only park the engine after retiring the
	 * last request, thus all rings should be empty and all timelines
	 * idle.
	 *
	 * For unlocking, there are two other parties and the GPU who have a
	 * stake here.
	 *
	 * A new GPU user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking
	 * the list of active timelines looking for completions. As soon as
	 * we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list (see
	 * intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and
	 * retire it, causing an underflow of the engine->wakeref.
	 */
	flags = __timeline_mark_lock(ce);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) {
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	__timeline_mark_unlock(ce, flags);
	return result;
}

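/*
 * Complete any idle barrier tasks attached to this engine now that it is
 * parked; each callback is signalled with ERR_PTR(-EAGAIN) in lieu of a
 * fence.
 */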
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

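/*
 * Called when the last reference to the engine wakeref is dropped: switch
 * the hardware back to the kernel context, quiesce the engine and release
 * our GT power reference.
 */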
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine);

	intel_engine_park_heartbeat(engine);
	intel_engine_disarm_breadcrumbs(engine);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	engine->execlists.no_priolist = false;

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

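/* First-acquire and last-release callbacks for the engine's wakeref */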
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif