/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;
	void *map;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

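	/* Pin the default state for fast resets from atomic context. */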
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

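	/* Discard stale context state from across idling */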
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

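		/* First poison the image to verify we never fully trust it */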
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
			struct drm_i915_gem_object *obj = ce->state->obj;
			int type = i915_coherent_map_type(engine->i915);

			map = i915_gem_object_pin_map(obj, type);
			if (!IS_ERR(map)) {
				memset(map, CONTEXT_REDZONE, obj->base.size);
				i915_gem_object_flush_map(obj);
				i915_gem_object_unpin_map(obj);
			}
		}

		ce->ops->reset(ce);
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_unpark_heartbeat(engine);
	return 0;
}

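/*
 * We park/unpark without holding the timeline->mutex (see the explanation
 * in switch_to_kernel_context()). These helpers only mark the mutex as
 * held/released for lockdep's benefit, so that it can still validate the
 * ordering of the request construction we do underneath.
 */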
#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

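/*
 * Signalling callback for the kernel context switch request: feed the time
 * from emission to completion into the engine's latency estimate (ewma).
 */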
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "\n");

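	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */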
	spin_lock(&timelines->lock);

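	/* Let intel_gt_retire_requests() retire us (acquired under lock) */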
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

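	/* Hand the request over to HW and so engine_retire() */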
	__i915_request_queue(rq, NULL);

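	/* Let new submissions commence (and maybe retire this timeline) */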
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

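	/* GPU is pointing to the void, as good as in the kernel context. */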
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));

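	/* Already inside the kernel context, safe to power down. */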
	if (engine->wakeref_serial == engine->serial)
		return true;

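	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */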
	flags = __timeline_mark_lock(ce);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
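		/* Context switch failed, hope for the best! Maybe reset? */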
		goto out_unlock;

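	/* Check again on the next retirement. */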
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

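	/* Install ourselves as a preemption barrier */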
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
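		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */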
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

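	/* Expose ourselves to the world */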
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	__timeline_mark_unlock(ce, flags);
	return result;
}

static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

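	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */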
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

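	/* Must be reset upon idling, or we may miss the busy wakeup. */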
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

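	/* While gt calls i915_vma_parked(), we have to break the lock cycle */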
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif