/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

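/*
 * Flush the engine's idle barriers: run every deferred i915_active retirement
 * callback queued on engine->barrier_tasks now that the engine has idled.
 * The request pointer is cleared before invoking ->retire() so each callback
 * sees a completed (NULL) request.
 */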
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct i915_active_request *active =
			container_of((struct list_head *)node,
				     typeof(*active), link);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, NULL);
	}
}

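/*
 * Called with struct_mutex held once the GT has fully idled: flush the
 * per-engine idle barriers, release the batch pools, and park the
 * timelines, VMAs and global allocators so the device can power down.
 */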
static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		call_idle_barriers(engine);
		i915_gem_batch_pool_fini(&engine->batch_pool);
	}

	i915_timelines_park(i915);
	i915_vma_parked(i915);

	i915_globals_park();
}

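/*
 * Worker queued on INTEL_GT_PARK: stop the retire worker and, if the GT
 * wakeref is still inactive and no new park request is pending, park the
 * GEM bookkeeping under struct_mutex. Otherwise re-arm the retire worker.
 */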
static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool park;

	cancel_delayed_work_sync(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
	park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
	intel_wakeref_unlock(&i915->gt.wakeref);
	if (park)
		i915_gem_park(i915);
	else
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));

	mutex_unlock(&i915->drm.struct_mutex);
}

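/*
 * Periodic worker that retires completed requests roughly once a second.
 * It only trylocks struct_mutex so it never stalls behind a busy client,
 * and it always re-queues itself; the idle worker cancels it when the GT
 * parks.
 */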
static void retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
		i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
			   round_jiffies_up_relative(HZ));
}

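/*
 * GT power management notifier: on INTEL_GT_UNPARK, unpark the global
 * allocators and kick the retire worker; on INTEL_GT_PARK, defer the GEM
 * parking work to the idle worker.
 */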
static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		i915_globals_unpark();
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
		break;

	case INTEL_GT_PARK:
		queue_work(i915->wq, &i915->gem.idle_work);
		break;
	}

	return NOTIFY_OK;
}

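/*
 * Wait for the device to idle so that the engines settle back onto the
 * kernel context. If the wait times out, the GPU is declared wedged so
 * that outstanding work is cancelled and the system stays usable.
 * Returns true if the device idled without being wedged.
 */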
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
{
	bool result = !i915_terminally_wedged(i915);

	do {
		if (i915_gem_wait_for_idle(i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			i915_gem_set_wedged(i915);
			result = false;
		}
	} while (i915_retire_requests(i915) && result);

	GEM_BUG_ON(i915->gt.awake);
	return result;
}

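/*
 * Thin, non-static wrapper around switch_to_kernel_context_sync() so that
 * callers outside this file can idle the GPU on the kernel context (for
 * example when reloading the power context on resume).
 */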
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(i915);
}

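/*
 * First phase of suspend: drop the userfault wakeref, flush outstanding
 * work, switch the engines back onto the kernel context and quiesce the
 * background workers before handing over to the uC suspend path.
 * Object backing storage is handled later, in i915_gem_suspend_late().
 */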
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(i915);

	mutex_unlock(&i915->drm.struct_mutex);

	/*
	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(i915);
}

static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

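/*
 * Second phase of suspend, run once the device is idle: walk the shrinkable
 * and purgeable object lists and flush each object to the GTT read domain,
 * dropping mm.obj_lock around each flush, then sanitize the uC and GEM
 * state ready for power-down.
 */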
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves nor any other kernel expects the
	 * system to be in execlists mode on startup, so we need to reset
	 * the GPU back to legacy mode. And the only known way to disable
	 * logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
}

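/*
 * Undo the suspend sequence: restore the GGTT mappings and fences,
 * re-initialise the hardware and the GT, resume the uC firmware and
 * reload the kernel/power context. Any failure wedges the GPU rather
 * than leaving it in an unknown state.
 */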
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just
	 * reset it and start again.
	 */
	if (intel_gt_resume(i915))
		goto err_wedged;

	intel_uc_resume(i915);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!i915_reset_failed(i915)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}

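/*
 * One-time setup of the GEM power-management hooks: initialise the idle and
 * retire workers and register the pm_notifier on the GT park/unpark
 * notification chain.
 */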
void i915_gem_init__pm(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}