1
2
3
4
5
6#include <linux/suspend.h>
7
8#include "i915_drv.h"
9#include "i915_globals.h"
10#include "i915_params.h"
11#include "intel_context.h"
12#include "intel_engine_pm.h"
13#include "intel_gt.h"
14#include "intel_gt_clock_utils.h"
15#include "intel_gt_pm.h"
16#include "intel_gt_requests.h"
17#include "intel_llc.h"
18#include "intel_pm.h"
19#include "intel_rc6.h"
20#include "intel_rps.h"
21#include "intel_wakeref.h"
22
23static void user_forcewake(struct intel_gt *gt, bool suspend)
24{
25 int count = atomic_read(>->user_wakeref);
26
27
28 if (likely(!count))
29 return;
30
31 intel_gt_pm_get(gt);
32 if (suspend) {
33 GEM_BUG_ON(count > atomic_read(>->wakeref.count));
34 atomic_sub(count, >->wakeref.count);
35 } else {
36 atomic_add(count, >->wakeref.count);
37 }
38 intel_gt_pm_put(gt);
39}
40
41static void runtime_begin(struct intel_gt *gt)
42{
43 local_irq_disable();
44 write_seqcount_begin(>->stats.lock);
45 gt->stats.start = ktime_get();
46 gt->stats.active = true;
47 write_seqcount_end(>->stats.lock);
48 local_irq_enable();
49}
50
51static void runtime_end(struct intel_gt *gt)
52{
53 local_irq_disable();
54 write_seqcount_begin(>->stats.lock);
55 gt->stats.active = false;
56 gt->stats.total =
57 ktime_add(gt->stats.total,
58 ktime_sub(ktime_get(), gt->stats.start));
59 write_seqcount_end(>->stats.lock);
60 local_irq_enable();
61}
62
63static int __gt_unpark(struct intel_wakeref *wf)
64{
65 struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
66 struct drm_i915_private *i915 = gt->i915;
67
68 GT_TRACE(gt, "\n");
69
70 i915_globals_unpark();
71
72
73
74
75
76
77
78
79
80
81
82
83 gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
84 GEM_BUG_ON(!gt->awake);
85
86 intel_rc6_unpark(>->rc6);
87 intel_rps_unpark(>->rps);
88 i915_pmu_gt_unparked(i915);
89
90 intel_gt_unpark_requests(gt);
91 runtime_begin(gt);
92
93 return 0;
94}
95
96static int __gt_park(struct intel_wakeref *wf)
97{
98 struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
99 intel_wakeref_t wakeref = fetch_and_zero(>->awake);
100 struct drm_i915_private *i915 = gt->i915;
101
102 GT_TRACE(gt, "\n");
103
104 runtime_end(gt);
105 intel_gt_park_requests(gt);
106
107 i915_vma_parked(gt);
108 i915_pmu_gt_parked(i915);
109 intel_rps_park(>->rps);
110 intel_rc6_park(>->rc6);
111
112
113 intel_synchronize_irq(i915);
114
115
116 GEM_BUG_ON(!wakeref);
117 intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
118
119 i915_globals_park();
120
121 return 0;
122}
123
/* Park/unpark callbacks invoked on the GT wakeref's first get / last put. */
static const struct intel_wakeref_ops wf_ops = {
 .get = __gt_unpark,
 .put = __gt_park,
};
128
129void intel_gt_pm_init_early(struct intel_gt *gt)
130{
131 intel_wakeref_init(>->wakeref, gt->uncore->rpm, &wf_ops);
132 seqcount_mutex_init(>->stats.lock, >->wakeref.mutex);
133}
134
135void intel_gt_pm_init(struct intel_gt *gt)
136{
137
138
139
140
141
142 intel_rc6_init(>->rc6);
143 intel_rps_init(>->rps);
144}
145
146static bool reset_engines(struct intel_gt *gt)
147{
148 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
149 return false;
150
151 return __intel_gt_reset(gt, ALL_ENGINES) == 0;
152}
153
154static void gt_sanitize(struct intel_gt *gt, bool force)
155{
156 struct intel_engine_cs *engine;
157 enum intel_engine_id id;
158 intel_wakeref_t wakeref;
159
160 GT_TRACE(gt, "force:%s", yesno(force));
161
162
163 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
164 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
165
166 intel_gt_check_clock_frequency(gt);
167
168
169
170
171
172
173
174 if (intel_gt_is_wedged(gt))
175 intel_gt_unset_wedged(gt);
176
177 intel_uc_sanitize(>->uc);
178
179 for_each_engine(engine, gt, id)
180 if (engine->reset.prepare)
181 engine->reset.prepare(engine);
182
183 intel_uc_reset_prepare(>->uc);
184
185 for_each_engine(engine, gt, id)
186 if (engine->sanitize)
187 engine->sanitize(engine);
188
189 if (reset_engines(gt) || force) {
190 for_each_engine(engine, gt, id)
191 __intel_engine_reset(engine, false);
192 }
193
194 for_each_engine(engine, gt, id)
195 if (engine->reset.finish)
196 engine->reset.finish(engine);
197
198 intel_rps_sanitize(>->rps);
199
200 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
201 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
202}
203
204void intel_gt_pm_fini(struct intel_gt *gt)
205{
206 intel_rc6_fini(>->rc6);
207}
208
209int intel_gt_resume(struct intel_gt *gt)
210{
211 struct intel_engine_cs *engine;
212 enum intel_engine_id id;
213 int err;
214
215 err = intel_gt_has_unrecoverable_error(gt);
216 if (err)
217 return err;
218
219 GT_TRACE(gt, "\n");
220
221
222
223
224
225
226
227 gt_sanitize(gt, true);
228
229 intel_gt_pm_get(gt);
230
231 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
232 intel_rc6_sanitize(>->rc6);
233 if (intel_gt_is_wedged(gt)) {
234 err = -EIO;
235 goto out_fw;
236 }
237
238
239 err = intel_gt_init_hw(gt);
240 if (err) {
241 i915_probe_error(gt->i915,
242 "Failed to initialize GPU, declaring it wedged!\n");
243 goto err_wedged;
244 }
245
246 intel_rps_enable(>->rps);
247 intel_llc_enable(>->llc);
248
249 for_each_engine(engine, gt, id) {
250 intel_engine_pm_get(engine);
251
252 engine->serial++;
253 err = intel_engine_resume(engine);
254
255 intel_engine_pm_put(engine);
256 if (err) {
257 drm_err(>->i915->drm,
258 "Failed to restart %s (%d)\n",
259 engine->name, err);
260 goto err_wedged;
261 }
262 }
263
264 intel_rc6_enable(>->rc6);
265
266 intel_uc_resume(>->uc);
267
268 user_forcewake(gt, false);
269
270out_fw:
271 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
272 intel_gt_pm_put(gt);
273 return err;
274
275err_wedged:
276 intel_gt_set_wedged(gt);
277 goto out_fw;
278}
279
280static void wait_for_suspend(struct intel_gt *gt)
281{
282 if (!intel_gt_pm_is_awake(gt))
283 return;
284
285 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
286
287
288
289
290 intel_gt_set_wedged(gt);
291 intel_gt_retire_requests(gt);
292 }
293
294 intel_gt_pm_wait_for_idle(gt);
295}
296
297void intel_gt_suspend_prepare(struct intel_gt *gt)
298{
299 user_forcewake(gt, true);
300 wait_for_suspend(gt);
301
302 intel_uc_suspend(>->uc);
303}
304
/*
 * Report the system suspend target. Without CONFIG_SUSPEND/CONFIG_PM_SLEEP
 * the global pm_suspend_target_state does not exist, so assume
 * suspend-to-idle.
 */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
 return pm_suspend_target_state;
#else
 return PM_SUSPEND_TO_IDLE;
#endif
}
313
314void intel_gt_suspend_late(struct intel_gt *gt)
315{
316 intel_wakeref_t wakeref;
317
318
319 wait_for_suspend(gt);
320
321 if (is_mock_gt(gt))
322 return;
323
324 GEM_BUG_ON(gt->awake);
325
326
327
328
329
330
331
332
333
334
335
336 if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
337 return;
338
339 with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
340 intel_rps_disable(>->rps);
341 intel_rc6_disable(>->rc6);
342 intel_llc_disable(>->llc);
343 }
344
345 gt_sanitize(gt, false);
346
347 GT_TRACE(gt, "\n");
348}
349
350void intel_gt_runtime_suspend(struct intel_gt *gt)
351{
352 intel_uc_runtime_suspend(>->uc);
353
354 GT_TRACE(gt, "\n");
355}
356
357int intel_gt_runtime_resume(struct intel_gt *gt)
358{
359 GT_TRACE(gt, "\n");
360 intel_gt_init_swizzling(gt);
361 intel_ggtt_restore_fences(gt->ggtt);
362
363 return intel_uc_runtime_resume(>->uc);
364}
365
366static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
367{
368 ktime_t total = gt->stats.total;
369
370 if (gt->stats.active)
371 total = ktime_add(total,
372 ktime_sub(ktime_get(), gt->stats.start));
373
374 return total;
375}
376
377ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
378{
379 unsigned int seq;
380 ktime_t total;
381
382 do {
383 seq = read_seqcount_begin(>->stats.lock);
384 total = __intel_gt_get_awake_time(gt);
385 } while (read_seqcount_retry(>->stats.lock, seq));
386
387 return total;
388}
389
390#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
391#include "selftest_gt_pm.c"
392#endif
393