1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/kthread.h>
26#include <uapi/linux/sched/types.h>
27
28#include "i915_drv.h"
29
/*
 * Timer callback armed while a waiter has the user interrupt enabled:
 * if the deadline passes without the irq being re-armed, assume the
 * interrupt went missing and fall back to polling.
 */
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The interrupt was disabled in the meantime; nothing to watch. */
	if (!b->irq_enabled)
		return;

	/* Deadline not yet reached - rearm for the remaining interval. */
	if (time_before(jiffies, b->timeout)) {
		mod_timer(&b->hangcheck, b->timeout);
		return;
	}

	DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
	/* Record the missed interrupt ... */
	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
	/* ... and start polling waiters via the fake-irq timer instead. */
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

	/*
	 * NOTE(review): a missing interrupt may also indicate a hung GPU,
	 * hence queueing the full GPU hangcheck here - confirm against
	 * i915_queue_hangcheck() semantics.
	 */
	i915_queue_hangcheck(engine->i915);
}
58
59static unsigned long wait_timeout(void)
60{
61 return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
62}
63
64static void intel_breadcrumbs_fake_irq(unsigned long data)
65{
66 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
67
68
69
70
71
72
73
74
75 if (intel_engine_wakeup(engine))
76 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
77}
78
static void irq_enable(struct intel_engine_cs *engine)
{
	/*
	 * Set irq_posted before unmasking: the first waiter check after
	 * enabling then behaves as if an interrupt has already fired,
	 * so a breadcrumb raced with the unmask cannot be missed.
	 * NOTE(review): ordering inferred from the mirrored sequence in
	 * irq_disable() - confirm against the irq handler's use of
	 * irq_posted.
	 */
	engine->breadcrumbs.irq_posted = true;

	/* The irq mask registers are shared across engines; serialise. */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
92
static void irq_disable(struct intel_engine_cs *engine)
{
	/* The irq mask registers are shared across engines; serialise. */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);

	/*
	 * Clear irq_posted only after the irq is masked, mirroring the
	 * set-before-unmask ordering in irq_enable().
	 */
	engine->breadcrumbs.irq_posted = false;
}
102
/*
 * Arm the user interrupt (or the fake-irq fallback) on behalf of the
 * current first waiter, pinning an rpm wakeref for as long as it is
 * armed. Caller holds b->lock.
 */
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	/* rpm_wakelock doubles as the "already armed" flag. */
	if (b->rpm_wakelock)
		return;

	/*
	 * NOTE(review): the _noresume variant presumably relies on the
	 * device already being awake while requests are in flight - we
	 * merely pin it awake. Confirm against runtime-pm rules.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* test_irq_rings lets testing force the fake-irq path below. */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		/* No (reliable) irq: poll waiters from a 1-jiffy timer. */
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		/* Irq armed: start the watchdog for it going missing. */
		GEM_BUG_ON(!time_after(b->timeout, jiffies));
		mod_timer(&b->hangcheck, b->timeout);
	}
}
137
138static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
139{
140 struct intel_engine_cs *engine =
141 container_of(b, struct intel_engine_cs, breadcrumbs);
142
143 assert_spin_locked(&b->lock);
144 if (!b->rpm_wakelock)
145 return;
146
147 if (b->irq_enabled) {
148 irq_disable(engine);
149 b->irq_enabled = false;
150 }
151
152 intel_runtime_pm_put(engine->i915);
153 b->rpm_wakelock = false;
154}
155
156static inline struct intel_wait *to_wait(struct rb_node *node)
157{
158 return rb_entry(node, struct intel_wait, node);
159}
160
/* Retire a completed waiter: unlink it from the tree and wake it. */
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/*
	 * Erase before waking: once woken the waiter may return and
	 * free its intel_wait, so it must already be detached here.
	 * RB_CLEAR_NODE marks it removed so a later
	 * intel_engine_remove_wait() becomes a no-op for this waiter.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk);
}
174
/*
 * Insert @wait into the engine's rbtree of waiters, ordered by seqno
 * (ties broken by task priority so the most important waiter is
 * leftmost). The leftmost waiter is the irq "bottom-half" responsible
 * for waking the others. Returns true if @wait became that first
 * waiter. Caller holds b->lock.
 */
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* Seqno already passed? No need to wait (or insert) at all. */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/*
			 * Duplicate seqno: order by task priority (lower
			 * prio value = more important = further left).
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			/* Remember waiters the HW seqno already passed. */
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	/* If we are not first, somebody must already own the irq. */
	GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			/*
			 * The old first waiter completed; hand the
			 * bottom-half duty to the next incomplete waiter.
			 */
			GEM_BUG_ON(first);
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			/*
			 * Rearm the irq for the new bottom-half, and wake
			 * it in case an interrupt already fired.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		/* Retire every waiter whose seqno has already passed. */
		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		/* We are the new bottom-half: claim the interrupt. */
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->timeout = wait_timeout();
		b->first_wait = wait;
		rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}
292
293bool intel_engine_add_wait(struct intel_engine_cs *engine,
294 struct intel_wait *wait)
295{
296 struct intel_breadcrumbs *b = &engine->breadcrumbs;
297 bool first;
298
299 spin_lock_irq(&b->lock);
300 first = __intel_engine_add_wait(engine, wait);
301 spin_unlock_irq(&b->lock);
302
303 return first;
304}
305
306static inline bool chain_wakeup(struct rb_node *rb, int priority)
307{
308 return rb && to_wait(rb)->tsk->prio <= priority;
309}
310
311static inline int wakeup_priority(struct intel_breadcrumbs *b,
312 struct task_struct *tsk)
313{
314 if (tsk == b->signaler)
315 return INT_MIN;
316 else
317 return tsk->prio;
318}
319
/*
 * Remove @wait from the engine's waiter tree. If @wait is the current
 * irq bottom-half, retire any already-completed peers and hand the
 * duty (irq + wakeup responsibility) to the next waiter.
 */
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * Lockless fast path: an empty node means we were already
	 * retired by __intel_breadcrumbs_finish() (or never inserted).
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->lock);

	/* Recheck under the lock - we may have just been retired. */
	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/*
			 * Retire equal-or-higher-priority waiters whose
			 * seqno already passed ourselves, instead of
			 * waking each just to have it remove itself.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* Promote the next waiter to bottom-half. */
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			/*
			 * Equal seqnos share the same irq state; only
			 * rearm when the awaited seqno actually changes.
			 */
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->first_wait->tsk);
		} else {
			/* Tree now empty: drop the irq and the wakeref. */
			b->first_wait = NULL;
			rcu_assign_pointer(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		/* Not the bottom-half, so cannot be leftmost. */
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock_irq(&b->lock);
}
408
409static bool signal_complete(struct drm_i915_gem_request *request)
410{
411 if (!request)
412 return false;
413
414
415
416
417 if (intel_wait_complete(&request->signaling.wait))
418 return true;
419
420
421
422
423 if (__i915_request_irq_complete(request))
424 return true;
425
426 return false;
427}
428
429static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
430{
431 return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
432}
433
434static void signaler_set_rtpriority(void)
435{
436 struct sched_param param = { .sched_priority = 1 };
437
438 sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m);
439}
440
/*
 * Per-engine kthread that waits for requests queued for signaling to
 * complete, then signals their dma-fences in seqno order.
 */
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * b->first_signal (the oldest outstanding signal) is
		 * updated under b->lock but read locklessly here; a
		 * stale read is resolved by the recheck under the lock
		 * before removal below.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Stop waiting on the HW seqno for this request. */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);

			/*
			 * NOTE(review): bh-disable around the signal -
			 * presumably so fence callbacks run without
			 * softirq interleaving; confirm intent.
			 */
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable();

			/* Unlink from the signal tree; advance first_signal
			 * if we were at the head.
			 */
			spin_lock_irq(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock_irq(&b->lock);

			/* Drop the ref taken by enable_signaling(). */
			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
500
/*
 * Queue @request for fence signaling: register the signaler kthread as
 * a waiter on the request's seqno and insert the request into the
 * per-engine signal tree in seqno order. Caller holds request->lock.
 */
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* Without a global seqno there is nothing to track against. */
	assert_spin_locked(&request->lock);
	if (!request->global_seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->global_seqno;
	/* Reference dropped by the signaler after dma_fence_signal(). */
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/*
	 * Add the signaler as a waiter on this seqno; wakeup tells us
	 * whether it became the first waiter and so must be kicked.
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Insert into the signal tree, oldest seqno leftmost. */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->global_seqno,
				      to_signaler(parent)->global_seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	/* Publish the new head for the signaler's lockless read. */
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
563
564int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
565{
566 struct intel_breadcrumbs *b = &engine->breadcrumbs;
567 struct task_struct *tsk;
568
569 spin_lock_init(&b->lock);
570 setup_timer(&b->fake_irq,
571 intel_breadcrumbs_fake_irq,
572 (unsigned long)engine);
573 setup_timer(&b->hangcheck,
574 intel_breadcrumbs_hangcheck,
575 (unsigned long)engine);
576
577
578
579
580
581
582
583 tsk = kthread_run(intel_breadcrumbs_signaler, engine,
584 "i915/signal:%d", engine->id);
585 if (IS_ERR(tsk))
586 return PTR_ERR(tsk);
587
588 b->signaler = tsk;
589
590 return 0;
591}
592
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * Kill the hangcheck first: its callback rearms fake_irq, so
	 * this order stops fake_irq being re-queued after we flush it.
	 */
	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	/* Forget any recorded missed interrupts for this engine. */
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
601
/*
 * Rebuild the breadcrumbs irq state from scratch (e.g. after a GPU
 * reset): disable everything, then re-enable only if waiters remain.
 */
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->lock);

	__intel_breadcrumbs_disable_irq(b);
	if (intel_engine_has_waiter(engine)) {
		b->timeout = wait_timeout();
		__intel_breadcrumbs_enable_irq(b);
		/* An interrupt may already be pending for the waiter. */
		if (READ_ONCE(b->irq_posted))
			wake_up_process(b->first_wait->tsk);
	} else {
		/* No waiters: make sure the HW irq really is masked. */
		irq_disable(engine);
	}

	spin_unlock_irq(&b->lock);
}
622
/* Teardown counterpart of intel_engine_init_breadcrumbs(). */
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* All waiters and pending signals should be flushed by now. */
	WARN_ON(READ_ONCE(b->first_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(READ_ONCE(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	/* Guard against init having failed before the kthread spawned. */
	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
638
639unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
640{
641 struct intel_engine_cs *engine;
642 enum intel_engine_id id;
643 unsigned int mask = 0;
644
645 for_each_engine(engine, i915, id) {
646 struct intel_breadcrumbs *b = &engine->breadcrumbs;
647
648 spin_lock_irq(&b->lock);
649
650 if (b->first_wait) {
651 wake_up_process(b->first_wait->tsk);
652 mask |= intel_engine_flag(engine);
653 }
654
655 if (b->first_signal) {
656 wake_up_process(b->signaler);
657 mask |= intel_engine_flag(engine);
658 }
659
660 spin_unlock_irq(&b->lock);
661 }
662
663 return mask;
664}
665