// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

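/*
 * Scheduler queue life cycle, as used by the timeout and reset paths below:
 * ACTIVE -> STOPPED (panfrost_scheduler_stop()) -> STARTING -> ACTIVE
 * (panfrost_scheduler_start()). FAULT_PENDING is set by the job IRQ handler
 * when a fault arrives while the queue is being restarted, so the fault can
 * be reported once the scheduler is running again.
 */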
enum panfrost_queue_status {
        PANFROST_QUEUE_STATUS_ACTIVE,
        PANFROST_QUEUE_STATUS_STOPPED,
        PANFROST_QUEUE_STATUS_STARTING,
        PANFROST_QUEUE_STATUS_FAULT_PENDING,
};

struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
        atomic_t status;
        struct mutex lock;
        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;

        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};

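/*
 * Each job slot gets its own dma_fence context at init time, so fences
 * created here are ordered by a per-slot seqno rather than a global one.
 */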
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs.
         * JS1: vertex/tiler jobs
         * JS2: compute jobs
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

/* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}
133
134static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
135 u32 requirements,
136 int js)
137{
138 u64 affinity;
139
140
141
142
143
144
145 affinity = pfdev->features.shader_present;
146
147 job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
148 job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
149}
150
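/*
 * Program the job chain into the slot's _NEXT registers and kick it off with
 * JS_COMMAND_START. Runtime PM and devfreq are marked busy here; the job IRQ
 * handler drops them again when the job completes (the reset worker does the
 * same for jobs that never finish).
 */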
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
                return;

        cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
        job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /* Medium thread priority, cache clean/flush on both start and end */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
                JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
                JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* Kick it off */
        dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
                job, js, jc_head);

        job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                           int bo_count,
                                           struct dma_fence **implicit_fences)
{
        int i;

        for (i = 0; i < bo_count; i++)
                implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_excl_fence(bos[i]->resv, fence);
}

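/*
 * Queue a job on its file's scheduler entity. The BO reservations are held
 * across fence acquisition and attachment so that grabbing the implicit
 * fences and publishing the render-done fence appear atomic to other
 * submitters, and pfdev->sched_lock is held across drm_sched_job_init() and
 * drm_sched_entity_push_job().
 */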
int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        mutex_lock(&pfdev->sched_lock);

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                        &acquire_ctx);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                return ret;
        }

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        kref_get(&job->refcount);

        panfrost_acquire_object_fences(job->bos, job->bo_count,
                                       job->implicit_fences);

        drm_sched_entity_push_job(&job->base, entity);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        unsigned int i;

        if (job->in_fences) {
                for (i = 0; i < job->in_fence_count; i++)
                        dma_fence_put(job->in_fences[i]);
                kvfree(job->in_fences);
        }
        if (job->implicit_fences) {
                for (i = 0; i < job->bo_count; i++)
                        dma_fence_put(job->implicit_fences[i]);
                kvfree(job->implicit_fences);
        }
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->mappings) {
                for (i = 0; i < job->bo_count; i++) {
                        if (!job->mappings[i])
                                break;

                        atomic_dec(&job->mappings[i]->obj->gpu_usecount);
                        panfrost_gem_mapping_put(job->mappings[i]);
                }
                kvfree(job->mappings);
        }

        if (job->bos) {
                for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put(job->bos[i]);

                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}

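/*
 * drm_sched calls this repeatedly before running a job; each call hands back
 * one outstanding dependency (and drops our pointer to it), returning NULL
 * once everything the job waits on has been consumed.
 */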
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
                                                 struct drm_sched_entity *s_entity)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct dma_fence *fence;
        unsigned int i;

        /* Explicit fences */
        for (i = 0; i < job->in_fence_count; i++) {
                if (job->in_fences[i]) {
                        fence = job->in_fences[i];
                        job->in_fences[i] = NULL;
                        return fence;
                }
        }

        /* Implicit fences, max. one per BO */
        for (i = 0; i < job->bo_count; i++) {
                if (job->implicit_fences[i]) {
                        fence = job->implicit_fences[i];
                        job->implicit_fences[i] = NULL;
                        return fence;
                }
        }

        return NULL;
}

static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        pfdev->jobs[slot] = job;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return NULL;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                irq_mask |= MK_JS_MASK(j);

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

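/*
 * panfrost_scheduler_stop()/panfrost_scheduler_start() bracket the timeout
 * and reset paths: stop parks the drm_gpu_scheduler for a queue (at most
 * once, guarded by the status field), start resubmits the pending jobs and
 * re-arms the timeout once the GPU has been reset.
 */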
static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
                                    struct drm_sched_job *bad)
{
        enum panfrost_queue_status old_status;
        bool stopped = false;

        mutex_lock(&queue->lock);
        old_status = atomic_xchg(&queue->status,
                                 PANFROST_QUEUE_STATUS_STOPPED);
        if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
                goto out;

        WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
        drm_sched_stop(&queue->sched, bad);
        if (bad)
                drm_sched_increase_karma(bad);

        stopped = true;

        /*
         * Set the timeout to max so the timer doesn't get started
         * when we return from the timeout handler or resume the
         * scheduler.
         */
        queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;

out:
        mutex_unlock(&queue->lock);

        return stopped;
}

static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
{
        enum panfrost_queue_status old_status;

        mutex_lock(&queue->lock);
        old_status = atomic_xchg(&queue->status,
                                 PANFROST_QUEUE_STATUS_STARTING);
        WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);

        /* Restore the original timeout before starting the scheduler. */
        queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
        drm_sched_resubmit_jobs(&queue->sched);
        drm_sched_start(&queue->sched, true);
        old_status = atomic_xchg(&queue->status,
                                 PANFROST_QUEUE_STATUS_ACTIVE);
        if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
                drm_sched_fault(&queue->sched);

        mutex_unlock(&queue->lock);
}

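/*
 * drm_sched timeout handler. The heavy lifting (stopping the other queues and
 * resetting the GPU) is deferred to the reset worker; this handler only stops
 * the offending queue and flags the reset, so it always reports
 * DRM_GPU_SCHED_STAT_NOMINAL back to the scheduler.
 */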
static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                                                     *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        /* Scheduler is already stopped, nothing to do. */
        if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        /* Schedule a reset if there's no reset in progress. */
        if (!atomic_xchg(&pfdev->reset.pending, 1))
                schedule_work(&pfdev->reset.work);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .dependency = panfrost_job_dependency,
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};

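/*
 * Job IRQ handler. Each slot contributes a "done" and a "fail" bit to
 * JOB_INT_STAT; both are acked per slot and handled below, with the done
 * path signalling the job's fence and releasing the runtime PM/devfreq
 * references taken at submit time.
 */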
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);
        int j;

        dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

        if (!status)
                return IRQ_NONE;

        pm_runtime_mark_last_busy(pfdev->dev);

        for (j = 0; status; j++) {
                u32 mask = MK_JS_MASK(j);

                if (!(status & mask))
                        continue;

                job_write(pfdev, JOB_INT_CLEAR, mask);

                if (status & JOB_INT_MASK_ERR(j)) {
                        enum panfrost_queue_status old_status;

                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

                        dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                                j,
                                panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
                                job_read(pfdev, JS_HEAD_LO(j)),
                                job_read(pfdev, JS_TAIL_LO(j)));

                        /*
                         * When the queue is being restarted, the fault is not
                         * reported directly to avoid racing with the reset
                         * path: the status is flipped to FAULT_PENDING and
                         * panfrost_scheduler_start() calls drm_sched_fault()
                         * once the queue is running again.
                         */
                        old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
                                                    PANFROST_QUEUE_STATUS_STARTING,
                                                    PANFROST_QUEUE_STATUS_FAULT_PENDING);
                        if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
                                drm_sched_fault(&pfdev->js->queue[j].sched);
                }

                if (status & JOB_INT_MASK_DONE(j)) {
                        struct panfrost_job *job;

                        spin_lock(&pfdev->js->job_lock);
                        job = pfdev->jobs[j];
                        /* NULL if the reset work already reclaimed this job */
                        if (job) {
                                pfdev->jobs[j] = NULL;

                                panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
                                panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

                                dma_fence_signal_locked(job->done_fence);
                                pm_runtime_put_autosuspend(pfdev->dev);
                        }
                        spin_unlock(&pfdev->js->job_lock);
                }

                status &= ~mask;
        }

        return IRQ_HANDLED;
}

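/*
 * GPU reset worker, scheduled from the timeout handler. It parks every
 * scheduler queue, drops the bookkeeping for jobs that were on the hardware,
 * resets the GPU and then restarts the queues, which resubmits the
 * unfinished jobs.
 */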
static void panfrost_reset(struct work_struct *work)
{
        struct panfrost_device *pfdev = container_of(work,
                                                     struct panfrost_device,
                                                     reset.work);
        unsigned long flags;
        unsigned int i;
        bool cookie;

        cookie = dma_fence_begin_signalling();
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /*
                 * Cancel any in-flight timeout handler before stopping the
                 * queue, so the timeout and reset paths don't race over the
                 * same scheduler. The timeout is temporarily set to
                 * MAX_SCHEDULE_TIMEOUT - 1 ("effectively never", but distinct
                 * from the value panfrost_scheduler_stop() installs) while
                 * the pending timeout work is cancelled.
                 */
                pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
                cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
                panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
        }

        /* All timers are stopped, it is now safe to clear the pending flag. */
        atomic_set(&pfdev->reset.pending, 0);

        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                if (pfdev->jobs[i]) {
                        pm_runtime_put_noidle(pfdev->dev);
                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
                        pfdev->jobs[i] = NULL;
                }
        }
        spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

        panfrost_device_reset(pfdev);

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                panfrost_scheduler_start(&pfdev->js->queue[i]);

        dma_fence_end_signalling(cookie);
}

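/*
 * One drm_gpu_scheduler instance is created per job slot, all sharing the
 * "job" interrupt line; the per-slot fence context allocated here is what
 * panfrost_fence_create() uses to order fences within a slot.
 */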
int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        int ret, j, irq;

        INIT_WORK(&pfdev->reset.work, panfrost_reset);

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        spin_lock_init(&js->job_lock);

        irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (irq <= 0)
                return -ENODEV;

        ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                mutex_init(&js->queue[j].lock);

                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
                                     NULL, "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                drm_sched_fini(&js->queue[j].sched);
                mutex_destroy(&js->queue[j].lock);
        }
}

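/*
 * Per-file-descriptor setup: each open file gets one scheduler entity per
 * job slot, which is what panfrost_job_push() submits through.
 */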
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_gpu_scheduler *sched;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                sched = &js->queue[i].sched;
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL, &sched,
                                            1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

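/* Reports whether any scheduler still has jobs on the hardware queue. */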
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;
        }

        return true;
}