// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>
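/*
 * Power Management:
 */
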
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

static void msm_devfreq_init(struct msm_gpu *gpu)
{
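	/* We need target support to do devfreq */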
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

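	/*
	 * Don't set the freq_table or max_state and let devfreq build the
	 * table from the OPP table
	 */
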
	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, "simple_ondemand", NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
	}

	devfreq_suspend_device(gpu->devfreq.devfreq);
}

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

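	/* Set the RBBM timer rate to 19.2Mhz */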
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

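	/*
	 * Set the clock to a deliberately low rate while the GPU is
	 * powered down, rather than leaving it at the last fast rate
	 */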
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

295
296static void msm_gpu_devcoredump_free(void *data)
297{
298 struct msm_gpu *gpu = data;
299
300 msm_gpu_crashstate_put(gpu);
301}
302
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	state_bo->size = obj->base.size;
	state_bo->iova = iova;

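	/* Only store data for non imported buffer objects marked for read */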
	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

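	/* Check if the target supports capturing crash state */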
	if (!gpu->funcs->gpu_state_get)
		return;

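	/* Only save one crash state at a time */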
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

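	/* Fill in the additional crash state information */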
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_cmds,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
				submit->bos[idx].iova, submit->bos[idx].flags);
		}
	}

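	/* Set the active crash state to be dumped on failure */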
	gpu->crashstate = state;

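	/* FIXME: Release the crashstate if this errors out? */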
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

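/*
 * Hangcheck detection for locked gpu:
 */
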
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

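		/* Increment the fault counts */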
		gpu->global_faults++;
		submit->queue->faults++;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}
	}

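	/* Record the crash state */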
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

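	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to be done after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */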
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		uint32_t fence = ring->memptrs->fence;

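		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */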
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
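		/* retire completed submits, plus the one that hung: */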
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

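		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */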
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
		round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
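		/* some progress has been made.. ya! */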
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
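		/* no progress and not done.. hung! */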
		ring->hangcheck_fence = fence;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
			gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
			gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
			gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

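	/* if still more pending work, reset the hangcheck timer: */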
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

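	/* workaround for missing irq: */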
	queue_work(priv->wq, &gpu->retire_work);
}

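/*
 * Performance Counters:
 */
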
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

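	/* read current values: */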
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

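	/* update cntrs: */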
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

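	/* save current values: */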
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
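	/* we could dynamically enable/disable perfcntr registers too.. */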
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

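/* returns -errno or # of cntrs sampled */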
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

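/*
 * Cmdstream submission/retirement:
 */
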
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0;
	int i;

	stats = &ring->memptrs->stats[index];

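	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */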
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

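	/* Calculate the clock frequency from the number of CP cycles */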
	if (elapsed) {
		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
		do_div(clock, elapsed);
	}

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
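		/* move to inactive: */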
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
		drm_gem_object_put(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

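	/* Retire the commits starting with highest priority */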
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, ring, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

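/* call from irq handler to schedule work to retire bo's */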
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

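/* add bo's to gpu's ring, and kick gpu: */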
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

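		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */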
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

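		/* submit takes a reference to the bo and iova until retired: */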
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

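/*
 * Init/Cleanup:
 */
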
static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;

	return gpu->funcs->irq(gpu);
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct msm_gem_address_space *aspace;
	int ret;

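	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */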
	if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
		struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);

		if (!iommu)
			return NULL;

		iommu->geometry.aperture_start = va_start;
		iommu->geometry.aperture_end = va_end;

		DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

		aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
		if (IS_ERR(aspace))
			iommu_domain_free(iommu);
	} else {
		aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
			va_start, va_end);
	}

	if (IS_ERR(aspace)) {
		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
			PTR_ERR(aspace));
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

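	/* Map registers: */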
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

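	/* Get Interrupt: */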
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

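	/* Acquire regulators: */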
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL) {
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	} else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

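	/* Create ringbuffer(s): */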
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
			NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}