// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	8
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name)
{
	int i;
	char n[32];

	snprintf(n, sizeof(n), "%s_clk", name);

	for (i = 0; bulk && i < count; i++) {
		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
			return bulk[i].clk;
	}

	return NULL;
}

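/*
 * Look up a clock by name; if the canonical name is not found, fall back
 * to the legacy "<name>_clk" binding and warn so the DT can be updated.
 */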
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}

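/*
 * Map a MEM resource (by name, or the first one when name is NULL) with
 * devm_ioremap(); optionally returns the region size via *psize, and
 * suppresses error messages when 'quiet' is set.
 */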
static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
				  const char *dbgname, bool quiet, phys_addr_t *psize)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		if (!quiet)
			DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap(&pdev->dev, res->start, size);
	if (!ptr) {
		if (!quiet)
			DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	if (psize)
		*psize = size;

	return ptr;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
			  const char *dbgname)
{
	return _msm_ioremap(pdev, name, dbgname, false, NULL);
}

void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
				const char *dbgname)
{
	return _msm_ioremap(pdev, name, dbgname, true, NULL);
}

void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
			       const char *dbgname, phys_addr_t *psize)
{
	return _msm_ioremap(pdev, name, dbgname, false, psize);
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
	return val;
}

void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
	u32 val = msm_readl(addr);

	val &= ~mask;
	msm_writel(val | or, addr);
}

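/*
 * IRQ handling: the driver installs its own handler and dispatches to the
 * KMS backend, wrapping the backend's pre/postinstall hooks around
 * request_irq()/free_irq().
 */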
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	free_irq(kms->irq, dev);
}

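/*
 * Vblank enable/disable requests can arrive in atomic context (hence the
 * GFP_ATOMIC allocation below), so the actual KMS enable_vblank()/
 * disable_vblank() calls are deferred to a worker on priv->wq.
 */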
struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);
	kfree(priv);

	return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

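/*
 * The KMS_* generation is carried in the .data field of the dt_match[]
 * table at the bottom of this file; e.g. a node with
 * compatible = "qcom,sdm845-mdss" resolves to KMS_DPU.
 */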
static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* a2xx comes with its own MMU */
	return priv->is_a2xx || iommu_present(&platform_bus_type);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case the reserved region
	 *     tells us where the bootloader framebuffer lives, and
	 *     we take it over as our carveout.
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = resource_size(&r);
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

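/*
 * Bring up the device: allocate the drm_device, init the top-level mdss
 * (MDP5/DPU), carve out VRAM, bind sub-components, init the KMS backend,
 * spawn per-crtc event threads, then wire up vblank + irq and register.
 */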
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	struct msm_mdss *mdss;
	int ret, i;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_drm_dev;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
		ret = mdp5_mdss_init(ddev);
		break;
	case KMS_DPU:
		ret = dpu_mdss_init(ddev);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret)
		goto err_free_priv;

	mdss = priv->mdss;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	INIT_LIST_HEAD(&priv->inactive_willneed);
	INIT_LIST_HEAD(&priv->inactive_dontneed);
	INIT_LIST_HEAD(&priv->inactive_unpinned);
	mutex_init(&priv->mm_lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_destroy_mdss;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_destroy_mdss;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case KMS_MDP5:
		kms = mdp5_kms_init(ddev);
		break;
	case KMS_DPU:
		kms = dpu_kms_init(ddev);
		priv->kms = kms;
		break;
	default:
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
		break;
	}

	if (IS_ERR(kms)) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		priv->kms = NULL;
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			goto err_msm_uninit;
		}

		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
err_destroy_mdss:
	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);
err_free_priv:
	kfree(priv);
err_put_drm_dev:
	drm_dev_put(ddev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

/*
 * DRM operations:
 */

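/* Idempotent: guarded by init_lock so the GPU is only loaded once. */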
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	context_close(ctx);
}

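/*
 * Vblank on/off entry-points called from the drm_vblank core; these just
 * hand the request off to the worker via vblank_ctrl_queue_work().
 */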
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	struct dma_fence *fence;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	ret = mutex_lock_interruptible(&queue->lock);
	if (ret) {
		msm_submitqueue_put(queue);
		return ret;
	}

	fence = idr_find(&queue->fence_idr, args->fence);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->lock);

	if (!fence) {
		msm_submitqueue_put(queue);
		return 0;
	}

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);
	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

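/* Standard DRM file_operations (open/release/mmap/ioctl dispatch/etc.): */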
DEFINE_DRM_GEM_FOPS(fops);

static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap     = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int __maybe_unused msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}

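/*
 * System sleep reuses the runtime PM hooks; if the device is already
 * runtime-suspended there is nothing left to do.
 */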
static int __maybe_unused msm_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return msm_runtime_suspend(dev);
}

static int __maybe_unused msm_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return msm_runtime_resume(dev);
}

static int __maybe_unused msm_pm_prepare(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

static void __maybe_unused msm_pm_complete(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other).
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * will be one external component. In the case of HDMI, there should be one
 * internal and one external component.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds the MDP5 (and other display interface) components
	 * to itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct platform_device *pdev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
	case KMS_DPU:
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
		break;
	case KMS_MDP4:
		/* MDP4 */
		mdp_dev = dev;
		break;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	if (get_mdp_ver(pdev)) {
		ret = add_display_components(pdev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	of_platform_depopulate(&pdev->dev);
	return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static void msm_pdev_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = drm ? drm->dev_private : NULL;

	if (!priv || !priv->kms)
		return;

	drm_atomic_helper_shutdown(drm);
}

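/* Maps DT compatibles to the KMS_* generation consumed by get_mdp_ver(): */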
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.shutdown   = msm_pdev_shutdown,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");