/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

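/*
 * Book-keeping for one atomic commit: the work item used for nonblocking
 * commits, the device and state being committed, and a bitmask of the
 * CRTCs touched by the commit.
 */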
struct exynos_atomic_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};

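/* Wait for the pending plane updates of every enabled CRTC in the state. */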
static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		if (!crtc->state->enable)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			continue;

		exynos_drm_crtc_wait_pending_update(exynos_crtc);
		drm_crtc_vblank_put(crtc);
	}
}

static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_atomic_state *state = commit->state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/*
	 * Exynos cannot update planes while CRTCs and encoders are disabled:
	 * the plane update routines (FIMD in particular) need the device
	 * clocks to be running. Handle the modeset operations *before* the
	 * commit_planes() step so the relevant clocks are always enabled
	 * when the planes are updated.
	 */

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		atomic_set(&exynos_crtc->pending_update, 0);
	}

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct exynos_drm_crtc *exynos_crtc =
					to_exynos_crtc(plane->crtc);

		if (!plane->crtc)
			continue;

		atomic_inc(&exynos_crtc->pending_update);
	}

	drm_atomic_helper_commit_planes(dev, state, false);

	exynos_atomic_wait_for_commit(state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	spin_lock(&priv->lock);
	priv->pending &= ~commit->crtcs;
	spin_unlock(&priv->lock);

	wake_up_all(&priv->wait);

	kfree(commit);
}

static void exynos_drm_atomic_work(struct work_struct *work)
{
	struct exynos_atomic_commit *commit = container_of(work,
				struct exynos_atomic_commit, work);

	exynos_atomic_commit_complete(commit);
}

static struct device *exynos_drm_get_dma_device(void);

static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	unsigned int clone_mask;
	int cnt, ret;

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev->dev, dev);
	dev->dev_private = (void *)private;

	/* the first real CRTC device is used for all DMA mapping operations */
	private->dma_dev = exynos_drm_get_dma_device();
	if (!private->dma_dev) {
		DRM_ERROR("no device found for DMA mapping operations.\n");
		ret = -ENODEV;
		goto err_free_private;
	}
	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
		 dev_name(private->dma_dev));

	/* create a common IOMMU mapping for all devices attached to Exynos DRM */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_free_private;
	}

	drm_mode_config_init(dev);

	exynos_drm_mode_config_init(dev);

	/* setup possible_clones: every encoder may be cloned with the others */
	cnt = 0;
	clone_mask = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		clone_mask |= (1 << (cnt++));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	platform_set_drvdata(dev->platformdev, dev);

	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	/* Probe non-KMS sub drivers and the virtual display driver. */
	ret = exynos_drm_device_subdrv_probe(dev);
	if (ret)
		goto err_cleanup_vblank;

	drm_mode_config_reset(dev);

	/*
	 * Enable DRM irq mode: with irq_enabled = true the vblank feature
	 * can be used.
	 *
	 * Note that the DRM core irq handler is not used here; each device
	 * driver installs its own interrupt handler instead, because the
	 * DRM framework supports only a single irq handler.
	 */
	dev->irq_enabled = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_cleanup_vblank:
	drm_vblank_cleanup(dev);
err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);
err_free_private:
	kfree(private);

	return ret;
}

static int exynos_drm_unload(struct drm_device *dev)
{
	exynos_drm_device_subdrv_remove(dev);

	exynos_drm_fbdev_fini(dev);
	drm_kms_helper_poll_fini(dev);

	drm_vblank_cleanup(dev);
	component_unbind_all(dev->dev, dev);
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

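/* Return true if any of the given CRTCs still has a commit in flight. */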
static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;
	spin_unlock(&priv->lock);

	return pending;
}

int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
			 bool nonblock)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_atomic_commit *commit;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* This is the point of no return */

	INIT_WORK(&commit->work, exynos_drm_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/*
	 * Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		commit->crtcs |= drm_crtc_mask(crtc);

	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));

	spin_lock(&priv->lock);
	priv->pending |= commit->crtcs;
	spin_unlock(&priv->lock);

	drm_atomic_helper_swap_state(state, true);

	if (nonblock)
		schedule_work(&commit->work);
	else
		exynos_atomic_commit_complete(commit);

	return 0;
}

static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	ret = exynos_drm_subdrv_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct drm_crtc *crtc;

	exynos_drm_subdrv_close(dev, file);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		exynos_drm_crtc_cancel_page_flip(crtc, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
			DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = exynos_drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

static struct drm_driver exynos_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
			   | DRIVER_ATOMIC | DRIVER_RENDER,
	.load = exynos_drm_load,
	.unload = exynos_drm_unload,
	.open = exynos_drm_open,
	.preclose = exynos_drm_preclose,
	.lastclose = exynos_drm_lastclose,
	.postclose = exynos_drm_postclose,
	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank = exynos_drm_crtc_enable_vblank,
	.disable_vblank = exynos_drm_crtc_disable_vblank,
	.gem_free_object_unlocked = exynos_drm_gem_free_object,
	.gem_vm_ops = &exynos_drm_gem_vm_ops,
	.dumb_create = exynos_drm_gem_dumb_create,
	.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap = exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
	.gem_prime_mmap = exynos_drm_gem_prime_mmap,
	.ioctls = exynos_ioctls,
	.num_ioctls = ARRAY_SIZE(exynos_ioctls),
	.fops = &exynos_drm_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif

static const struct dev_pm_ops exynos_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports the component framework */
#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create a virtual platform device */
#define DRM_DMA_DEVICE		BIT(2)	/* can be used for DMA mapping */

#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)

/*
 * Connector drivers should not be placed before their associated CRTC
 * drivers, because a connector requires the pipe number of its CRTC
 * during initialization.
 */
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
	{
		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
	}, {
		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
	}, {
		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
	}, {
		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
	}, {
		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
	}, {
		DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
		DRM_VIRTUAL_DEVICE
	}, {
		&exynos_drm_platform_driver,
		DRM_VIRTUAL_DEVICE
	}
};

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

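/*
 * Build the component match list by walking the platform bus for devices
 * bound to each registered Exynos component driver.
 */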
static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *p = NULL, *d;

		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
			continue;

		while ((d = bus_find_device(&platform_bus_type, p,
					    &info->driver->driver,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, &match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

static int exynos_drm_bind(struct device *dev)
{
	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}

static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops exynos_drm_ops = {
	.bind = exynos_drm_bind,
	.unbind = exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe = exynos_drm_platform_probe,
	.remove = exynos_drm_platform_remove,
	.driver = {
		.name = "exynos-drm",
		.pm = &exynos_drm_pm_ops,
	},
};

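/*
 * Return the first platform device bound to a driver marked DRM_DMA_DEVICE;
 * this device is used for all DMA mapping operations.
 */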
static struct device *exynos_drm_get_dma_device(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			return dev;
		}
	}
	return NULL;
}

static void exynos_drm_unregister_devices(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			platform_device_unregister(to_platform_device(dev));
		}
	}
}

static int exynos_drm_register_devices(void)
{
	struct platform_device *pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		pdev = platform_device_register_simple(
					info->driver->driver.name, -1, NULL, 0);
		if (IS_ERR(pdev))
			goto fail;
	}

	return 0;
fail:
	exynos_drm_unregister_devices();
	return PTR_ERR(pdev);
}

static void exynos_drm_unregister_drivers(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		platform_driver_unregister(info->driver);
	}
}

static int exynos_drm_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		ret = platform_driver_register(info->driver);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	exynos_drm_unregister_drivers();
	return ret;
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");