/*
 * Freescale i.MX drm driver
 *
 * Copyright (C) 2011 Sascha Hauer, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
#include <video/imx-ipu-v3.h>

#include "imx-drm.h"
#include "ipuv3-plane.h"

#define MAX_CRTC	4

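/* Per-device driver state; stored in drm_device::dev_private at bind time. */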
struct imx_drm_device {
	struct drm_device	*drm;
	unsigned int		pipes;
	struct drm_atomic_state	*state;
};

#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
#endif

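/* Default DRM file operations, backed by the CMA GEM helpers (mmap etc.). */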
DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);

void imx_drm_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);

void imx_drm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);

static int imx_drm_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * Check the modeset again in case crtc_state->mode_changed was
	 * updated in a plane's ->atomic_check callback.
	 */
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* Assign PRG/PRE channels and check that all constraints are satisfied. */
	ret = ipu_planes_assign_pre(dev, state);
	if (ret)
		return ret;

	return ret;
}

static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = imx_drm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

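/*
 * Custom commit_tail: commit CRTC disables first, then planes (active CRTCs
 * only, and without disabling planes right after a modeset), then CRTC
 * enables.  IPU planes that are being switched off are only torn down after
 * the flip has completed, via ipu_plane_disable_deferred().
 */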
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	bool plane_disabling = false;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state,
				DRM_PLANE_COMMIT_ACTIVE_ONLY |
				DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
			plane_disabling = true;
	}

	/*
	 * The flip-done wait is only strictly required if a deferred plane
	 * disable is pending, but a blocking commit has to wait for the flip
	 * anyway, so do it unconditionally to keep things simple.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	if (plane_disabling) {
		for_each_old_plane_in_state(state, plane, old_plane_state, i)
			ipu_plane_disable_deferred(plane);
	}

	drm_atomic_helper_commit_hw_done(state);
}

static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
	.atomic_commit_tail = imx_drm_atomic_commit_tail,
};

int imx_drm_encoder_parse_of(struct drm_device *drm,
	struct drm_encoder *encoder, struct device_node *np)
{
	uint32_t crtc_mask = drm_of_find_possible_crtcs(drm, np);

	/*
	 * If we failed to find the CRTC(s) which this encoder is
	 * supposed to be connected to, it is because the CRTC has
	 * not been registered yet.  Defer probing, and hope that
	 * the required CRTC is added later.
	 */
	if (crtc_mask == 0)
		return -EPROBE_DEFER;

	encoder->possible_crtcs = crtc_mask;

	/* FIXME: this is the mask of outputs which can clone this output. */
	encoder->possible_clones = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);

static const struct drm_ioctl_desc imx_drm_ioctls[] = {
	/* none so far */
};

static struct drm_driver imx_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.lastclose = drm_fb_helper_lastclose,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
	.ioctls = imx_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
	.fops = &imx_drm_driver_fops,
	.name = "imx-drm",
	.desc = "i.MX DRM graphics",
	.date = "20120507",
	.major = 1,
	.minor = 0,
	.patchlevel = 0,
};

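/* Component match callback: compare a bound device against an OF graph node. */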
static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	/* Special case for DI, dev->of_node may not be set yet */
	if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
		struct ipu_client_platformdata *pdata = dev->platform_data;

		return pdata->of_node == np;
	}

	/* Special case for LDB, one device for two channels */
	if (of_node_cmp(np->name, "lvds-channel") == 0) {
		np = of_get_parent(np);
		of_node_put(np);
	}

	return dev->of_node == np;
}

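/*
 * Master bind callback: called by the component framework once every
 * sub-device (CRTCs, encoders, connectors) referenced by the display
 * subsystem node has been probed and matched.
 */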
static int imx_drm_bind(struct device *dev)
{
	struct drm_device *drm;
	struct imx_drm_device *imxdrm;
	int ret;

	drm = drm_dev_alloc(&imx_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
	if (!imxdrm) {
		ret = -ENOMEM;
		goto err_unref;
	}

	imxdrm->drm = drm;
	drm->dev_private = imxdrm;

	/*
	 * Enable DRM irq mode: with irq_enabled = true the vblank
	 * machinery can be used.  The CRTC drivers register their own
	 * interrupt handlers instead of the single handler the DRM core
	 * would install.
	 */
	drm->irq_enabled = true;

	/*
	 * Set the maximum width and height to 4096x4096.  These values
	 * are used to validate framebuffer sizes in drm_mode_addfb().
	 */
	drm->mode_config.min_width = 1;
	drm->mode_config.min_height = 1;
	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;
	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
	drm->mode_config.allow_fb_modifiers = true;

	drm_mode_config_init(drm);

	ret = drm_vblank_init(drm, MAX_CRTC);
	if (ret)
		goto err_kms;

	dev_set_drvdata(dev, drm);

	/* Now try and bind all our sub-components */
	ret = component_bind_all(dev, drm);
	if (ret)
		goto err_kms;

	drm_mode_config_reset(drm);

	/*
	 * All components are now initialised, so set up the fbdev helper.
	 * The fb helper takes copies of key hardware information, so the
	 * crtcs/connectors/encoders must not change after this point.
	 */
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
	if (legacyfb_depth != 16 && legacyfb_depth != 32) {
		dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
		legacyfb_depth = 16;
	}
	ret = drm_fb_cma_fbdev_init(drm, legacyfb_depth, MAX_CRTC);
	if (ret)
		goto err_unbind;
#endif

	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_fbhelper;

	return 0;

err_fbhelper:
	drm_kms_helper_poll_fini(drm);
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
	drm_fb_cma_fbdev_fini(drm);
err_unbind:
#endif
	component_unbind_all(drm->dev, drm);
err_kms:
	drm_mode_config_cleanup(drm);
err_unref:
	drm_dev_unref(drm);

	return ret;
}

static void imx_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);

	drm_fb_cma_fbdev_fini(drm);

	drm_mode_config_cleanup(drm);

	component_unbind_all(drm->dev, drm);
	dev_set_drvdata(dev, NULL);

	drm_dev_unref(drm);
}

static const struct component_master_ops imx_drm_ops = {
	.bind = imx_drm_bind,
	.unbind = imx_drm_unbind,
};

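/*
 * Platform driver probe: parse the OF graph ports of the display subsystem
 * node, register a component master for them, and set a 32-bit coherent DMA
 * mask for CMA allocations.
 */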
static int imx_drm_platform_probe(struct platform_device *pdev)
{
	int ret = drm_of_component_probe(&pdev->dev, compare_of, &imx_drm_ops);

	if (!ret)
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

static int imx_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &imx_drm_ops);
	return 0;
}

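/*
 * System sleep support: park the atomic state with drm_atomic_helper_suspend()
 * on suspend and restore it on resume; connector polling is paused in between.
 */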
#ifdef CONFIG_PM_SLEEP
static int imx_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct imx_drm_device *imxdrm;

	/* The drvdata is only set once the component master has bound */
	if (drm_dev == NULL)
		return 0;

	drm_kms_helper_poll_disable(drm_dev);

	imxdrm = drm_dev->dev_private;
	imxdrm->state = drm_atomic_helper_suspend(drm_dev);
	if (IS_ERR(imxdrm->state)) {
		drm_kms_helper_poll_enable(drm_dev);
		return PTR_ERR(imxdrm->state);
	}

	return 0;
}

static int imx_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct imx_drm_device *imx_drm;

	if (drm_dev == NULL)
		return 0;

	imx_drm = drm_dev->dev_private;
	drm_atomic_helper_resume(drm_dev, imx_drm->state);
	drm_kms_helper_poll_enable(drm_dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(imx_drm_pm_ops, imx_drm_suspend, imx_drm_resume);

static const struct of_device_id imx_drm_dt_ids[] = {
	{ .compatible = "fsl,imx-display-subsystem", },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);

static struct platform_driver imx_drm_pdrv = {
	.probe = imx_drm_platform_probe,
	.remove = imx_drm_platform_remove,
	.driver = {
		.name = "imx-drm",
		.pm = &imx_drm_pm_ops,
		.of_match_table = imx_drm_dt_ids,
	},
};

static struct platform_driver * const drivers[] = {
	&imx_drm_pdrv,
	&ipu_drm_driver,
};

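/* Register the core imx-drm driver together with the IPUv3 CRTC driver. */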
static int __init imx_drm_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_drm_init);

static void __exit imx_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_drm_exit);

MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
MODULE_LICENSE("GPL");