#include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

static int mdp4_hw_init(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct drm_device *dev = mdp4_kms->dev;
        uint32_t version, major, minor, dmap_cfg, vg_cfg;
        unsigned long clk;
        int ret = 0;

        pm_runtime_get_sync(dev->dev);

        mdp4_enable(mdp4_kms);
        version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
        mdp4_disable(mdp4_kms);

        major = FIELD(version, MDP4_VERSION_MAJOR);
        minor = FIELD(version, MDP4_VERSION_MINOR);

        DBG("found MDP4 version v%d.%d", major, minor);

        if (major != 4) {
                dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto out;
        }

        mdp4_kms->rev = minor;

        if (mdp4_kms->rev > 1) {
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
        }

        mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

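        /* max read pending cmd config, 3 pending requests: */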
        mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

        clk = clk_get_rate(mdp4_kms->clk);

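        /* fetch config depends on the MDP revision and core clock rate: */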
        if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
                dmap_cfg = 0x47;
                vg_cfg = 0x47;
        } else {
                dmap_cfg = 0x27;
                vg_cfg = 0x43;
        }

        DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

        if (mdp4_kms->rev >= 2)
                mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
        mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

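        /* disable CSC matrix / YUV by default: */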
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

        if (mdp4_kms->rev > 1)
                mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

        dev->mode_config.allow_fb_modifiers = true;

out:
        pm_runtime_put_sync(dev->dev);

        return ret;
}

static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;

        mdp4_enable(mdp4_kms);

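        /* hold a vblank reference for each CRTC in this commit, so vblank
         * interrupts stay enabled until mdp4_complete_commit() drops them:
         */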
        for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_get(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;

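        /* drop the vblank references taken in mdp4_prepare_commit(): */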
        for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_put(crtc);

        mdp4_disable(mdp4_kms);
}

static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms,
                struct drm_crtc *crtc)
{
        mdp4_crtc_wait_for_commit_done(crtc);
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
                struct drm_encoder *encoder)
{
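        /* only the DTV (TMDS) encoder constrains the pixel clock; the other
         * interfaces just take the requested rate as-is:
         */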
        switch (encoder->encoder_type) {
        case DRM_MODE_ENCODER_TMDS:
                return mdp4_dtv_round_pixclk(encoder, rate);
        case DRM_MODE_ENCODER_LVDS:
        case DRM_MODE_ENCODER_DSI:
        default:
                return rate;
        }
}

static const char * const iommu_ports[] = {
        "mdp_port0_cb0", "mdp_port1_cb0",
};

static void mdp4_destroy(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct device *dev = mdp4_kms->dev->dev;
        struct msm_mmu *mmu = mdp4_kms->mmu;

        if (mmu) {
                mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
                mmu->funcs->destroy(mmu);
        }

        if (mdp4_kms->blank_cursor_iova)
                msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
        drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);

        if (mdp4_kms->rpm_enabled)
                pm_runtime_disable(dev);

        kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
        .base = {
                .hw_init = mdp4_hw_init,
                .irq_preinstall = mdp4_irq_preinstall,
                .irq_postinstall = mdp4_irq_postinstall,
                .irq_uninstall = mdp4_irq_uninstall,
                .irq = mdp4_irq,
                .enable_vblank = mdp4_enable_vblank,
                .disable_vblank = mdp4_disable_vblank,
                .prepare_commit = mdp4_prepare_commit,
                .complete_commit = mdp4_complete_commit,
                .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
                .get_format = mdp_get_format,
                .round_pixclk = mdp4_round_pixclk,
                .destroy = mdp4_destroy,
        },
        .set_irqmask = mdp4_set_irqmask,
};

int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_disable_unprepare(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_disable_unprepare(mdp4_kms->pclk);
        clk_disable_unprepare(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_disable_unprepare(mdp4_kms->axi_clk);

        return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_prepare_enable(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_prepare_enable(mdp4_kms->pclk);
        clk_prepare_enable(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_prepare_enable(mdp4_kms->axi_clk);

        return 0;
}

static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
{
        struct device_node *endpoint, *panel_node;
        struct device_node *np = dev->dev->of_node;

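        /* the LVDS/LCDC panel is described by the first port (reg 0) of the
         * MDP4 DT node's of-graph:
         */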
        endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
        if (!endpoint) {
                DBG("no LVDS remote endpoint\n");
                return NULL;
        }

        panel_node = of_graph_get_remote_port_parent(endpoint);
        if (!panel_node) {
                DBG("no valid panel node in LVDS endpoint\n");
                of_node_put(endpoint);
                return NULL;
        }

        of_node_put(endpoint);

        return panel_node;
}

static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                int intf_type)
{
        struct drm_device *dev = mdp4_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        struct device_node *panel_node;
        struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
        int i, dsi_id;
        int ret;

        switch (intf_type) {
        case DRM_MODE_ENCODER_LVDS:
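                /*
                 * Bail out early (not an error) if no panel node is described
                 * in DT; there is nothing to hook the LCDC encoder and LVDS
                 * connector up to in that case.
                 */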
                panel_node = mdp4_detect_lcdc_panel(dev);
                if (!panel_node)
                        return 0;

                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
                        dev_err(dev->dev, "failed to construct LCDC encoder\n");
                        return PTR_ERR(encoder);
                }

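                /* LCDC/LVDS scans out through the DMA_P pipe: */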
                encoder->possible_crtcs = 1 << DMA_P;

                connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
                if (IS_ERR(connector)) {
                        dev_err(dev->dev, "failed to initialize LVDS connector\n");
                        return PTR_ERR(connector);
                }

                priv->encoders[priv->num_encoders++] = encoder;
                priv->connectors[priv->num_connectors++] = connector;

                break;
        case DRM_MODE_ENCODER_TMDS:
                encoder = mdp4_dtv_encoder_init(dev);
                if (IS_ERR(encoder)) {
                        dev_err(dev->dev, "failed to construct DTV encoder\n");
                        return PTR_ERR(encoder);
                }

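                /* DTV scans out through the second CRTC (DMA_E): */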
                encoder->possible_crtcs = 1 << 1;

                if (priv->hdmi) {
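                        /* construct bridge/connector for HDMI: */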
                        ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
                        if (ret) {
                                dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
                                return ret;
                        }
                }

                priv->encoders[priv->num_encoders++] = encoder;

                break;
        case DRM_MODE_ENCODER_DSI:
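                /* only DSI1 is supported for now: */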
                dsi_id = 0;

                if (!priv->dsi[dsi_id])
                        break;

                for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
                        dsi_encs[i] = mdp4_dsi_encoder_init(dev);
                        if (IS_ERR(dsi_encs[i])) {
                                ret = PTR_ERR(dsi_encs[i]);
                                dev_err(dev->dev,
                                        "failed to construct DSI encoder: %d\n",
                                        ret);
                                return ret;
                        }

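                        /* DSI also scans out through the DMA_P pipe: */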
                        dsi_encs[i]->possible_crtcs = 1 << DMA_P;
                        priv->encoders[priv->num_encoders++] = dsi_encs[i];
                }

                ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
                if (ret) {
                        dev_err(dev->dev, "failed to initialize DSI: %d\n",
                                ret);
                        return ret;
                }

                break;
        default:
                dev_err(dev->dev, "Invalid or unsupported interface\n");
                return -EINVAL;
        }

        return 0;
}

static int modeset_init(struct mdp4_kms *mdp4_kms)
{
        struct drm_device *dev = mdp4_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        int i, ret;
        static const enum mdp4_pipe rgb_planes[] = {
                RGB1, RGB2,
        };
        static const enum mdp4_pipe vg_planes[] = {
                VG1, VG2,
        };
        static const enum mdp4_dma mdp4_crtcs[] = {
                DMA_P, DMA_E,
        };
        static const char * const mdp4_crtc_names[] = {
                "DMA_P", "DMA_E",
        };
        static const int mdp4_intfs[] = {
                DRM_MODE_ENCODER_LVDS,
                DRM_MODE_ENCODER_DSI,
                DRM_MODE_ENCODER_TMDS,
        };

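        /* construct the non-private (VG overlay) planes first: */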
        for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
                plane = mdp4_plane_init(dev, vg_planes[i], false);
                if (IS_ERR(plane)) {
                        dev_err(dev->dev,
                                "failed to construct plane for VG%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                priv->planes[priv->num_planes++] = plane;
        }

        for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
                plane = mdp4_plane_init(dev, rgb_planes[i], true);
                if (IS_ERR(plane)) {
                        dev_err(dev->dev,
                                "failed to construct plane for RGB%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
                }

                crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
                                mdp4_crtcs[i]);
                if (IS_ERR(crtc)) {
                        dev_err(dev->dev, "failed to construct crtc for %s\n",
                                mdp4_crtc_names[i]);
                        ret = PTR_ERR(crtc);
                        goto fail;
                }

                priv->crtcs[priv->num_crtcs++] = crtc;
        }

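        /*
         * Now set up the interfaces.  Together with the possible_crtcs
         * assignments in mdp4_modeset_init_intf() this gives two fixed
         * paths: LVDS or DSI scanning out from DMA_P, and DTV/HDMI
         * scanning out from DMA_E.
         */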
        for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
                ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
                if (ret) {
                        dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
                                i, ret);
                        goto fail;
                }
        }

        return 0;

fail:
        return ret;
}

struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
        struct platform_device *pdev = dev->platformdev;
        struct mdp4_platform_config *config = mdp4_get_config(pdev);
        struct mdp4_kms *mdp4_kms;
        struct msm_kms *kms = NULL;
        struct msm_mmu *mmu;
        int irq, ret;

        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
        if (!mdp4_kms) {
                dev_err(dev->dev, "failed to allocate kms\n");
                ret = -ENOMEM;
                goto fail;
        }

        mdp_kms_init(&mdp4_kms->base, &kms_funcs);

        kms = &mdp4_kms->base.base;

        mdp4_kms->dev = dev;

        mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
        if (IS_ERR(mdp4_kms->mmio)) {
                ret = PTR_ERR(mdp4_kms->mmio);
                goto fail;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                dev_err(dev->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        kms->irq = irq;

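        /* the "vdd" supply is treated as optional: if the regulator lookup
         * fails we simply carry on without controlling it:
         */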
        mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
        if (IS_ERR(mdp4_kms->vdd))
                mdp4_kms->vdd = NULL;

        if (mdp4_kms->vdd) {
                ret = regulator_enable(mdp4_kms->vdd);
                if (ret) {
                        dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
                        goto fail;
                }
        }

        mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(mdp4_kms->clk)) {
                dev_err(dev->dev, "failed to get core_clk\n");
                ret = PTR_ERR(mdp4_kms->clk);
                goto fail;
        }

        mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(mdp4_kms->pclk))
                mdp4_kms->pclk = NULL;

        mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
        if (IS_ERR(mdp4_kms->lut_clk)) {
                dev_err(dev->dev, "failed to get lut_clk\n");
                ret = PTR_ERR(mdp4_kms->lut_clk);
                goto fail;
        }

        mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(mdp4_kms->axi_clk)) {
                dev_err(dev->dev, "failed to get axi_clk\n");
                ret = PTR_ERR(mdp4_kms->axi_clk);
                goto fail;
        }

        clk_set_rate(mdp4_kms->clk, config->max_clk);
        clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

        pm_runtime_enable(dev->dev);
        mdp4_kms->rpm_enabled = true;

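        /* make sure the scanout interfaces are off before attaching the
         * iommu (the bootloader could have left them enabled):
         */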
        mdp4_enable(mdp4_kms);
        mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
        mdp4_disable(mdp4_kms);
        mdelay(16);

        if (config->iommu) {
                mmu = msm_iommu_new(&pdev->dev, config->iommu);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
                        goto fail;
                }
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        goto fail;

                mdp4_kms->mmu = mmu;
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                mmu = NULL;
        }

        mdp4_kms->id = msm_register_mmu(dev, mmu);
        if (mdp4_kms->id < 0) {
                ret = mdp4_kms->id;
                dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
                goto fail;
        }

        ret = modeset_init(mdp4_kms);
        if (ret) {
                dev_err(dev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }

        mutex_lock(&dev->struct_mutex);
        mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
                ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
                dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
                mdp4_kms->blank_cursor_bo = NULL;
                goto fail;
        }

        ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
                dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
                goto fail;
        }

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;

        return kms;

fail:
        if (kms)
                mdp4_destroy(kms);
        return ERR_PTR(ret);
}

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
        static struct mdp4_platform_config config = {};

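        /* cap the core clock at 266.667 MHz; use an IOMMU domain on the
         * platform bus when one can be allocated (a NULL domain falls back
         * to physically contiguous scanout buffers, see mdp4_kms_init()):
         */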
        config.max_clk = 266667000;
        config.iommu = iommu_domain_alloc(&platform_bus_type);

        return &config;
}