/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

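	/* if there is a pending flip, these will be non-null: */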
	struct drm_pending_vblank_event *event;

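	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */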
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

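	/* for unref'ing cursor bo's after scanout completes: */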
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
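		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo: */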
		spinlock_t lock;

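		/* current cursor being scanned out: */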
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

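/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */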
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

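	/* this should not happen: */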
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

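/* if file!=NULL, this is preclose potential cancel-flip path */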
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
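		/* set STAGE_UNUSED for all layers */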
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_put_iova(val, kms->aspace);
	drm_gem_object_put_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

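/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */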
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

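/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all the planes will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */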
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

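	/* ctl could be NULL when this CRTC is being disabled: */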
	if (!ctl)
		goto out;

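	/* Collect all plane information */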
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);

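		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */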
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);

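		/*
		 * if we have a right pipe (i.e, the plane comprises of 2
		 * hwpipes), then we need to stage the right pipe on the
		 * right side of both the LM and R-LM
		 */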
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

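	/* configure the blend op and fg/bg alpha for each stage: */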
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
	    crtc->name, mode->base.id, mode->name,
	    mode->vrefresh, mode->clock,
	    mode->hdisplay, mode->hsync_start,
	    mode->hsync_end, mode->htotal,
	    mode->vdisplay, mode->vsync_start,
	    mode->vsync_end, mode->vtotal,
	    mode->type, mode->flags);

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
		   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
		   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

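	/* Assign mixer to LEFT side in source split mode */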
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

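		/* Assign mixer to RIGHT side in source split mode */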
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

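	/* Disable/save vblank irq handling before power is disabled */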
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
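		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */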
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

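	/* Restore vblank irq handling after power is enabled */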
	drm_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

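	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check())
	 */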
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

static bool is_fullscreen(struct drm_crtc_state *cstate,
			  struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
	       ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
	       ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					       struct drm_crtc_state *new_crtc_state,
					       struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

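	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */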
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

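	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */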
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

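		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */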
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

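	/* bail out early if there aren't any planes */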
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

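	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */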
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

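	/* assign a stage based on sorted zpos property */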
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

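	/* trigger a warning if cursor isn't the highest zorder */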
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

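	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */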
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

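	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */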
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

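	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */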
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

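	/* cache the irq masks from this state for the irq handlers: */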
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

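	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 */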
	*roi_w = min(mdp5_crtc->cursor.width, xres -
		     mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
		     mdp5_crtc->cursor.y);
}

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
		   MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
		   MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
		   MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
		   MDP5_LM_CURSOR_START_XY_Y_START(y) |
		   MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
		   mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
				struct drm_file *file, uint32_t handle,
				uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

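	/* don't support LM cursors when we have source split enabled */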
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, kms->aspace,
			       &mdp5_crtc->cursor.iova);
	if (ret)
		return -EINVAL;

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
			cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
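		/* enable vblank to complete cursor work: */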
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

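	/* don't support LM cursors when we have source split enabled */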
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

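	/* In case the CRTC is disabled, just drop the cursor update */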
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
		   pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
						   pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

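	/* Should not call this function if crtc is disabled. */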
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
				 ((mdp5_ctl_get_commit_status(ctl) &
				   mdp5_crtc->flushed_mask) == 0),
				 msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

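	/* should this be done elsewhere ? */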
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

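/* initialize crtc */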
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			   "unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}