// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER	0
#define RIGHT_MIXER	1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

/* convert a S31.32 sign-magnitude CTM coefficient to the HW's 18-bit format */
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

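/**
 * _dpu_crtc_setup_blend_cfg - configure the blend rule for one mixer stage
 * @mixer: mixer to program
 * @pstate: plane state carrying the blend stage
 * @format: framebuffer format of the plane, used to choose between
 *	    constant-alpha and per-pixel-alpha blending
 */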
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

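/*
 * Program each layer mixer's output rectangle from the bounds computed in
 * _dpu_crtc_setup_lm_bounds(); in a dual-mixer topology the second mixer is
 * flagged as the right-hand mixer.
 */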
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

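/**
 * _dpu_crtc_blend_setup_mixer - populate the stage config and blend rules
 *	for every plane staged on the crtc
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure
 * @mixer: Pointer to the mixer array reserved for this crtc
 */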
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

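/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */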
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

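/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall send them to userspace.
 * @crtc: Pointer to drm crtc structure
 */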
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how drm_atomic_helper_swap_state() works). Instead,
	 * in this case, dpu_encoder_get_intf_mode() should be sufficient as
	 * the encoder is not modified.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

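/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API
 *
 * The CRTC registers this callback with the encoder for all frame events
 * (frame_done, frame_error, panel_dead, idle_timeout, ...). The encoder may
 * invoke it from different contexts (IRQ, user thread, commit thread), so the
 * event is pushed onto a kthread worker and processed in task context.
 */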
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

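/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */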
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	/* the helper accepts a NULL state if the allocation failed */
	__drm_atomic_helper_crtc_reset(crtc, cstate ? &cstate->base : NULL);
}

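/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */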
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

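/* bookkeeping for planes staged on this crtc during atomic_check */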
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

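/*
 * Validate the proposed crtc state: per-stage plane counts on each mixer
 * half, multirect pipe sharing, destination bounds, core performance limits
 * and source-split ordering constraints.
 */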
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so its valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes in a shared stage must be ordered so that the
		 *   lower plane id is on the left
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have the same dest yoff
		 *   and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

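/**
 * dpu_crtc_init - create a new crtc object
 * @dev: dpu device
 * @plane: base plane
 * @cursor: cursor plane
 * Returns: new crtc object or error
 */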
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}