1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <drm/drm_atomic.h>
22#include <drm/drm_atomic_helper.h>
23#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_plane_helper.h>
25
26#include "vc4_drv.h"
27#include "vc4_regs.h"
28
/* Per-axis scaling mode chosen for a plane: no scaling, the TPZ
 * (trapezoidal) filter used for downscaling, or the PPF (polyphase)
 * filter used for upscaling.  See vc4_get_scaling_mode().
 */
enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};
34
struct vc4_plane_state {
	struct drm_plane_state base;

	/* System memory copy of the display list for this element,
	 * computed at atomic_check time (vc4_plane_mode_set()).
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offsets in the dlist to words that the async cursor path
	 * (vc4_update_plane() / vc4_plane_async_set_fb()) patches in
	 * place.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Location in the hardware dlist where this plane's dlist was
	 * last written, set by vc4_plane_write_dlist().
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped source offset within the FB, in whole pixels. */
	u32 src_x, src_y;
	/* Source size per channel: [0] = RGB/Y, [1] = Cb/Cr (chroma
	 * subsampled dimensions for planar YUV formats).
	 */
	u32 src_w[2], src_h[2];

	/* Per-channel scaling selection, same [0]/[1] indexing. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Bus address to start scanning out from for each FB plane
	 * (base paddr + fb offset, plus clipping adjustments).
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;
};
76
77static inline struct vc4_plane_state *
78to_vc4_plane_state(struct drm_plane_state *state)
79{
80 return (struct vc4_plane_state *)state;
81}
82
/* Translation table from DRM fourcc formats to the HVS hardware's
 * pixel format and channel ordering.
 */
static const struct hvs_format {
	u32 drm;	 /* DRM_FORMAT_* fourcc */
	u32 hvs;	 /* HVS_PIXEL_FORMAT_* hardware value */
	u32 pixel_order; /* HVS_PIXEL_ORDER_* channel ordering */
	bool has_alpha;	 /* format carries a usable alpha channel */
	bool flip_cbcr;	 /* swap Cb/Cr plane pointers in the dlist
			  * (YVU plane order vs the hardware's YUV) */
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.flip_cbcr = true,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.flip_cbcr = true,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
	},
};
149
150static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
151{
152 unsigned i;
153
154 for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
155 if (hvs_formats[i].drm == drm_format)
156 return &hvs_formats[i];
157 }
158
159 return NULL;
160}
161
162static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
163{
164 if (dst > src)
165 return VC4_SCALING_PPF;
166 else if (dst < src)
167 return VC4_SCALING_TPZ;
168 else
169 return VC4_SCALING_NONE;
170}
171
172static bool plane_enabled(struct drm_plane_state *state)
173{
174 return state->fb && state->crtc;
175}
176
177static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
178{
179 struct vc4_plane_state *vc4_state;
180
181 if (WARN_ON(!plane->state))
182 return NULL;
183
184 vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
185 if (!vc4_state)
186 return NULL;
187
188 memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
189
190 __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
191
192 if (vc4_state->dlist) {
193 vc4_state->dlist = kmemdup(vc4_state->dlist,
194 vc4_state->dlist_count * 4,
195 GFP_KERNEL);
196 if (!vc4_state->dlist) {
197 kfree(vc4_state);
198 return NULL;
199 }
200 vc4_state->dlist_size = vc4_state->dlist_count;
201 }
202
203 return &vc4_state->base;
204}
205
/* Free a plane state: release its LBM allocation (under the HVS mm
 * lock, since the allocator is shared with other planes), its
 * software dlist, and the base state's references.
 */
static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}
224
225
/* Install a fresh, zeroed software state as the plane's current
 * state.  Called when there must not already be a state attached
 * (hence the WARN_ON); on allocation failure the plane is simply left
 * without a state.
 */
static void vc4_plane_reset(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	WARN_ON(plane->state);

	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return;

	plane->state = &vc4_state->base;
	vc4_state->base.plane = plane;
}
239
240static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
241{
242 if (vc4_state->dlist_count == vc4_state->dlist_size) {
243 u32 new_size = max(4u, vc4_state->dlist_count * 2);
244 u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);
245
246 if (!new_dlist)
247 return;
248 memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
249
250 kfree(vc4_state->dlist);
251 vc4_state->dlist = new_dlist;
252 vc4_state->dlist_size = new_size;
253 }
254
255 vc4_state->dlist[vc4_state->dlist_count++] = val;
256}
257
258
259
260
261
262
/* Translate the x/y scaling-mode pair of one channel of the state
 * (@plane indexes the x_scaling[]/y_scaling[] arrays) into the
 * hardware's SCL field encoding for the control word.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY in the control word.
		 */
		return 0;
	}
}
292
293static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
294{
295 struct drm_plane *plane = state->plane;
296 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
297 struct drm_framebuffer *fb = state->fb;
298 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
299 u32 subpixel_src_mask = (1 << 16) - 1;
300 u32 format = fb->format->format;
301 int num_planes = fb->format->num_planes;
302 u32 h_subsample = 1;
303 u32 v_subsample = 1;
304 int i;
305
306 for (i = 0; i < num_planes; i++)
307 vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
308
309
310 if ((state->src_x & subpixel_src_mask) ||
311 (state->src_y & subpixel_src_mask) ||
312 (state->src_w & subpixel_src_mask) ||
313 (state->src_h & subpixel_src_mask)) {
314 return -EINVAL;
315 }
316
317 vc4_state->src_x = state->src_x >> 16;
318 vc4_state->src_y = state->src_y >> 16;
319 vc4_state->src_w[0] = state->src_w >> 16;
320 vc4_state->src_h[0] = state->src_h >> 16;
321
322 vc4_state->crtc_x = state->crtc_x;
323 vc4_state->crtc_y = state->crtc_y;
324 vc4_state->crtc_w = state->crtc_w;
325 vc4_state->crtc_h = state->crtc_h;
326
327 vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
328 vc4_state->crtc_w);
329 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
330 vc4_state->crtc_h);
331
332 if (num_planes > 1) {
333 vc4_state->is_yuv = true;
334
335 h_subsample = drm_format_horz_chroma_subsampling(format);
336 v_subsample = drm_format_vert_chroma_subsampling(format);
337 vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
338 vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
339
340 vc4_state->x_scaling[1] =
341 vc4_get_scaling_mode(vc4_state->src_w[1],
342 vc4_state->crtc_w);
343 vc4_state->y_scaling[1] =
344 vc4_get_scaling_mode(vc4_state->src_h[1],
345 vc4_state->crtc_h);
346
347
348
349
350
351 if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
352 vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
353 if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
354 vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
355 }
356
357 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
358 vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
359 vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
360 vc4_state->y_scaling[1] == VC4_SCALING_NONE);
361
362
363
364
365
366 if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
367 return -EINVAL;
368
369
370
371
372 if (vc4_state->crtc_x < 0) {
373 for (i = 0; i < num_planes; i++) {
374 u32 cpp = fb->format->cpp[i];
375 u32 subs = ((i == 0) ? 1 : h_subsample);
376
377 vc4_state->offsets[i] += (cpp *
378 (-vc4_state->crtc_x) / subs);
379 }
380 vc4_state->src_w[0] += vc4_state->crtc_x;
381 vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
382 vc4_state->crtc_x = 0;
383 }
384
385 if (vc4_state->crtc_y < 0) {
386 for (i = 0; i < num_planes; i++) {
387 u32 subs = ((i == 0) ? 1 : v_subsample);
388
389 vc4_state->offsets[i] += (fb->pitches[i] *
390 (-vc4_state->crtc_y) / subs);
391 }
392 vc4_state->src_h[0] += vc4_state->crtc_y;
393 vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
394 vc4_state->crtc_y = 0;
395 }
396
397 return 0;
398}
399
/* Emit the TPZ (trapezoidal downscale filter) parameter words for one
 * axis: a 16.16 fixed-point scale factor and its reciprocal.
 */
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale, recip;

	scale = (1 << 16) * src / dst;

	/* The specs note that while the reciprocal would be defined
	 * as (1<<32)/scale, ~0 is close enough.
	 */
	recip = ~0 / scale;

	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}
417
418static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
419{
420 u32 scale = (1 << 16) * src / dst;
421
422 vc4_dlist_write(vc4_state,
423 SCALER_PPF_AGC |
424 VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
425 VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
426}
427
/* Compute how many bytes of LBM (line buffer memory) scratch space
 * this plane state needs for scaling/format conversion; 0 if none.
 */
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	/* Worst-case line width: either the source or the scaled
	 * destination, whichever is wider.
	 */
	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
	u32 lbm;

	if (!vc4_state->is_yuv) {
		if (vc4_state->is_unity)
			return 0;
		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
			lbm = pix_per_line * 8;
		else {
			/* PPF scaling: needs a larger multiplier than
			 * TPZ (presumably more context lines; the
			 * exact hardware requirement isn't visible
			 * here).
			 */
			lbm = pix_per_line * 16;
		}
	} else {
		/* YUV conversion always uses the scaler, so reserve
		 * the larger amount.
		 */
		lbm = pix_per_line * 16;
	}

	/* Matches the 32-byte alignment used when inserting the LBM
	 * node in vc4_plane_mode_set().
	 */
	lbm = roundup(lbm, 32);

	return lbm;
}
458
/* Emit the scaling parameter words for one scaler channel, in the
 * order the hardware expects: H-PPF, V-PPF (+context), H-TPZ,
 * V-TPZ (+context).  The 0xc0c0c0c0 words are placeholders for
 * context words the HVS writes back itself.
 */
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
					 int channel)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	/* Ch0 H-PPF Word 0: Scaling Parameters */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}

	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}
}
490
491
492
493
494static int vc4_plane_mode_set(struct drm_plane *plane,
495 struct drm_plane_state *state)
496{
497 struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
498 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
499 struct drm_framebuffer *fb = state->fb;
500 u32 ctl0_offset = vc4_state->dlist_count;
501 const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
502 int num_planes = drm_format_num_planes(format->drm);
503 u32 scl0, scl1, pitch0;
504 u32 lbm_size, tiling;
505 unsigned long irqflags;
506 int ret, i;
507
508 ret = vc4_plane_setup_clipping_and_scaling(state);
509 if (ret)
510 return ret;
511
512
513
514
515 lbm_size = vc4_lbm_size(state);
516 if (lbm_size) {
517 if (!vc4_state->lbm.allocated) {
518 spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
519 ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
520 &vc4_state->lbm,
521 lbm_size, 32, 0, 0);
522 spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
523 } else {
524 WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
525 }
526 }
527
528 if (ret)
529 return ret;
530
531
532
533
534
535
536
537 if (num_planes == 1) {
538 scl0 = vc4_get_scl_field(state, 1);
539 scl1 = scl0;
540 } else {
541 scl0 = vc4_get_scl_field(state, 1);
542 scl1 = vc4_get_scl_field(state, 0);
543 }
544
545 switch (fb->modifier) {
546 case DRM_FORMAT_MOD_LINEAR:
547 tiling = SCALER_CTL0_TILING_LINEAR;
548 pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
549 break;
550 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
551 tiling = SCALER_CTL0_TILING_256B_OR_T;
552
553 pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET),
554 VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L),
555 VC4_SET_FIELD((vc4_state->src_w[0] + 31) >> 5,
556 SCALER_PITCH0_TILE_WIDTH_R));
557 break;
558 default:
559 DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
560 (long long)fb->modifier);
561 return -EINVAL;
562 }
563
564
565 vc4_dlist_write(vc4_state,
566 SCALER_CTL0_VALID |
567 (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
568 (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
569 VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
570 (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
571 VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
572 VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
573
574
575 vc4_state->pos0_offset = vc4_state->dlist_count;
576 vc4_dlist_write(vc4_state,
577 VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
578 VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
579 VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
580
581
582 if (!vc4_state->is_unity) {
583 vc4_dlist_write(vc4_state,
584 VC4_SET_FIELD(vc4_state->crtc_w,
585 SCALER_POS1_SCL_WIDTH) |
586 VC4_SET_FIELD(vc4_state->crtc_h,
587 SCALER_POS1_SCL_HEIGHT));
588 }
589
590
591 vc4_state->pos2_offset = vc4_state->dlist_count;
592 vc4_dlist_write(vc4_state,
593 VC4_SET_FIELD(format->has_alpha ?
594 SCALER_POS2_ALPHA_MODE_PIPELINE :
595 SCALER_POS2_ALPHA_MODE_FIXED,
596 SCALER_POS2_ALPHA_MODE) |
597 VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
598 VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
599
600
601 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
602
603
604
605
606
607
608 vc4_state->ptr0_offset = vc4_state->dlist_count;
609 if (!format->flip_cbcr) {
610 for (i = 0; i < num_planes; i++)
611 vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
612 } else {
613 WARN_ON_ONCE(num_planes != 3);
614 vc4_dlist_write(vc4_state, vc4_state->offsets[0]);
615 vc4_dlist_write(vc4_state, vc4_state->offsets[2]);
616 vc4_dlist_write(vc4_state, vc4_state->offsets[1]);
617 }
618
619
620 for (i = 0; i < num_planes; i++)
621 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
622
623
624 vc4_dlist_write(vc4_state, pitch0);
625
626
627 for (i = 1; i < num_planes; i++) {
628 vc4_dlist_write(vc4_state,
629 VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
630 }
631
632
633 if (vc4_state->is_yuv) {
634 vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
635 vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
636 vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
637 }
638
639 if (!vc4_state->is_unity) {
640
641 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
642 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
643 vc4_dlist_write(vc4_state, vc4_state->lbm.start);
644 }
645
646 if (num_planes > 1) {
647
648
649
650
651 vc4_write_scaling_parameters(state, 1);
652 }
653 vc4_write_scaling_parameters(state, 0);
654
655
656
657
658 if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
659 vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
660 vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
661 vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
662 u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
663 SCALER_PPF_KERNEL_OFFSET);
664
665
666 vc4_dlist_write(vc4_state, kernel);
667
668 vc4_dlist_write(vc4_state, kernel);
669
670 vc4_dlist_write(vc4_state, kernel);
671
672 vc4_dlist_write(vc4_state, kernel);
673 }
674 }
675
676 vc4_state->dlist[ctl0_offset] |=
677 VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
678
679 return 0;
680}
681
682
683
684
685
686
687
688
689static int vc4_plane_atomic_check(struct drm_plane *plane,
690 struct drm_plane_state *state)
691{
692 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
693
694 vc4_state->dlist_count = 0;
695
696 if (plane_enabled(state))
697 return vc4_plane_mode_set(plane, state);
698 else
699 return 0;
700}
701
static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* Intentionally empty: the plane doesn't know where in the
	 * CRTC's hardware dlist it will be stored, so the dlist is
	 * uploaded later via vc4_plane_write_dlist() from the CRTC
	 * side instead.
	 */
}
711
/* Copy the plane's software dlist into its slot in the hardware
 * dlist, remembering where it landed so the async cursor path can
 * patch individual words in place later.
 *
 * Returns the number of dlist words written.
 */
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	int i;

	vc4_state->hw_dlist = dlist;

	/* Word-by-word writel() rather than a bulk copy, since this
	 * is iomem.
	 */
	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return vc4_state->dlist_count;
}
725
726u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
727{
728 const struct vc4_plane_state *vc4_state =
729 container_of(state, typeof(*vc4_state), base);
730
731 return vc4_state->dlist_count;
732}
733
734
735
736
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer, without waiting for
 * vblank.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	uint32_t addr;

	/* We're skipping the negative-origin offset adjustment that
	 * vc4_plane_setup_clipping_and_scaling() would do, so the
	 * clipped origin must be non-negative here.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
	addr = bo->paddr + fb->offsets[0];

	/* Patch the new scanout address straight into the hardware
	 * dlist (pointer word 0, located via ptr0_offset).
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Keep the software copy in sync, so that a later dlist
	 * upload that doesn't recompute this plane still scans out
	 * from the new FB.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}
761
/* prepare_fb hook: make the atomic commit wait on any pending
 * rendering to the new FB's BO (its exclusive reservation fence)
 * before scanning it out.
 */
static int vc4_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct vc4_bo *bo;
	struct dma_fence *fence;

	/* Nothing to wait on when the FB is unchanged or removed. */
	if ((plane->state->fb == state->fb) || !state->fb)
		return 0;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
	fence = reservation_object_get_excl_rcu(bo->resv);
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}
777
/* Atomic helper hooks: check builds the dlist, update is a no-op
 * (the dlist is uploaded from the CRTC side), prepare_fb fences
 * against pending rendering.
 */
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
};
783
static void vc4_plane_destroy(struct drm_plane *plane)
{
	/* Disable the plane before tearing down the DRM object; the
	 * vc4_plane itself is devm-allocated, so no free here.
	 */
	drm_plane_helper_disable(plane);
	drm_plane_cleanup(plane);
}
789
790
791
792
/* Fast path for cursor updates: patch position/FB words directly into
 * the live hardware dlist without a vblank-synced atomic commit.
 * Anything the fast path can't handle falls back to the atomic
 * helper.
 */
static int
vc4_update_plane(struct drm_plane *plane,
		 struct drm_crtc *crtc,
		 struct drm_framebuffer *fb,
		 int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t src_x, uint32_t src_y,
		 uint32_t src_w, uint32_t src_h,
		 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane_state *plane_state;
	struct vc4_plane_state *vc4_state;

	/* Only the cursor plane takes the fast path. */
	if (plane != crtc->cursor)
		goto out;

	plane_state = plane->state;
	vc4_state = to_vc4_plane_state(plane_state);

	if (!plane_state)
		goto out;

	/* No configuring new scaling in the fast path: any size
	 * change would alter the dlist layout.
	 */
	if (crtc_w != plane_state->crtc_w ||
	    crtc_h != plane_state->crtc_h ||
	    src_w != plane_state->src_w ||
	    src_h != plane_state->src_h) {
		goto out;
	}

	if (fb != plane_state->fb) {
		drm_atomic_set_fb_for_plane(plane->state, fb);
		vc4_plane_async_set_fb(plane, fb);
	}

	/* Set the cursor's position on the screen.  This is the
	 * expected change from the drm_mode_cursor_universal()
	 * helper.
	 */
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;

	/* Allow changing the start position within the cursor BO, if
	 * that matters.
	 */
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;

	/* Recompute the software dlist with the new coordinates. */
	vc4_plane_atomic_check(plane, plane_state);

	/* Patch only the words that changed into the hardware dlist.
	 * We can't rewrite the whole dlist with
	 * vc4_plane_write_dlist(), since that would clobber the
	 * context words the HVS is currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	return 0;

out:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y,
					      crtc_w, crtc_h,
					      src_x, src_y,
					      src_w, src_h,
					      ctx);
}
865
/* Plane funcs: update_plane is overridden so cursor moves can bypass
 * vblank syncing; state management uses our vc4_plane_state wrapper.
 */
static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = vc4_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vc4_plane_destroy,
	.set_property = NULL,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
};
875
876struct drm_plane *vc4_plane_init(struct drm_device *dev,
877 enum drm_plane_type type)
878{
879 struct drm_plane *plane = NULL;
880 struct vc4_plane *vc4_plane;
881 u32 formats[ARRAY_SIZE(hvs_formats)];
882 u32 num_formats = 0;
883 int ret = 0;
884 unsigned i;
885
886 vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
887 GFP_KERNEL);
888 if (!vc4_plane)
889 return ERR_PTR(-ENOMEM);
890
891 for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
892
893
894
895
896 if (type != DRM_PLANE_TYPE_CURSOR ||
897 hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
898 formats[num_formats++] = hvs_formats[i].drm;
899 }
900 }
901 plane = &vc4_plane->base;
902 ret = drm_universal_plane_init(dev, plane, 0,
903 &vc4_plane_funcs,
904 formats, num_formats,
905 NULL, type, NULL);
906
907 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
908
909 return plane;
910}
911