// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't care what kind of output hardware it is talking to.
 */

14#include <linux/clk.h>
15
16#include <drm/drm_atomic.h>
17#include <drm/drm_atomic_helper.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_gem_framebuffer_helper.h>
20#include <drm/drm_plane_helper.h>
21#include <drm/drm_probe_helper.h>
22#include <drm/drm_vblank.h>
23
24#include "vc4_drv.h"
25#include "vc4_regs.h"
26
27#define HVS_NUM_CHANNELS 3
28
/* Global atomic state for the color transform matrix (CTM). The hardware
 * has a single CTM, so at most one CRTC can use it at a time.
 */
struct vc4_ctm_state {
	struct drm_private_state base;	/* must be first for to_vc4_ctm_state() */
	struct drm_color_ctm *ctm;	/* matrix from the CRTC's CTM property */
	int fifo;			/* 1-based HVS FIFO using the CTM; 0 = disabled */
};
34
/* Cast a private state back to the vc4_ctm_state embedding it. */
static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}
39
/* Global atomic state tracking which HVS FIFOs (channels) are assigned
 * to a CRTC, and which commit, if any, is still pending on each FIFO.
 */
struct vc4_hvs_state {
	struct drm_private_state base;

	struct {
		unsigned in_use: 1;			/* FIFO currently assigned to a CRTC */
		struct drm_crtc_commit *pending_commit;	/* commit to wait on before reuse */
	} fifo_state[HVS_NUM_CHANNELS];
};
48
/* Cast a private state back to the vc4_hvs_state embedding it. */
static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}
54
/* Global atomic state accumulating the load of all active planes. */
struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;		/* aggregate HVS processing load (pixel rate) */
	u64 membus_load;	/* aggregate memory-bus load — presumably bytes/sec, see
				 * the SZ_1G + SZ_512M check in the atomic check */
};
60
/* Cast a private state back to the vc4_load_tracker_state embedding it. */
static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}
66
/* Pull the global CTM state into this atomic commit. The dedicated CTM
 * modeset lock is taken first so that commits touching the single shared
 * CTM are serialized against each other.
 *
 * Returns the CTM state, or an ERR_PTR on lock contention or allocation
 * failure.
 */
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}
85
86static struct drm_private_state *
87vc4_ctm_duplicate_state(struct drm_private_obj *obj)
88{
89 struct vc4_ctm_state *state;
90
91 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
92 if (!state)
93 return NULL;
94
95 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
96
97 return &state->base;
98}
99
/* Free a CTM state previously created by vc4_ctm_duplicate_state(). */
static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(to_vc4_ctm_state(state));
}
107
/* Atomic state handlers for the CTM private object. */
static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};
112
/* Managed-release callback tearing down the CTM private object. */
static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}
119
/* Create the CTM private object with a zeroed initial state (CTM off)
 * and register its cleanup as a managed action on the DRM device.
 *
 * Returns 0 on success or a negative errno.
 */
static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
135
136
/* Converts a two's-complement S31.32 fixed-point value to the hardware's
 * sign-and-magnitude S0.9 format, saturating any value with a non-zero
 * integer part.
 */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
154
/* Program the CTM coefficients and FIFO routing into the HVS OLED
 * registers. Called from the commit tail, after the new state has been
 * swapped in.
 */
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	/* fifo is 1-based; writing 0 here disables the CTM entirely. */
	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
188
189static struct vc4_hvs_state *
190vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
191{
192 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
193 struct drm_private_state *priv_state;
194
195 priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
196 if (IS_ERR(priv_state))
197 return ERR_CAST(priv_state);
198
199 return to_vc4_hvs_state(priv_state);
200}
201
202static struct vc4_hvs_state *
203vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
204{
205 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
206 struct drm_private_state *priv_state;
207
208 priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
209 if (IS_ERR(priv_state))
210 return ERR_CAST(priv_state);
211
212 return to_vc4_hvs_state(priv_state);
213}
214
215static struct vc4_hvs_state *
216vc4_hvs_get_global_state(struct drm_atomic_state *state)
217{
218 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
219 struct drm_private_state *priv_state;
220
221 priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
222 if (IS_ERR(priv_state))
223 return ERR_CAST(priv_state);
224
225 return to_vc4_hvs_state(priv_state);
226}
227
/* Commit the DSP3 output muxing on VC4: route DSP3 to FIFO 2 or detach
 * it when the transposer owns that FIFO. Only channel 2 has a
 * configurable mux on this generation.
 */
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}
266
/* Commit the pixelvalve output muxing on VC5 (BCM2711): for every CRTC
 * whose muxing changed, route its DSP output to the CRTC's assigned HVS
 * channel, or select mux value 3 when the CRTC has no channel.
 */
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		/* Set by the atomic check when the enable state changed. */
		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			/* DSP2's mux is a single bit: 0 when this CRTC sits
			 * on channel 2, 1 otherwise.
			 */
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			/* No assigned channel -> mux value 3. */
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}
334
335static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
336{
337 struct drm_device *dev = state->dev;
338 struct vc4_dev *vc4 = to_vc4_dev(dev);
339 struct vc4_hvs *hvs = vc4->hvs;
340 struct drm_crtc_state *new_crtc_state;
341 struct drm_crtc *crtc;
342 struct vc4_hvs_state *old_hvs_state;
343 unsigned int channel;
344 int i;
345
346 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
347 struct vc4_crtc_state *vc4_crtc_state;
348
349 if (!new_crtc_state->commit)
350 continue;
351
352 vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
353 vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
354 }
355
356 old_hvs_state = vc4_hvs_get_old_global_state(state);
357 if (IS_ERR(old_hvs_state))
358 return;
359
360 for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
361 struct drm_crtc_commit *commit;
362 int ret;
363
364 if (!old_hvs_state->fifo_state[channel].in_use)
365 continue;
366
367 commit = old_hvs_state->fifo_state[channel].pending_commit;
368 if (!commit)
369 continue;
370
371 ret = drm_crtc_commit_wait(commit);
372 if (ret)
373 drm_err(dev, "Timed out waiting for commit\n");
374
375 drm_crtc_commit_put(commit);
376 old_hvs_state->fifo_state[channel].pending_commit = NULL;
377 }
378
379 if (vc4->hvs->hvs5)
380 clk_set_min_rate(hvs->core_clk, 500000000);
381
382 drm_atomic_helper_commit_modeset_disables(dev, state);
383
384 vc4_ctm_commit(vc4, state);
385
386 if (vc4->hvs->hvs5)
387 vc5_hvs_pv_muxing_commit(vc4, state);
388 else
389 vc4_hvs_pv_muxing_commit(vc4, state);
390
391 drm_atomic_helper_commit_planes(dev, state, 0);
392
393 drm_atomic_helper_commit_modeset_enables(dev, state);
394
395 drm_atomic_helper_fake_vblank(state);
396
397 drm_atomic_helper_commit_hw_done(state);
398
399 drm_atomic_helper_wait_for_flip_done(dev, state);
400
401 drm_atomic_helper_cleanup_planes(dev, state);
402
403 if (vc4->hvs->hvs5)
404 clk_set_min_rate(hvs->core_clk, 0);
405}
406
/* drm_mode_config_helper_funcs.atomic_commit_setup: record this commit
 * as pending on every in-use HVS FIFO it touches, so the next commit
 * taking over one of those FIFOs can wait for it first (see
 * vc4_atomic_commit_tail()).
 *
 * Takes an extra reference on each recorded commit; the reference is
 * dropped by the waiter, or by vc4_hvs_channels_destroy_state().
 */
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}
436
/* .fb_create implementation: when userspace did not pass an explicit
 * framebuffer modifier, derive one from the BO's t_format flag so the
 * framebuffer matches the buffer's actual (T-tiled or linear) layout.
 */
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		/* Patch a local copy; the caller's cmd is const. */
		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
475
/* Our CTM has some peculiar limitations: it can only be enabled for one
 * CRTC at a time, and the hardware only supports S0.9 scalars (see
 * vc4_ctm_s31_32_to_s0_9()), so coefficients outside that range must be
 * rejected here.
 */
480static int
481vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
482{
483 struct vc4_dev *vc4 = to_vc4_dev(dev);
484 struct vc4_ctm_state *ctm_state = NULL;
485 struct drm_crtc *crtc;
486 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
487 struct drm_color_ctm *ctm;
488 int i;
489
490 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
491
492 if (!new_crtc_state->ctm && old_crtc_state->ctm) {
493 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
494 if (IS_ERR(ctm_state))
495 return PTR_ERR(ctm_state);
496 ctm_state->fifo = 0;
497 }
498 }
499
500 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
501 if (new_crtc_state->ctm == old_crtc_state->ctm)
502 continue;
503
504 if (!ctm_state) {
505 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
506 if (IS_ERR(ctm_state))
507 return PTR_ERR(ctm_state);
508 }
509
510
511 if (new_crtc_state->ctm) {
512 struct vc4_crtc_state *vc4_crtc_state =
513 to_vc4_crtc_state(new_crtc_state);
514
515
516 int fifo = vc4_crtc_state->assigned_channel + 1;
517
518
519
520
521 if (ctm_state->fifo && ctm_state->fifo != fifo) {
522 DRM_DEBUG_DRIVER("Too many CTM configured\n");
523 return -EINVAL;
524 }
525
526
527
528
529
530 ctm = new_crtc_state->ctm->data;
531 for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
532 u64 val = ctm->matrix[i];
533
534 val &= ~BIT_ULL(63);
535 if (val > BIT_ULL(32))
536 return -EINVAL;
537 }
538
539 ctm_state->fifo = fifo;
540 ctm_state->ctm = ctm;
541 }
542 }
543
544 return 0;
545}
546
/* Update the global memory-bus and HVS load with the delta introduced by
 * this commit's plane changes, and reject the commit with -ENOSPC when
 * the resulting load would exceed what the hardware can sustain.
 */
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	/* The tracker only exists on VC4 (see vc4_kms_load()). */
	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		/* Remove the outgoing plane state's contribution... */
		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		/* ...and add the incoming one's. */
		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* 1.5G memory-bus budget — presumably bytes/sec with headroom below
	 * the bus limit; NOTE(review): confirm against the platform's
	 * memory bandwidth budget.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* 240000000 looks like a pixel-rate budget just under the HVS clock
	 * (~250 MHz) — NOTE(review): confirm.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}
600
601static struct drm_private_state *
602vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
603{
604 struct vc4_load_tracker_state *state;
605
606 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
607 if (!state)
608 return NULL;
609
610 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
611
612 return &state->base;
613}
614
/* Free a load tracker state created by the duplicate callback. */
static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	kfree(to_vc4_load_tracker_state(state));
}
623
/* Atomic state handlers for the load tracker private object. */
static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
628
/* Managed-release callback tearing down the load tracker private object. */
static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Nothing to tear down when the tracker was never created. */
	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}
638
/* Create the load tracker private object (VC4 only) with a zeroed
 * initial load, and register its managed cleanup.
 *
 * Returns 0 on success (including when the tracker isn't available) or
 * a negative errno.
 */
static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}
656
/* Duplicate the HVS channels state. Only the in_use flags carry over:
 * pending_commit references deliberately stay with the old state, which
 * is waited on in vc4_atomic_commit_tail() and released there or in the
 * destroy callback.
 */
static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
	}

	return &state->base;
}
677
/* Free an HVS channels state, dropping any commit references that are
 * still held (i.e. that the commit tail didn't get to wait on and put).
 */
static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}
693
/* Atomic state handlers for the HVS channels private object. */
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};
698
/* Managed-release callback tearing down the HVS channels private object. */
static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}
705
/* Create the HVS channels private object with all FIFOs initially free,
 * and register its managed cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}
720
/*
 * Channel (FIFO) assignment policy:
 *
 * Every enabled CRTC must be routed to one of the HVS FIFOs, and not
 * every CRTC can reach every FIFO (see the per-CRTC
 * hvs_available_channels mask). Rather than solving the general
 * matching problem, vc4_pv_muxing_atomic_check() below greedily hands
 * each newly enabled CRTC the lowest-numbered free FIFO it can use, and
 * releases a CRTC's FIFO when that CRTC is disabled.
 *
 * Assignments live in the vc4_hvs_state global private object so they
 * persist across commits: a CRTC keeps its channel until it is
 * disabled.
 *
 * NOTE(review): the greedy lowest-bit choice assumes the supported
 * routing layouts never let an earlier CRTC grab the only FIFO a later
 * CRTC could have used — confirm against the pixelvalve/HVS routing
 * tables if new layouts are added.
 */
/* Assign an HVS FIFO (channel) to every CRTC whose enable state changes
 * in this commit, and free the channel of every CRTC being disabled.
 *
 * Returns 0 on success or -EINVAL when no suitable channel is free.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	/* Build the bitmask of channels still free in the new state. */
	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;
			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/* The CRTC is being enabled: take the lowest-numbered free
		 * channel this CRTC can be routed to, or fail the commit if
		 * none remains.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}
828
/* Driver-wide atomic check: assign HVS channels, validate the CTM, run
 * the core helper checks, then validate the aggregate load.
 */
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret = vc4_pv_muxing_atomic_check(dev, state);

	if (!ret)
		ret = vc4_ctm_atomic_check(dev, state);
	if (!ret)
		ret = drm_atomic_helper_check(dev, state);
	if (!ret)
		ret = vc4_load_tracker_atomic_check(state);

	return ret;
}
848
/* Hooks into the atomic-helper commit machinery. */
static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};
853
/* Top-level mode-config entry points. */
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};
859
/* One-time KMS initialization: vblank support, mode-config limits and
 * callbacks, and the three global atomic private objects (CTM, load
 * tracker, HVS channels).
 *
 * Returns 0 on success or a negative errno.
 */
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	/* The load tracker only exists on the older VC4 generation. */
	if (!is_vc5) {
		vc4->load_tracker_available = true;

		/* Start with the tracker enabled; load_tracker_enabled is
		 * presumably togglable at runtime (e.g. via debugfs) —
		 * NOTE(review): confirm where it is cleared.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Support vblank irq fast disable; must be set before vblank init. */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	/* BCM2711 supports much larger scanout sizes than the older VC4. */
	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}
916