#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

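/* indexed register access: the Azalia audio endpoint registers sit behind
 * an index/data register pair, so accesses are serialized with
 * audio_endpt_idx_lock.
 */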
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
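/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */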
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}
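/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */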
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}
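/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */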
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
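/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */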
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking
			 * the aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
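/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */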
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		/* disable the pin; write back tmp with the EN bit cleared
		 * rather than discarding it
		 */
		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		num_crtc = 6;
		break;
	case CHIP_KAVERI:
		num_crtc = 4;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}
void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and enabled crtc, if has DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* Disable crtc */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
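/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Setup up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */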
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;

	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
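/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */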
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

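/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark parameters
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */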
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
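/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark parameters
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */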
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 means 30% */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3);
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
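/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark parameters
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */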
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
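/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark parameters
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */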
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
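/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark parameters
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */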
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
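/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark parameters
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */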
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
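/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark parameters
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */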
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
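/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark parameters
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */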
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
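/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark parameters
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */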
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
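/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark parameters
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */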
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
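/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */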
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4;
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4;
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
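/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */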
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo speakers */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

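/* register offsets for each audio pin, relative to the first pin */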
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI)
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS))
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII))
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be re-enabled when needed */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}
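/*
 * update the N and CTS parameters for a given pixel clock rate
 */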
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
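/*
 * build a HDMI Video Info Frame
 */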
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficient of two integer numbers)
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
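/*
 * update the info frames with the data from the current display mode
 */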
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	/* enable audio on all channels */
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE8 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			int j;
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

1754static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1755 struct drm_framebuffer *fb,
1756 int x, int y, int atomic)
1757{
1758 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1759 struct drm_device *dev = crtc->dev;
1760 struct amdgpu_device *adev = dev->dev_private;
1761 struct drm_framebuffer *target_fb;
1762 struct drm_gem_object *obj;
1763 struct amdgpu_bo *abo;
1764 uint64_t fb_location, tiling_flags;
1765 uint32_t fb_format, fb_pitch_pixels;
1766 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1767 u32 pipe_config;
1768 u32 viewport_w, viewport_h;
1769 int r;
1770 bool bypass_lut = false;
1771 struct drm_format_name_buf format_name;
1772
1773
1774 if (!atomic && !crtc->primary->fb) {
1775 DRM_DEBUG_KMS("No FB bound\n");
1776 return 0;
1777 }
1778
1779 if (atomic)
1780 target_fb = fb;
1781 else
1782 target_fb = crtc->primary->fb;
1783
1784
1785
1786
1787 obj = target_fb->obj[0];
1788 abo = gem_to_amdgpu_bo(obj);
1789 r = amdgpu_bo_reserve(abo, false);
1790 if (unlikely(r != 0))
1791 return r;
1792
1793 if (!atomic) {
1794 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1795 if (unlikely(r != 0)) {
1796 amdgpu_bo_unreserve(abo);
1797 return -EINVAL;
1798 }
1799 }
1800 fb_location = amdgpu_bo_gpu_offset(abo);
1801
1802 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1803 amdgpu_bo_unreserve(abo);
1804
1805 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1806
1807 switch (target_fb->format->format) {
1808 case DRM_FORMAT_C8:
1809 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1810 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1811 break;
1812 case DRM_FORMAT_XRGB4444:
1813 case DRM_FORMAT_ARGB4444:
1814 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1815 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1816#ifdef __BIG_ENDIAN
1817 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1818#endif
1819 break;
1820 case DRM_FORMAT_XRGB1555:
1821 case DRM_FORMAT_ARGB1555:
1822 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1823 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1824#ifdef __BIG_ENDIAN
1825 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1826#endif
1827 break;
1828 case DRM_FORMAT_BGRX5551:
1829 case DRM_FORMAT_BGRA5551:
1830 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1831 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1832#ifdef __BIG_ENDIAN
1833 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1834#endif
1835 break;
1836 case DRM_FORMAT_RGB565:
1837 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1838 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1839#ifdef __BIG_ENDIAN
1840 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1841#endif
1842 break;
1843 case DRM_FORMAT_XRGB8888:
1844 case DRM_FORMAT_ARGB8888:
1845 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1846 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1847#ifdef __BIG_ENDIAN
1848 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1849#endif
1850 break;
1851 case DRM_FORMAT_XRGB2101010:
1852 case DRM_FORMAT_ARGB2101010:
1853 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1854 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1855#ifdef __BIG_ENDIAN
1856 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1857#endif
1858
1859 bypass_lut = true;
1860 break;
1861 case DRM_FORMAT_BGRX1010102:
1862 case DRM_FORMAT_BGRA1010102:
1863 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1864 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1865#ifdef __BIG_ENDIAN
1866 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1867#endif
1868
1869 bypass_lut = true;
1870 break;
1871 case DRM_FORMAT_XBGR8888:
1872 case DRM_FORMAT_ABGR8888:
1873 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1874 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1875 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1876 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1877#ifdef __BIG_ENDIAN
1878 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1879#endif
1880 break;
1881 default:
1882 DRM_ERROR("Unsupported screen format %s\n",
1883 drm_get_format_name(target_fb->format->format, &format_name));
1884 return -EINVAL;
1885 }
1886
1887 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1888 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1889
1890 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1891 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1892 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1893 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1894 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1895
1896 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1897 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1898 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1899 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1900 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1901 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1902 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1903 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1904 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1905 }
1906
1907 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1908
1909 dce_v8_0_vga_enable(crtc, false);
1910
1911
1912
1913
1914 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1915
1916 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1917 upper_32_bits(fb_location));
1918 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1919 upper_32_bits(fb_location));
1920 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1921 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1922 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1923 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1924 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1925 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1926
 /*
 * The LUT only has 256 entries for indexing by an 8 bpc fb. Bypass the
 * LUT for > 8 bpc scanout to avoid truncating the fb indices to the 8
 * msbs, so full precision is retained through the pipeline.
 */
1932 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1933 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1934 ~LUT_10BIT_BYPASS_EN);
1935
1936 if (bypass_lut)
1937 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1938
1939 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1940 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1941 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1942 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1943 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1944 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1945
1946 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1947 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1948
1949 dce_v8_0_grph_enable(crtc, true);
1950
1951 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1952 target_fb->height);
1953
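 /* the viewport start appears to require 4-pixel horizontal and
 * 2-line vertical alignment, hence the masking below
 */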
1954 x &= ~3;
1955 y &= ~1;
1956 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1957 (x << 16) | y);
1958 viewport_w = crtc->mode.hdisplay;
1959 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1960 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1961 (viewport_w << 16) | viewport_h);
1962
 /* set pageflip to happen anywhere in the vblank interval */
1964 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1965
1966 if (!atomic && fb && fb != crtc->primary->fb) {
1967 abo = gem_to_amdgpu_bo(fb->obj[0]);
1968 r = amdgpu_bo_reserve(abo, true);
1969 if (unlikely(r != 0))
1970 return r;
1971 amdgpu_bo_unpin(abo);
1972 amdgpu_bo_unreserve(abo);
1973 }
1974
 /* Bytes per pixel may have changed */
1976 dce_v8_0_bandwidth_update(adev);
1977
1978 return 0;
1979}
1980
1981static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
1982 struct drm_display_mode *mode)
1983{
1984 struct drm_device *dev = crtc->dev;
1985 struct amdgpu_device *adev = dev->dev_private;
1986 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1987
 /* INTERLEAVE_EN is a single-bit field, so program its mask */
 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
 LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
1991 else
1992 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1993}
1994
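/*
 * dce_v8_0_crtc_load_lut - program the legacy gamma LUT for a crtc
 *
 * Sets the colour pipeline around the LUT to bypass (input CSC,
 * prescale, degamma, gamut remap, regamma, output CSC) and loads the
 * 256-entry gamma table from crtc->gamma_store as packed 10:10:10
 * values into DC_LUT_30_COLOR.
 */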
1995static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
1996{
1997 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1998 struct drm_device *dev = crtc->dev;
1999 struct amdgpu_device *adev = dev->dev_private;
2000 u16 *r, *g, *b;
2001 int i;
2002
2003 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2004
2005 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2006 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2007 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2008 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2009 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2010 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2011 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2012 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2013 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2014 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2015
2016 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2017
2018 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2019 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2020 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2021
2022 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2023 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2024 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2025
2026 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2027 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2028
2029 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2030 r = crtc->gamma_store;
2031 g = r + crtc->gamma_size;
2032 b = g + crtc->gamma_size;
2033 for (i = 0; i < 256; i++) {
2034 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2035 ((*r++ & 0xffc0) << 14) |
2036 ((*g++ & 0xffc0) << 4) |
2037 (*b++ >> 6));
2038 }
2039
2040 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2041 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2042 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2043 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2044 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2045 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2046 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2047 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2048 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2049 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2050 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2051 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2052 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2053
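 /* XXX: undocumented register (no define in dce_8_0_d.h); appears to
 * match the depth of the crtc format block
 */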
2054 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2055
 /* XXX: this only needs to be programmed once per crtc at startup */
2058 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2059 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2060}
2061
2062static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2063{
2064 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2065 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2066
2067 switch (amdgpu_encoder->encoder_id) {
2068 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2069 if (dig->linkb)
2070 return 1;
2071 else
2072 return 0;
2074 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2075 if (dig->linkb)
2076 return 3;
2077 else
2078 return 2;
2080 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2081 if (dig->linkb)
2082 return 5;
2083 else
2084 return 4;
2086 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2087 return 6;
2089 default:
2090 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2091 return 0;
2092 }
2093}
2094
/**
 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
 * a single PPLL can be shared by all DP crtcs/encoders. For non-DP
 * monitors a dedicated PPLL must be used. If a particular board has an
 * external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * altogether.
 *
 * ASIC specific PLL information:
 *
 * DCE 8.x
 * KB/ML:
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI/KV:
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and
 *   non-DP) and DAC
 */
2117static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2118{
2119 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2120 struct drm_device *dev = crtc->dev;
2121 struct amdgpu_device *adev = dev->dev_private;
2122 u32 pll_in_use;
2123 int pll;
2124
2125 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2126 if (adev->clock.dp_extclk)
 /* skip PPLL programming if using ext clock */
2128 return ATOM_PPLL_INVALID;
2129 else {
 /* use the same PPLL for all DP monitors */
2131 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2132 if (pll != ATOM_PPLL_INVALID)
2133 return pll;
2134 }
2135 } else {
 /* use the same PPLL for all monitors with the same clock */
2137 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2138 if (pll != ATOM_PPLL_INVALID)
2139 return pll;
2140 }
 /* otherwise, pick one of the plls */
2142 if ((adev->asic_type == CHIP_KABINI) ||
2143 (adev->asic_type == CHIP_MULLINS)) {
 /* KB/ML has PPLL1 and PPLL2 */
2145 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2146 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2147 return ATOM_PPLL2;
2148 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2149 return ATOM_PPLL1;
2150 DRM_ERROR("unable to allocate a PPLL\n");
2151 return ATOM_PPLL_INVALID;
2152 } else {
 /* CI/KV has PPLL0, PPLL1 and PPLL2 */
2154 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2155 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2156 return ATOM_PPLL2;
2157 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2158 return ATOM_PPLL1;
2159 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2160 return ATOM_PPLL0;
2161 DRM_ERROR("unable to allocate a PPLL\n");
2162 return ATOM_PPLL_INVALID;
2163 }
2165}
2166
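/* hold cursor updates while reprogramming so position, hot spot and size
 * are latched atomically
 */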
2167static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2168{
2169 struct amdgpu_device *adev = crtc->dev->dev_private;
2170 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2171 uint32_t cur_lock;
2172
2173 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2174 if (lock)
2175 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2176 else
2177 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2178 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2179}
2180
2181static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2182{
2183 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2184 struct amdgpu_device *adev = crtc->dev->dev_private;
2185
2186 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2187 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2188 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2189}
2190
2191static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2192{
2193 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2194 struct amdgpu_device *adev = crtc->dev->dev_private;
2195
2196 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2197 upper_32_bits(amdgpu_crtc->cursor_addr));
2198 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2199 lower_32_bits(amdgpu_crtc->cursor_addr));
2200
2201 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2202 CUR_CONTROL__CURSOR_EN_MASK |
2203 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2204 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2205}
2206
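/*
 * Move the cursor with the update lock held. Negative coordinates
 * (cursor partially off the top/left edge) are clamped to zero and
 * compensated for via the cursor hot spot.
 */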
2207static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2208 int x, int y)
2209{
2210 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2211 struct amdgpu_device *adev = crtc->dev->dev_private;
2212 int xorigin = 0, yorigin = 0;
2213
2214 amdgpu_crtc->cursor_x = x;
2215 amdgpu_crtc->cursor_y = y;
2216
 /* avivo-style cursors are offset into the total surface */
2218 x += crtc->x;
2219 y += crtc->y;
2220 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2221
2222 if (x < 0) {
2223 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2224 x = 0;
2225 }
2226 if (y < 0) {
2227 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2228 y = 0;
2229 }
2230
2231 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2232 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2233 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2234 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2235
2236 return 0;
2237}
2238
2239static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2240 int x, int y)
2241{
2242 int ret;
2243
2244 dce_v8_0_lock_cursor(crtc, true);
2245 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2246 dce_v8_0_lock_cursor(crtc, false);
2247
2248 return ret;
2249}
2250
2251static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2252 struct drm_file *file_priv,
2253 uint32_t handle,
2254 uint32_t width,
2255 uint32_t height,
2256 int32_t hot_x,
2257 int32_t hot_y)
2258{
2259 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2260 struct drm_gem_object *obj;
2261 struct amdgpu_bo *aobj;
2262 int ret;
2263
2264 if (!handle) {
 /* turn off cursor */
2266 dce_v8_0_hide_cursor(crtc);
2267 obj = NULL;
2268 goto unpin;
2269 }
2270
2271 if ((width > amdgpu_crtc->max_cursor_width) ||
2272 (height > amdgpu_crtc->max_cursor_height)) {
2273 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2274 return -EINVAL;
2275 }
2276
2277 obj = drm_gem_object_lookup(file_priv, handle);
2278 if (!obj) {
2279 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2280 return -ENOENT;
2281 }
2282
2283 aobj = gem_to_amdgpu_bo(obj);
2284 ret = amdgpu_bo_reserve(aobj, false);
2285 if (ret != 0) {
2286 drm_gem_object_put_unlocked(obj);
2287 return ret;
2288 }
2289
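 /* pin the cursor BO in VRAM so its GPU offset stays valid for scanout */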
2290 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2291 amdgpu_bo_unreserve(aobj);
2292 if (ret) {
2293 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2294 drm_gem_object_put_unlocked(obj);
2295 return ret;
2296 }
2297 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2298
2299 dce_v8_0_lock_cursor(crtc, true);
2300
2301 if (width != amdgpu_crtc->cursor_width ||
2302 height != amdgpu_crtc->cursor_height ||
2303 hot_x != amdgpu_crtc->cursor_hot_x ||
2304 hot_y != amdgpu_crtc->cursor_hot_y) {
2305 int x, y;
2306
2307 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2308 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2309
2310 dce_v8_0_cursor_move_locked(crtc, x, y);
2311
2312 amdgpu_crtc->cursor_width = width;
2313 amdgpu_crtc->cursor_height = height;
2314 amdgpu_crtc->cursor_hot_x = hot_x;
2315 amdgpu_crtc->cursor_hot_y = hot_y;
2316 }
2317
2318 dce_v8_0_show_cursor(crtc);
2319 dce_v8_0_lock_cursor(crtc, false);
2320
2321unpin:
2322 if (amdgpu_crtc->cursor_bo) {
2323 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2324 ret = amdgpu_bo_reserve(aobj, true);
2325 if (likely(ret == 0)) {
2326 amdgpu_bo_unpin(aobj);
2327 amdgpu_bo_unreserve(aobj);
2328 }
2329 drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2330 }
2331
2332 amdgpu_crtc->cursor_bo = obj;
2333 return 0;
2334}
2335
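/* restore cursor position and visibility after a mode set */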
2336static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2337{
2338 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2339
2340 if (amdgpu_crtc->cursor_bo) {
2341 dce_v8_0_lock_cursor(crtc, true);
2342
2343 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2344 amdgpu_crtc->cursor_y);
2345
2346 dce_v8_0_show_cursor(crtc);
2347
2348 dce_v8_0_lock_cursor(crtc, false);
2349 }
2350}
2351
2352static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2353 u16 *blue, uint32_t size,
2354 struct drm_modeset_acquire_ctx *ctx)
2355{
2356 dce_v8_0_crtc_load_lut(crtc);
2357
2358 return 0;
2359}
2360
2361static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2362{
2363 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2364
2365 drm_crtc_cleanup(crtc);
2366 kfree(amdgpu_crtc);
2367}
2368
2369static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2370 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2371 .cursor_move = dce_v8_0_crtc_cursor_move,
2372 .gamma_set = dce_v8_0_crtc_gamma_set,
2373 .set_config = amdgpu_display_crtc_set_config,
2374 .destroy = dce_v8_0_crtc_destroy,
2375 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2376};
2377
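/*
 * Legacy DPMS. On enable: power up and unblank the crtc, make sure the
 * vblank and pageflip interrupt sources are still enabled, and reload
 * the LUT. On disable: blank the crtc first, then power it down.
 */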
2378static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2379{
2380 struct drm_device *dev = crtc->dev;
2381 struct amdgpu_device *adev = dev->dev_private;
2382 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2383 unsigned type;
2384
2385 switch (mode) {
2386 case DRM_MODE_DPMS_ON:
2387 amdgpu_crtc->enabled = true;
2388 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2389 dce_v8_0_vga_enable(crtc, true);
2390 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2391 dce_v8_0_vga_enable(crtc, false);
 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2393 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2394 amdgpu_crtc->crtc_id);
2395 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2396 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2397 drm_crtc_vblank_on(crtc);
2398 dce_v8_0_crtc_load_lut(crtc);
2399 break;
2400 case DRM_MODE_DPMS_STANDBY:
2401 case DRM_MODE_DPMS_SUSPEND:
2402 case DRM_MODE_DPMS_OFF:
2403 drm_crtc_vblank_off(crtc);
2404 if (amdgpu_crtc->enabled) {
2405 dce_v8_0_vga_enable(crtc, true);
2406 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2407 dce_v8_0_vga_enable(crtc, false);
2408 }
2409 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2410 amdgpu_crtc->enabled = false;
2411 break;
2412 }
2413
2414 amdgpu_pm_compute_clocks(adev);
2415}
2416
2417static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2418{
 /* disable crtc pair power gating before programming */
2420 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2421 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2422 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2423}
2424
2425static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2426{
2427 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2428 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2429}
2430
2431static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2432{
2433 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2434 struct drm_device *dev = crtc->dev;
2435 struct amdgpu_device *adev = dev->dev_private;
2436 struct amdgpu_atom_ss ss;
2437 int i;
2438
2439 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2440 if (crtc->primary->fb) {
2441 int r;
2442 struct amdgpu_bo *abo;
2443
2444 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2445 r = amdgpu_bo_reserve(abo, true);
2446 if (unlikely(r))
2447 DRM_ERROR("failed to reserve abo before unpin\n");
2448 else {
2449 amdgpu_bo_unpin(abo);
2450 amdgpu_bo_unreserve(abo);
2451 }
2452 }
2453
2454 dce_v8_0_grph_enable(crtc, false);
2455
2456 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2457
2458 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2459 if (adev->mode_info.crtcs[i] &&
2460 adev->mode_info.crtcs[i]->enabled &&
2461 i != amdgpu_crtc->crtc_id &&
2462 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
 /* another crtc is using this pll, don't turn it off */
2466 goto done;
2467 }
2468 }
2469
2470 switch (amdgpu_crtc->pll_id) {
2471 case ATOM_PPLL1:
2472 case ATOM_PPLL2:
 /* disable the ppll */
2474 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2475 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2476 break;
2477 case ATOM_PPLL0:
 /* disable the ppll */
2479 if ((adev->asic_type == CHIP_KAVERI) ||
2480 (adev->asic_type == CHIP_BONAIRE) ||
2481 (adev->asic_type == CHIP_HAWAII))
2482 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2483 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2484 break;
2485 default:
2486 break;
2487 }
2488done:
2489 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2490 amdgpu_crtc->adjusted_clock = 0;
2491 amdgpu_crtc->encoder = NULL;
2492 amdgpu_crtc->connector = NULL;
2493}
2494
2495static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2496 struct drm_display_mode *mode,
2497 struct drm_display_mode *adjusted_mode,
2498 int x, int y, struct drm_framebuffer *old_fb)
2499{
2500 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2501
2502 if (!amdgpu_crtc->adjusted_clock)
2503 return -EINVAL;
2504
2505 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2506 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2507 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2508 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2509 amdgpu_atombios_crtc_scaler_setup(crtc);
2510 dce_v8_0_cursor_reset(crtc);
2511
2512 amdgpu_crtc->hw_mode = *adjusted_mode;
2513
2514 return 0;
2515}
2516
2517static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2518 const struct drm_display_mode *mode,
2519 struct drm_display_mode *adjusted_mode)
2520{
2521 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2522 struct drm_device *dev = crtc->dev;
2523 struct drm_encoder *encoder;
2524
 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2526 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2527 if (encoder->crtc == crtc) {
2528 amdgpu_crtc->encoder = encoder;
2529 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2530 break;
2531 }
2532 }
2533 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2534 amdgpu_crtc->encoder = NULL;
2535 amdgpu_crtc->connector = NULL;
2536 return false;
2537 }
2538 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2539 return false;
2540 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2541 return false;
 /* pick pll */
2543 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
 /* if we can't get a PPLL for a non-DP encoder, fail */
2545 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2546 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2547 return false;
2548
2549 return true;
2550}
2551
2552static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2553 struct drm_framebuffer *old_fb)
2554{
2555 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2556}
2557
2558static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2559 struct drm_framebuffer *fb,
2560 int x, int y, enum mode_set_atomic state)
2561{
2562 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2563}
2564
2565static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2566 .dpms = dce_v8_0_crtc_dpms,
2567 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2568 .mode_set = dce_v8_0_crtc_mode_set,
2569 .mode_set_base = dce_v8_0_crtc_set_base,
2570 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2571 .prepare = dce_v8_0_crtc_prepare,
2572 .commit = dce_v8_0_crtc_commit,
2573 .disable = dce_v8_0_crtc_disable,
2574};
2575
2576static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2577{
2578 struct amdgpu_crtc *amdgpu_crtc;
2579
2580 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2581 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2582 if (amdgpu_crtc == NULL)
2583 return -ENOMEM;
2584
2585 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2586
2587 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2588 amdgpu_crtc->crtc_id = index;
2589 adev->mode_info.crtcs[index] = amdgpu_crtc;
2590
2591 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2592 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2593 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2594 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2595
2596 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2597
2598 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2599 amdgpu_crtc->adjusted_clock = 0;
2600 amdgpu_crtc->encoder = NULL;
2601 amdgpu_crtc->connector = NULL;
2602 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2603
2604 return 0;
2605}
2606
2607static int dce_v8_0_early_init(void *handle)
2608{
2609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2610
2611 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2612 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2613
2614 dce_v8_0_set_display_funcs(adev);
2615
2616 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2617
2618 switch (adev->asic_type) {
2619 case CHIP_BONAIRE:
2620 case CHIP_HAWAII:
2621 adev->mode_info.num_hpd = 6;
2622 adev->mode_info.num_dig = 6;
2623 break;
2624 case CHIP_KAVERI:
2625 adev->mode_info.num_hpd = 6;
2626 adev->mode_info.num_dig = 7;
2627 break;
2628 case CHIP_KABINI:
2629 case CHIP_MULLINS:
2630 adev->mode_info.num_hpd = 6;
2631 adev->mode_info.num_dig = 6;
2632 break;
2633 default:
 /* FIXME: not supported yet */
2635 return -EINVAL;
2636 }
2637
2638 dce_v8_0_set_irq_funcs(adev);
2639
2640 return 0;
2641}
2642
2643static int dce_v8_0_sw_init(void *handle)
2644{
2645 int r, i;
2646 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2647
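 /* crtc vblank/vline interrupts: legacy src_ids 1..num_crtc (D1..D6) */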
2648 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2649 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2650 if (r)
2651 return r;
2652 }
2653
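 /* pageflip interrupts: legacy src_ids 8,10,...,18, one per crtc */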
2654 for (i = 8; i < 20; i += 2) {
2655 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2656 if (r)
2657 return r;
2658 }
2659
 /* HPD hotplug */
2661 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2662 if (r)
2663 return r;
2664
2665 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2666
2667 adev->ddev->mode_config.async_page_flip = true;
2668
2669 adev->ddev->mode_config.max_width = 16384;
2670 adev->ddev->mode_config.max_height = 16384;
2671
2672 adev->ddev->mode_config.preferred_depth = 24;
2673 adev->ddev->mode_config.prefer_shadow = 1;
2674
2675 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2676
2677 r = amdgpu_display_modeset_create_props(adev);
2678 if (r)
2679 return r;
2680
 /* allocate crtcs */
2685 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2686 r = dce_v8_0_crtc_init(adev, i);
2687 if (r)
2688 return r;
2689 }
2690
2691 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2692 amdgpu_display_print_display_setup(adev->ddev);
2693 else
2694 return -EINVAL;
2695
 /* setup afmt */
2697 r = dce_v8_0_afmt_init(adev);
2698 if (r)
2699 return r;
2700
2701 r = dce_v8_0_audio_init(adev);
2702 if (r)
2703 return r;
2704
2705 drm_kms_helper_poll_init(adev->ddev);
2706
2707 adev->mode_info.mode_config_initialized = true;
2708 return 0;
2709}
2710
2711static int dce_v8_0_sw_fini(void *handle)
2712{
2713 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2714
2715 kfree(adev->mode_info.bios_hardcoded_edid);
2716
2717 drm_kms_helper_poll_fini(adev->ddev);
2718
2719 dce_v8_0_audio_fini(adev);
2720
2721 dce_v8_0_afmt_fini(adev);
2722
2723 drm_mode_config_cleanup(adev->ddev);
2724 adev->mode_info.mode_config_initialized = false;
2725
2726 return 0;
2727}
2728
2729static int dce_v8_0_hw_init(void *handle)
2730{
2731 int i;
2732 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2733
 /* disable vga render */
2735 dce_v8_0_set_vga_render_state(adev, false);
 /* init dig PHYs, disp eng pll */
2737 amdgpu_atombios_encoder_init_dig(adev);
2738 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2739
 /* initialize hpd */
2741 dce_v8_0_hpd_init(adev);
2742
2743 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2744 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2745 }
2746
2747 dce_v8_0_pageflip_interrupt_init(adev);
2748
2749 return 0;
2750}
2751
2752static int dce_v8_0_hw_fini(void *handle)
2753{
2754 int i;
2755 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2756
2757 dce_v8_0_hpd_fini(adev);
2758
2759 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2760 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2761 }
2762
2763 dce_v8_0_pageflip_interrupt_fini(adev);
2764
2765 return 0;
2766}
2767
2768static int dce_v8_0_suspend(void *handle)
2769{
2770 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2771
2772 adev->mode_info.bl_level =
2773 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2774
2775 return dce_v8_0_hw_fini(handle);
2776}
2777
2778static int dce_v8_0_resume(void *handle)
2779{
2780 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2781 int ret;
2782
2783 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2784 adev->mode_info.bl_level);
2785
2786 ret = dce_v8_0_hw_init(handle);
2787
 /* turn on the BL */
2789 if (adev->mode_info.bl_encoder) {
2790 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2791 adev->mode_info.bl_encoder);
2792 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2793 bl_level);
2794 }
2795
2796 return ret;
2797}
2798
2799static bool dce_v8_0_is_idle(void *handle)
2800{
2801 return true;
2802}
2803
2804static int dce_v8_0_wait_for_idle(void *handle)
2805{
2806 return 0;
2807}
2808
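/* toggle the DC bit in SRBM_SOFT_RESET if the display engine appears hung */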
2809static int dce_v8_0_soft_reset(void *handle)
2810{
2811 u32 srbm_soft_reset = 0, tmp;
2812 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2813
2814 if (dce_v8_0_is_display_hung(adev))
2815 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2816
2817 if (srbm_soft_reset) {
2818 tmp = RREG32(mmSRBM_SOFT_RESET);
2819 tmp |= srbm_soft_reset;
2820 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2821 WREG32(mmSRBM_SOFT_RESET, tmp);
2822 tmp = RREG32(mmSRBM_SOFT_RESET);
2823
2824 udelay(50);
2825
2826 tmp &= ~srbm_soft_reset;
2827 WREG32(mmSRBM_SOFT_RESET, tmp);
2828 tmp = RREG32(mmSRBM_SOFT_RESET);
2829
 /* Wait a little for things to settle down */
2831 udelay(50);
2832 }
2833 return 0;
2834}
2835
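/* enable/disable the vblank interrupt in a crtc's line buffer block */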
2836static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2837 int crtc,
2838 enum amdgpu_interrupt_state state)
2839{
2840 u32 reg_block, lb_interrupt_mask;
2841
2842 if (crtc >= adev->mode_info.num_crtc) {
2843 DRM_DEBUG("invalid crtc %d\n", crtc);
2844 return;
2845 }
2846
2847 switch (crtc) {
2848 case 0:
2849 reg_block = CRTC0_REGISTER_OFFSET;
2850 break;
2851 case 1:
2852 reg_block = CRTC1_REGISTER_OFFSET;
2853 break;
2854 case 2:
2855 reg_block = CRTC2_REGISTER_OFFSET;
2856 break;
2857 case 3:
2858 reg_block = CRTC3_REGISTER_OFFSET;
2859 break;
2860 case 4:
2861 reg_block = CRTC4_REGISTER_OFFSET;
2862 break;
2863 case 5:
2864 reg_block = CRTC5_REGISTER_OFFSET;
2865 break;
2866 default:
2867 DRM_DEBUG("invalid crtc %d\n", crtc);
2868 return;
2869 }
2870
2871 switch (state) {
2872 case AMDGPU_IRQ_STATE_DISABLE:
2873 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2874 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2875 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2876 break;
2877 case AMDGPU_IRQ_STATE_ENABLE:
2878 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2879 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2880 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2881 break;
2882 default:
2883 break;
2884 }
2885}
2886
2887static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2888 int crtc,
2889 enum amdgpu_interrupt_state state)
2890{
2891 u32 reg_block, lb_interrupt_mask;
2892
2893 if (crtc >= adev->mode_info.num_crtc) {
2894 DRM_DEBUG("invalid crtc %d\n", crtc);
2895 return;
2896 }
2897
2898 switch (crtc) {
2899 case 0:
2900 reg_block = CRTC0_REGISTER_OFFSET;
2901 break;
2902 case 1:
2903 reg_block = CRTC1_REGISTER_OFFSET;
2904 break;
2905 case 2:
2906 reg_block = CRTC2_REGISTER_OFFSET;
2907 break;
2908 case 3:
2909 reg_block = CRTC3_REGISTER_OFFSET;
2910 break;
2911 case 4:
2912 reg_block = CRTC4_REGISTER_OFFSET;
2913 break;
2914 case 5:
2915 reg_block = CRTC5_REGISTER_OFFSET;
2916 break;
2917 default:
2918 DRM_DEBUG("invalid crtc %d\n", crtc);
2919 return;
2920 }
2921
2922 switch (state) {
2923 case AMDGPU_IRQ_STATE_DISABLE:
2924 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2925 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2926 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2927 break;
2928 case AMDGPU_IRQ_STATE_ENABLE:
2929 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2930 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2931 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2932 break;
2933 default:
2934 break;
2935 }
2936}
2937
2938static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2939 struct amdgpu_irq_src *src,
2940 unsigned type,
2941 enum amdgpu_interrupt_state state)
2942{
2943 u32 dc_hpd_int_cntl;
2944
2945 if (type >= adev->mode_info.num_hpd) {
 DRM_DEBUG("invalid hpd %d\n", type);
2947 return 0;
2948 }
2949
2950 switch (state) {
2951 case AMDGPU_IRQ_STATE_DISABLE:
2952 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2953 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2954 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2955 break;
2956 case AMDGPU_IRQ_STATE_ENABLE:
2957 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2958 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2959 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2960 break;
2961 default:
2962 break;
2963 }
2964
2965 return 0;
2966}
2967
2968static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2969 struct amdgpu_irq_src *src,
2970 unsigned type,
2971 enum amdgpu_interrupt_state state)
2972{
2973 switch (type) {
2974 case AMDGPU_CRTC_IRQ_VBLANK1:
2975 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2976 break;
2977 case AMDGPU_CRTC_IRQ_VBLANK2:
2978 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2979 break;
2980 case AMDGPU_CRTC_IRQ_VBLANK3:
2981 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2982 break;
2983 case AMDGPU_CRTC_IRQ_VBLANK4:
2984 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2985 break;
2986 case AMDGPU_CRTC_IRQ_VBLANK5:
2987 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2988 break;
2989 case AMDGPU_CRTC_IRQ_VBLANK6:
2990 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2991 break;
2992 case AMDGPU_CRTC_IRQ_VLINE1:
2993 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
2994 break;
2995 case AMDGPU_CRTC_IRQ_VLINE2:
2996 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
2997 break;
2998 case AMDGPU_CRTC_IRQ_VLINE3:
2999 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3000 break;
3001 case AMDGPU_CRTC_IRQ_VLINE4:
3002 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3003 break;
3004 case AMDGPU_CRTC_IRQ_VLINE5:
3005 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3006 break;
3007 case AMDGPU_CRTC_IRQ_VLINE6:
3008 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3009 break;
3010 default:
3011 break;
3012 }
3013 return 0;
3014}
3015
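/*
 * crtc interrupt handler: src_data[0] selects vblank (0) or vline (1);
 * ack the latched status bit and forward vblank events to drm
 */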
3016static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3017 struct amdgpu_irq_src *source,
3018 struct amdgpu_iv_entry *entry)
3019{
3020 unsigned crtc = entry->src_id - 1;
3021 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3022 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3023 crtc);
3024
3025 switch (entry->src_data[0]) {
3026 case 0:
3027 if (disp_int & interrupt_status_offsets[crtc].vblank)
3028 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3029 else
3030 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3031
3032 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3033 drm_handle_vblank(adev->ddev, crtc);
3034 }
3035 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3036 break;
3037 case 1:
3038 if (disp_int & interrupt_status_offsets[crtc].vline)
3039 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3040 else
3041 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3042
3043 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3044 break;
3045 default:
3046 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3047 break;
3048 }
3049
3050 return 0;
3051}
3052
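/* mask/unmask the per-crtc surface-update (page flip) interrupt */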
3053static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3054 struct amdgpu_irq_src *src,
3055 unsigned type,
3056 enum amdgpu_interrupt_state state)
3057{
3058 u32 reg;
3059
3060 if (type >= adev->mode_info.num_crtc) {
3061 DRM_ERROR("invalid pageflip crtc %d\n", type);
3062 return -EINVAL;
3063 }
3064
3065 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3066 if (state == AMDGPU_IRQ_STATE_DISABLE)
3067 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3068 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3069 else
3070 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3071 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3072
3073 return 0;
3074}
3075
3076static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3077 struct amdgpu_irq_src *source,
3078 struct amdgpu_iv_entry *entry)
3079{
3080 unsigned long flags;
3081 unsigned crtc_id;
3082 struct amdgpu_crtc *amdgpu_crtc;
3083 struct amdgpu_flip_work *works;
3084
 crtc_id = (entry->src_id - 8) >> 1;

 /* validate the crtc index before using it to look up the crtc */
 if (crtc_id >= adev->mode_info.num_crtc) {
 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
 return -EINVAL;
 }
 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3092
3093 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3094 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3095 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3096 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3097
 /* IRQ could occur when in initial stage */
3099 if (amdgpu_crtc == NULL)
3100 return 0;
3101
3102 spin_lock_irqsave(&adev->ddev->event_lock, flags);
3103 works = amdgpu_crtc->pflip_works;
 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3105 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3106 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3107 amdgpu_crtc->pflip_status,
3108 AMDGPU_FLIP_SUBMITTED);
3109 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3110 return 0;
3111 }
3112
 /* page flip completed, clean up */
3114 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3115 amdgpu_crtc->pflip_works = NULL;
3116
 /* wake up userspace with the completion event */
3118 if (works->event)
3119 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3120
3121 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3122
3123 drm_crtc_vblank_put(&amdgpu_crtc->base);
3124 schedule_work(&works->unpin_work);
3125
3126 return 0;
3127}
3128
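/* ack the asserted HPD interrupt and schedule hotplug work to re-probe outputs */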
3129static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3130 struct amdgpu_irq_src *source,
3131 struct amdgpu_iv_entry *entry)
3132{
3133 uint32_t disp_int, mask, tmp;
3134 unsigned hpd;
3135
3136 if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3137 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3138 return 0;
3139 }
3140
3141 hpd = entry->src_data[0];
3142 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3143 mask = interrupt_status_offsets[hpd].hpd;
3144
3145 if (disp_int & mask) {
3146 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3147 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3148 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3149 schedule_work(&adev->hotplug_work);
3150 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3151 }
3152
3153 return 0;
3155}
3156
3157static int dce_v8_0_set_clockgating_state(void *handle,
3158 enum amd_clockgating_state state)
3159{
3160 return 0;
3161}
3162
3163static int dce_v8_0_set_powergating_state(void *handle,
3164 enum amd_powergating_state state)
3165{
3166 return 0;
3167}
3168
3169static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3170 .name = "dce_v8_0",
3171 .early_init = dce_v8_0_early_init,
3172 .late_init = NULL,
3173 .sw_init = dce_v8_0_sw_init,
3174 .sw_fini = dce_v8_0_sw_fini,
3175 .hw_init = dce_v8_0_hw_init,
3176 .hw_fini = dce_v8_0_hw_fini,
3177 .suspend = dce_v8_0_suspend,
3178 .resume = dce_v8_0_resume,
3179 .is_idle = dce_v8_0_is_idle,
3180 .wait_for_idle = dce_v8_0_wait_for_idle,
3181 .soft_reset = dce_v8_0_soft_reset,
3182 .set_clockgating_state = dce_v8_0_set_clockgating_state,
3183 .set_powergating_state = dce_v8_0_set_powergating_state,
3184};
3185
3186static void
3187dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3188 struct drm_display_mode *mode,
3189 struct drm_display_mode *adjusted_mode)
3190{
3191 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3192
3193 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3194
 /* need to call this here rather than in prepare() since we need some crtc info */
3196 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3197
 /* set scaler clears this on some chips */
3199 dce_v8_0_set_interleave(encoder->crtc, mode);
3200
3201 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3202 dce_v8_0_afmt_enable(encoder, true);
3203 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3204 }
3205}
3206
3207static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3208{
3209 struct amdgpu_device *adev = encoder->dev->dev_private;
3210 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3211 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3212
3213 if ((amdgpu_encoder->active_device &
3214 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3215 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3216 ENCODER_OBJECT_ID_NONE)) {
3217 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3218 if (dig) {
3219 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3220 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3221 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3222 }
3223 }
3224
3225 amdgpu_atombios_scratch_regs_lock(adev, true);
3226
3227 if (connector) {
3228 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3229
 /* select the clock/data port if it uses a router */
3231 if (amdgpu_connector->router.cd_valid)
3232 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3233
 /* turn eDP panel on for mode set */
3235 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3236 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3237 ATOM_TRANSMITTER_ACTION_POWER_ON);
3238 }
3239
 /* this is needed for the pll/ss setup to work correctly in some cases */
3241 amdgpu_atombios_encoder_set_crtc_source(encoder);
3242
3243 dce_v8_0_program_fmt(encoder);
3244}
3245
3246static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3247{
3248 struct drm_device *dev = encoder->dev;
3249 struct amdgpu_device *adev = dev->dev_private;
3250
 /* need to call this here as we need the crtc set up */
3252 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3253 amdgpu_atombios_scratch_regs_lock(adev, false);
3254}
3255
3256static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3257{
3258 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3259 struct amdgpu_encoder_atom_dig *dig;
3260
3261 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3262
3263 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3264 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3265 dce_v8_0_afmt_enable(encoder, false);
3266 dig = amdgpu_encoder->enc_priv;
3267 dig->dig_encoder = -1;
3268 }
3269 amdgpu_encoder->active_device = 0;
3270}
3271
/* these are handled by the primary encoders */
3273static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3274{
3275
3276}
3277
3278static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3279{
3280
3281}
3282
3283static void
3284dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3285 struct drm_display_mode *mode,
3286 struct drm_display_mode *adjusted_mode)
3287{
3288
3289}
3290
3291static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3292{
3293
3294}
3295
3296static void
3297dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3298{
3299
3300}
3301
3302static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3303 .dpms = dce_v8_0_ext_dpms,
3304 .prepare = dce_v8_0_ext_prepare,
3305 .mode_set = dce_v8_0_ext_mode_set,
3306 .commit = dce_v8_0_ext_commit,
3307 .disable = dce_v8_0_ext_disable,
 /* no detect for TMDS/LVDS yet */
3309};
3310
3311static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3312 .dpms = amdgpu_atombios_encoder_dpms,
3313 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3314 .prepare = dce_v8_0_encoder_prepare,
3315 .mode_set = dce_v8_0_encoder_mode_set,
3316 .commit = dce_v8_0_encoder_commit,
3317 .disable = dce_v8_0_encoder_disable,
3318 .detect = amdgpu_atombios_encoder_dig_detect,
3319};
3320
3321static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3322 .dpms = amdgpu_atombios_encoder_dpms,
3323 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3324 .prepare = dce_v8_0_encoder_prepare,
3325 .mode_set = dce_v8_0_encoder_mode_set,
3326 .commit = dce_v8_0_encoder_commit,
3327 .detect = amdgpu_atombios_encoder_dac_detect,
3328};
3329
3330static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3331{
3332 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3333 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3334 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3335 kfree(amdgpu_encoder->enc_priv);
3336 drm_encoder_cleanup(encoder);
3337 kfree(amdgpu_encoder);
3338}
3339
3340static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3341 .destroy = dce_v8_0_encoder_destroy,
3342};
3343
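/*
 * Look up the encoder by its ATOM encoder_enum; if it was already
 * added, just OR in the newly supported device, otherwise allocate and
 * register a new drm encoder of the matching type.
 */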
3344static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3345 uint32_t encoder_enum,
3346 uint32_t supported_device,
3347 u16 caps)
3348{
3349 struct drm_device *dev = adev->ddev;
3350 struct drm_encoder *encoder;
3351 struct amdgpu_encoder *amdgpu_encoder;
3352
 /* see if we already added it */
3354 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3355 amdgpu_encoder = to_amdgpu_encoder(encoder);
3356 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3357 amdgpu_encoder->devices |= supported_device;
3358 return;
3359 }
3361 }
3362
 /* add a new one */
3364 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3365 if (!amdgpu_encoder)
3366 return;
3367
3368 encoder = &amdgpu_encoder->base;
3369 switch (adev->mode_info.num_crtc) {
3370 case 1:
3371 encoder->possible_crtcs = 0x1;
3372 break;
3373 case 2:
3374 default:
3375 encoder->possible_crtcs = 0x3;
3376 break;
3377 case 4:
3378 encoder->possible_crtcs = 0xf;
3379 break;
3380 case 6:
3381 encoder->possible_crtcs = 0x3f;
3382 break;
3383 }
3384
3385 amdgpu_encoder->enc_priv = NULL;
3386
3387 amdgpu_encoder->encoder_enum = encoder_enum;
3388 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3389 amdgpu_encoder->devices = supported_device;
3390 amdgpu_encoder->rmx_type = RMX_OFF;
3391 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3392 amdgpu_encoder->is_ext_encoder = false;
3393 amdgpu_encoder->caps = caps;
3394
3395 switch (amdgpu_encoder->encoder_id) {
3396 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3397 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3398 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3399 DRM_MODE_ENCODER_DAC, NULL);
3400 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3401 break;
3402 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3403 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3404 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3405 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3406 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3407 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3408 amdgpu_encoder->rmx_type = RMX_FULL;
3409 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3410 DRM_MODE_ENCODER_LVDS, NULL);
3411 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3412 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3413 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3414 DRM_MODE_ENCODER_DAC, NULL);
3415 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3416 } else {
3417 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3418 DRM_MODE_ENCODER_TMDS, NULL);
3419 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3420 }
3421 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3422 break;
3423 case ENCODER_OBJECT_ID_SI170B:
3424 case ENCODER_OBJECT_ID_CH7303:
3425 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3426 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3427 case ENCODER_OBJECT_ID_TITFP513:
3428 case ENCODER_OBJECT_ID_VT1623:
3429 case ENCODER_OBJECT_ID_HDMI_SI1930:
3430 case ENCODER_OBJECT_ID_TRAVIS:
3431 case ENCODER_OBJECT_ID_NUTMEG:
 /* these are handled by the primary encoders */
3433 amdgpu_encoder->is_ext_encoder = true;
3434 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3435 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3436 DRM_MODE_ENCODER_LVDS, NULL);
3437 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3438 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3439 DRM_MODE_ENCODER_DAC, NULL);
3440 else
3441 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3442 DRM_MODE_ENCODER_TMDS, NULL);
3443 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3444 break;
3445 }
3446}
3447
3448static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3449 .bandwidth_update = &dce_v8_0_bandwidth_update,
3450 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3451 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3452 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3453 .hpd_sense = &dce_v8_0_hpd_sense,
3454 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3455 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3456 .page_flip = &dce_v8_0_page_flip,
3457 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3458 .add_encoder = &dce_v8_0_encoder_add,
3459 .add_connector = &amdgpu_connector_add,
3460};
3461
3462static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3463{
3464 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3465}
3466
3467static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3468 .set = dce_v8_0_set_crtc_interrupt_state,
3469 .process = dce_v8_0_crtc_irq,
3470};
3471
3472static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3473 .set = dce_v8_0_set_pageflip_interrupt_state,
3474 .process = dce_v8_0_pageflip_irq,
3475};
3476
3477static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3478 .set = dce_v8_0_set_hpd_interrupt_state,
3479 .process = dce_v8_0_hpd_irq,
3480};
3481
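/*
 * crtc_irq covers both vblank and vline sources, so its num_types spans
 * the vline enum range (AMDGPU_CRTC_IRQ_VLINE1 + num_crtc).
 */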
3482static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3483{
3484 if (adev->mode_info.num_crtc > 0)
3485 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3486 else
3487 adev->crtc_irq.num_types = 0;
3488 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3489
3490 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3491 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3492
3493 adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3494 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3495}
3496
3497const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3498{
3499 .type = AMD_IP_BLOCK_TYPE_DCE,
3500 .major = 8,
3501 .minor = 0,
3502 .rev = 0,
3503 .funcs = &dce_v8_0_ip_funcs,
3504};
3505
3506const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3507{
3508 .type = AMD_IP_BLOCK_TYPE_DCE,
3509 .major = 8,
3510 .minor = 1,
3511 .rev = 0,
3512 .funcs = &dce_v8_0_ip_funcs,
3513};
3514
3515const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3516{
3517 .type = AMD_IP_BLOCK_TYPE_DCE,
3518 .major = 8,
3519 .minor = 2,
3520 .rev = 0,
3521 .funcs = &dce_v8_0_ip_funcs,
3522};
3523
3524const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3525{
3526 .type = AMD_IP_BLOCK_TYPE_DCE,
3527 .major = 8,
3528 .minor = 3,
3529 .rev = 0,
3530 .funcs = &dce_v8_0_ip_funcs,
3531};
3532
3533const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3534{
3535 .type = AMD_IP_BLOCK_TYPE_DCE,
3536 .major = 8,
3537 .minor = 5,
3538 .rev = 0,
3539 .funcs = &dce_v8_0_ip_funcs,
3540};
3541