#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

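/*
 * The HPD (hot plug detect) register blocks are evenly spaced, so the
 * registers for pad N can be addressed as mmDC_HPD1_<reg> + hpd_offsets[N].
 */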
static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

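/*
 * Per-display-controller interrupt status lookup: .reg is the
 * DISP_INTERRUPT_STATUS* register to read, and the remaining fields are
 * the vblank/vline/hpd mask bits within that register.
 */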
static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
}

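/* CRTC status helpers used by dce_v6_0_vblank_wait() below. */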
static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	return !!(RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
		  CRTC_STATUS__CRTC_V_BLANK_MASK);
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return pos1 != pos2;
}

/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc.
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected.
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin to match the
 * current connection state.
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card.
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable the hpd interrupt on eDP or
			 * LVDS: it can break the aux dp channel on some
			 * systems and cause interrupt storms during dpms,
			 * so just disable it for these connectors.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card.
 * Disable the hpd pins and the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		/* write back the value with the enable bit cleared (the
		 * original code computed tmp and then wrote 0, leaving a
		 * dead store)
		 */
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

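/*
 * dce_v6_0_stop_mc_access() blanks the active display controllers and
 * clears their CRTC master enables so the memory controller can be
 * reprogrammed safely; the VGA render/HDP state is saved in *save for
 * dce_v6_0_resume_mc_access().
 */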
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count;
	int i, j;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(mmVGA_RENDER_CONTROL, 0);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);

			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
				dce_v6_0_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = evergreen_get_vblank_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (evergreen_get_vblank_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}

			/* disable the CRTC master enable */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

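/*
 * dce_v6_0_resume_mc_access() repoints all display controllers and the
 * VGA aperture at the start of VRAM, flushes pending surface updates on
 * controllers recorded as enabled, and restores the saved VGA state.
 */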
static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 0) {
				tmp &= ~0x7;
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

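/*
 * Note: only the render==false path does anything here; VGA render is
 * never re-enabled by this callback.
 */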
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		num_crtc = 6;
		break;
	case CHIP_OLAND:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
		break;
	}
	return num_crtc;
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* disable VGA render and enabled crtcs, if not done by the bios */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* disable crtcs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

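/*
 * Program the FMT block ahead of the encoder: set up dithering or
 * truncation to match the monitor's bit depth.  LVDS/eDP FMT is handled
 * by the atom tables, and 10 bpc and above need no depth reduction.
 */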
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels.
 * Used for display watermark bandwidth calculations.
 * Returns the number of dram channels.
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

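/*
 * Watermark calculation inputs for a single display head.  Clock values
 * are in kHz and times in ns, as set up by dce_v6_0_program_watermarks()
 * (units inferred from that function, not from hardware documentation).
 */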
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin */
	u32 sclk; /* engine clock */
	u32 disp_clk; /* display clock */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth.
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth:
	 * yclk (MHz) * 4 bytes per channel * channels * 0.7 efficiency
	 */
	fixed20_12 dram_efficiency;
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display.
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation;
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display.
 * Used for display watermark bandwidth calculations.
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth:
	 * 32 bytes per cycle at 0.8 efficiency of sclk
	 */
	fixed20_12 return_efficiency;
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display.
 * Used for display watermark bandwidth calculations.
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth:
	 * 32 bytes per cycle at 0.8 efficiency of the display clock
	 */
	fixed20_12 disp_clk_request_efficiency;
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display.
 * Used for display watermark bandwidth calculations.
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth; display can use this much at most. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display.
 * Used for display watermark bandwidth calculations.
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark.
 * Used for display watermark bandwidth calculations.
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in question */
	u32 mc_latency = 2000; /* 2000 ns for vram */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth.
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	return dce_v6_0_average_bandwidth(wm) <=
		(dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads);
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth.
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	return dce_v6_0_average_bandwidth(wm) <=
		(dce_v6_0_available_bandwidth(wm) / wm->num_heads);
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding.
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	return dce_v6_0_latency_watermark(wm) <= latency_hiding;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller.
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
}

static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation.
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	/* crtc pairs share a line buffer, so adjust both members together */
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/*
 * Audio endpoint handling has not been brought up for DCE 6 yet; only
 * the enable stub below exists.
 */
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	DRM_INFO("dce_v6_0_audio_enable: not implemented\n");
}

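/* Register offsets of the seven audio endpoint pins, relative to the
 * first pin's block at 0x1780.
 */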
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
}

/*
 * AFMT (HDMI/DP audio and infoframe) programming has not been brought
 * up for DCE 6 yet; dce_v6_0_afmt_setmode() below is a stub.
 */
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	DRM_INFO("dce_v6_0_afmt_setmode: not implemented\n");
}

static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* nothing to do if the state already matches */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE6 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			DRM_ERROR("Out of memory allocating afmt table\n");
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

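/* Per-CRTC VGA control registers, indexed by crtc_id. */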
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
}

static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}

static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels, pipe_config;
	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(abo);
	} else {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format, &format_name));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= GRPH_NUM_BANKS(num_banks);
		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
		fb_format |= GRPH_TILE_SPLIT(tile_split);
		fb_format |= GRPH_BANK_WIDTH(bankw);
		fb_format |= GRPH_BANK_HEIGHT(bankh);
		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
	}

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	fb_format |= GRPH_PIPE_CONFIG(pipe_config);

	dce_v6_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v6_0_grph_enable(crtc, true);

	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;

	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v6_0_bandwidth_update(adev);

	return 0;
}

static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
		       INTERLEAVE_EN);
	else
		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		ICON_DEGAMMA_MODE(0) |
		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
}

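/*
 * Map the encoder's UNIPHY instance and link to a DIG block:
 * UNIPHY links A/B drive DIG0/DIG1, UNIPHY1 drives DIG2/DIG3,
 * UNIPHY2 drives DIG4/DIG5, and UNIPHY3 drives DIG6.
 */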
static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		return dig->linkb ? 1 : 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		return dig->linkb ? 3 : 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return dig->linkb ? 5 : 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}

/**
 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL (or the external DP clock, when available) can serve
 * all DP crtcs/encoders; for non-DP monitors, an unused PPLL is picked,
 * preferring one already shared by another crtc with the same clock.
 */
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else
			return ATOM_PPLL0;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* PPLL1 and PPLL2 */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}

static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

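/*
 * Program the cursor position; the hardware takes unsigned coordinates,
 * so a cursor partially off the top/left edge is handled by clamping the
 * position to 0 and shifting the hot spot instead.
 */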
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;
	int w = amdgpu_crtc->cursor_width;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* cursor coordinates are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

	return 0;
}

static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v6_0_lock_cursor(crtc, true);
	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
	dce_v6_0_lock_cursor(crtc, false);

	return ret;
}

static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v6_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v6_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v6_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v6_0_show_cursor(crtc);
	dce_v6_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}

static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v6_0_lock_cursor(crtc, true);

		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);

		dce_v6_0_show_cursor(crtc);
		dce_v6_0_lock_cursor(crtc, false);
	}
}

static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int i;

	/* userspace palettes are always correct as is; the hardware LUT
	 * holds 10 bits per component, so drop the low 6 bits of the
	 * 16-bit values here.
	 */
	for (i = 0; i < size; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v6_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
	.cursor_move = dce_v6_0_crtc_cursor_move,
	.gamma_set = dce_v6_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v6_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};

static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		/* make sure the vblank and pageflip interrupts are enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v6_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled)
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v6_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll; don't turn
			 * it off
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2139
2140static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2141 struct drm_display_mode *mode,
2142 struct drm_display_mode *adjusted_mode,
2143 int x, int y, struct drm_framebuffer *old_fb)
2144{
2145 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2146
2147 if (!amdgpu_crtc->adjusted_clock)
2148 return -EINVAL;
2149
2150 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2151 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2152 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2153 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2154 amdgpu_atombios_crtc_scaler_setup(crtc);
2155 dce_v6_0_cursor_reset(crtc);
2156
2157 amdgpu_crtc->hw_mode = *adjusted_mode;
2158
2159 return 0;
2160}
2161
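
/**
 * dce_v6_0_crtc_mode_fixup - validate a mode and bind crtc resources
 * @crtc: crtc being configured
 * @mode: requested mode
 * @adjusted_mode: mode actually programmed, may be adjusted here
 *
 * Caches the encoder/connector pair driving this CRTC, applies the
 * scaler fixups, prepares the PLL parameters and picks a PPLL.
 * Returns false (rejecting the mode) if any of these steps fail.
 */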
static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;

	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);

	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.load_lut = dce_v6_0_crtc_load_lut,
	.disable = dce_v6_0_crtc_disable,
};
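
/**
 * dce_v6_0_crtc_init - allocate and initialize a CRTC
 * @adev: amdgpu device pointer
 * @index: crtc index
 *
 * Allocates the amdgpu_crtc structure, registers it with DRM,
 * initializes the gamma table and cursor limits, and hooks up the
 * CRTC and helper function tables.
 */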
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	/* identity LUT: the 8 bit index is widened to the 10 bit hardware range */
	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);

	return 0;
}
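
/**
 * dce_v6_0_early_init - set up function pointers and display limits
 * @handle: amdgpu device pointer
 *
 * Hooks up the audio endpoint register accessors, the display and IRQ
 * function tables, and sets the CRTC/HPD/DIG counts for the ASIC.
 */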
static int dce_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;

	dce_v6_0_set_display_funcs(adev);
	dce_v6_0_set_irq_funcs(adev);

	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
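
/**
 * dce_v6_0_sw_init - software init for the display block
 * @handle: amdgpu device pointer
 *
 * Registers the vblank, pageflip and hotplug interrupt sources, sets
 * up the DRM mode config, creates the CRTCs, reads the connector
 * setup from the AtomBIOS object table and initializes audio/AFMT.
 */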
static int dce_v6_0_sw_init(void *handle)
{
	int r, i;
	bool ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
	adev->ddev->mode_config.async_page_flip = true;
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;
	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}
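
/**
 * dce_v6_0_hw_init - hardware init for the display block
 * @handle: amdgpu device pointer
 *
 * Initializes the DIG PHYs and display engine PLL, sets up hotplug
 * detection, silences the audio pins and enables the pageflip
 * interrupts.
 */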
static int dce_v6_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v6_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v6_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v6_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_fini(adev);

	return 0;
}

static int dce_v6_0_suspend(void *handle)
{
	return dce_v6_0_hw_fini(handle);
}

static int dce_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v6_0_hw_init(handle);

	/* turn on the backlight */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v6_0_is_idle(void *handle)
{
	return true;
}

static int dce_v6_0_wait_for_idle(void *handle)
{
	return 0;
}

static int dce_v6_0_soft_reset(void *handle)
{
	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
	return 0;
}
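
/**
 * dce_v6_0_set_crtc_vblank_interrupt_state - enable/disable vblank irq
 * @adev: amdgpu device pointer
 * @crtc: crtc index
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Sets or clears the VBLANK bit in the INT_MASK register of the given
 * CRTC's register block.
 */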
static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = SI_CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = SI_CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = SI_CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = SI_CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = SI_CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = SI_CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask &= ~VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask |= VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
	/* not implemented */
}
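
/**
 * dce_v6_0_set_hpd_interrupt_state - enable/disable an HPD interrupt
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: HPD pin index
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Sets or clears the interrupt enable bit in the DC_HPDx_INT_CONTROL
 * register for the given hotplug pin.
 */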
static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}
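
/**
 * dce_v6_0_crtc_irq - CRTC interrupt handler
 * @adev: amdgpu device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Acks vblank/vline interrupts for the signalling CRTC and forwards
 * vblank events to DRM.
 */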
static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}
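
/**
 * dce_v6_0_pageflip_irq - page flip completion interrupt handler
 * @adev: amdgpu device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Acks the flip-completed status, sends the pending vblank event to
 * userspace and schedules the deferred work that unpins the old
 * framebuffer.
 */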
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed, clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}
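
/**
 * dce_v6_0_hpd_irq - hotplug interrupt handler
 * @adev: amdgpu device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Acks the hotplug interrupt for the signalling pin and schedules the
 * hotplug work to reprobe the connectors.
 */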
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_INFO("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.wait_for_idle = dce_v6_0_wait_for_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}
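
/**
 * dce_v6_0_encoder_prepare - get an encoder ready for a mode set
 * @encoder: encoder being configured
 *
 * Picks a DIG encoder/AFMT block for digital outputs, locks the
 * AtomBIOS scratch registers, routes the i2c ports, powers on eDP
 * panels, selects the CRTC source and programs the FMT block.
 */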
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v6_0_program_fmt(encoder);
}

static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};
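
/**
 * dce_v6_0_encoder_add - register an encoder from the BIOS tables
 * @adev: amdgpu device pointer
 * @encoder_enum: encoder object enum from the AtomBIOS tables
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If the encoder was already added, just extends its supported device
 * mask; otherwise allocates a new amdgpu_encoder, sets the possible
 * CRTC mask and hooks up DAC, DIG or external helper functions based
 * on the encoder object id.
 */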
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}
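
/*
 * Display controller callbacks used by the amdgpu core: mode setting,
 * backlight control, hotplug sensing and page flipping for all DCE 6.x
 * parts.
 */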
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.vblank_wait = &dce_v6_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v6_0_stop_mc_access,
	.resume_mc_access = &dce_v6_0_resume_mc_access,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_interrupt_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_interrupt_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_interrupt_state,
	.process = dce_v6_0_hpd_irq,
};

static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};