1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <drm/amdgpu_drm.h>
28#include "amdgpu.h"
29#include "amdgpu_i2c.h"
30#include "atom.h"
31#include "amdgpu_connectors.h"
32#include "amdgpu_display.h"
33#include <asm/div64.h>
34
35#include <linux/pci.h>
36#include <linux/pm_runtime.h>
37#include <drm/drm_crtc_helper.h>
38#include <drm/drm_edid.h>
39#include <drm/drm_gem_framebuffer_helper.h>
40#include <drm/drm_fb_helper.h>
41#include <drm/drm_vblank.h>
42
/*
 * Fence callback armed by amdgpu_display_flip_handle_fence(): drops the
 * fence reference taken by the flip work and re-kicks the flip worker so
 * it can re-check the remaining fences / scanout position.
 */
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	/* We own a reference on @f (taken when the fence was collected). */
	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}
52
53static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
54 struct dma_fence **f)
55{
56 struct dma_fence *fence= *f;
57
58 if (fence == NULL)
59 return false;
60
61 *f = NULL;
62
63 if (!dma_fence_add_callback(fence, &work->cb,
64 amdgpu_display_flip_callback))
65 return true;
66
67 dma_fence_put(fence);
68 return false;
69}
70
/*
 * Deferred page-flip worker: waits for all buffer fences to signal and for
 * the target vblank to be reached, then programs the hardware flip.
 */
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	/* If any fence is still pending, a callback was armed that will
	 * reschedule this work when it signals — bail out for now. */
	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* If we are inside a vblank that precedes the target vblank count,
	 * flipping now would be one frame early: poll again in ~1 ms. */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting pflip_status. */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Program the flip through the display-IP specific callback. */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Mark the flip as submitted; the pflip interrupt completes it. */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

}
122
123
124
125
126static void amdgpu_display_unpin_work_func(struct work_struct *__work)
127{
128 struct amdgpu_flip_work *work =
129 container_of(__work, struct amdgpu_flip_work, unpin_work);
130 int r;
131
132
133 r = amdgpu_bo_reserve(work->old_abo, true);
134 if (likely(r == 0)) {
135 r = amdgpu_bo_unpin(work->old_abo);
136 if (unlikely(r != 0)) {
137 DRM_ERROR("failed to unpin buffer after flip\n");
138 }
139 amdgpu_bo_unreserve(work->old_abo);
140 } else
141 DRM_ERROR("failed to reserve buffer after flip\n");
142
143 amdgpu_bo_unref(&work->old_abo);
144 kfree(work->shared);
145 kfree(work);
146}
147
/*
 * Queue a page flip of @crtc to @fb, targeting vblank count @target
 * (relative flag semantics handled by the caller/DRM core).
 *
 * Pins the new buffer, collects its fences into a flip work item and
 * either executes the flip immediately or defers it until the fences
 * signal and the target vblank is reached.
 *
 * Returns 0 on success, -EBUSY if a flip is already pending on this crtc,
 * or a negative error code on allocation/pin failure.
 */
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* The currently scanned-out buffer; unpinned later by the unpin work. */
	obj = crtc->primary->fb->obj[0];

	/* Take a reference so the old buffer survives until the flip completes. */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* Pin the new buffer so it cannot move while being scanned out. */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	/* Collect all fences the flip must wait on. */
	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	/* NOTE(review): tiling_flags is read under reservation but appears
	 * unused in this function — confirm whether it can be dropped. */
	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* We borrow the event spin lock for protecting pflip_status. */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	/* Update the crtc fb and kick the worker synchronously once. */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

	/* Error unwind: labels fall through in reverse acquisition order. */
pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
			DRM_ERROR("failed to unpin new abo in error path\n");

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
267
268int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
269 struct drm_modeset_acquire_ctx *ctx)
270{
271 struct drm_device *dev;
272 struct amdgpu_device *adev;
273 struct drm_crtc *crtc;
274 bool active = false;
275 int ret;
276
277 if (!set || !set->crtc)
278 return -EINVAL;
279
280 dev = set->crtc->dev;
281
282 ret = pm_runtime_get_sync(dev->dev);
283 if (ret < 0)
284 return ret;
285
286 ret = drm_crtc_helper_set_config(set, ctx);
287
288 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
289 if (crtc->enabled)
290 active = true;
291
292 pm_runtime_mark_last_busy(dev->dev);
293
294 adev = dev->dev_private;
295
296
297 if (active && !adev->have_disp_power_ref) {
298 adev->have_disp_power_ref = true;
299 return ret;
300 }
301
302
303 if (!active && adev->have_disp_power_ref) {
304 pm_runtime_put_autosuspend(dev->dev);
305 adev->have_disp_power_ref = false;
306 }
307
308
309 pm_runtime_put_autosuspend(dev->dev);
310 return ret;
311}
312
/* Human-readable names for ATOM encoder object IDs; indexed directly by
 * amdgpu_encoder->encoder_id in amdgpu_display_print_display_setup(). */
static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};
356
/* Names for hotplug-detect pins; indexed by amdgpu_connector->hpd.hpd. */
static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
365
/*
 * Dump the connector/encoder topology to the kernel log: for each
 * connector its name, HPD pin, DDC register set and router state, and the
 * encoders feeding it broken down by supported device bits.
 */
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			/* Raw DDC GPIO register offsets for this bus. */
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			/* Digital/analog connector types are expected to have
			 * a DDC bus; a missing one usually means a BIOS bug. */
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			/* Only report encoders that can drive this connector. */
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}
441
442
443
444
445
446bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
447 bool use_aux)
448{
449 u8 out = 0x0;
450 u8 buf[8];
451 int ret;
452 struct i2c_msg msgs[] = {
453 {
454 .addr = DDC_ADDR,
455 .flags = 0,
456 .len = 1,
457 .buf = &out,
458 },
459 {
460 .addr = DDC_ADDR,
461 .flags = I2C_M_RD,
462 .len = 8,
463 .buf = buf,
464 }
465 };
466
467
468 if (amdgpu_connector->router.ddc_valid)
469 amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
470
471 if (use_aux) {
472 ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
473 } else {
474 ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
475 }
476
477 if (ret != 2)
478
479 return false;
480
481
482
483
484
485 if (drm_edid_header_is_valid(buf) < 6) {
486
487
488 return false;
489 }
490 return true;
491}
492
/* Framebuffer vtable: delegate destroy/handle-creation to the generic
 * GEM-backed framebuffer helpers. */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
497
498uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
499{
500 uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
501
502#if defined(CONFIG_DRM_AMD_DC)
503 if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
504 adev->flags & AMD_IS_APU &&
505 amdgpu_device_asic_has_dc_support(adev->asic_type))
506 domain |= AMDGPU_GEM_DOMAIN_GTT;
507#endif
508
509 return domain;
510}
511
512int amdgpu_display_framebuffer_init(struct drm_device *dev,
513 struct amdgpu_framebuffer *rfb,
514 const struct drm_mode_fb_cmd2 *mode_cmd,
515 struct drm_gem_object *obj)
516{
517 int ret;
518 rfb->base.obj[0] = obj;
519 drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
520 ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
521 if (ret) {
522 rfb->base.obj[0] = NULL;
523 return ret;
524 }
525 return 0;
526}
527
528struct drm_framebuffer *
529amdgpu_display_user_framebuffer_create(struct drm_device *dev,
530 struct drm_file *file_priv,
531 const struct drm_mode_fb_cmd2 *mode_cmd)
532{
533 struct drm_gem_object *obj;
534 struct amdgpu_framebuffer *amdgpu_fb;
535 int ret;
536
537 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
538 if (obj == NULL) {
539 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
540 "can't create framebuffer\n", mode_cmd->handles[0]);
541 return ERR_PTR(-ENOENT);
542 }
543
544
545 if (obj->import_attach) {
546 DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
547 return ERR_PTR(-EINVAL);
548 }
549
550 amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
551 if (amdgpu_fb == NULL) {
552 drm_gem_object_put_unlocked(obj);
553 return ERR_PTR(-ENOMEM);
554 }
555
556 ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
557 if (ret) {
558 kfree(amdgpu_fb);
559 drm_gem_object_put_unlocked(obj);
560 return ERR_PTR(ret);
561 }
562
563 return &amdgpu_fb->base;
564}
565
/* Mode-config vtable for the non-DC (legacy) display path. */
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
570
/* Values for the "underscan" connector property. */
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};
576
/* Values for the "audio" connector property. */
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};
582
583
/* Values for the "dither" connector property. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
588
589int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
590{
591 int sz;
592
593 adev->mode_info.coherent_mode_property =
594 drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
595 if (!adev->mode_info.coherent_mode_property)
596 return -ENOMEM;
597
598 adev->mode_info.load_detect_property =
599 drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
600 if (!adev->mode_info.load_detect_property)
601 return -ENOMEM;
602
603 drm_mode_create_scaling_mode_property(adev->ddev);
604
605 sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
606 adev->mode_info.underscan_property =
607 drm_property_create_enum(adev->ddev, 0,
608 "underscan",
609 amdgpu_underscan_enum_list, sz);
610
611 adev->mode_info.underscan_hborder_property =
612 drm_property_create_range(adev->ddev, 0,
613 "underscan hborder", 0, 128);
614 if (!adev->mode_info.underscan_hborder_property)
615 return -ENOMEM;
616
617 adev->mode_info.underscan_vborder_property =
618 drm_property_create_range(adev->ddev, 0,
619 "underscan vborder", 0, 128);
620 if (!adev->mode_info.underscan_vborder_property)
621 return -ENOMEM;
622
623 sz = ARRAY_SIZE(amdgpu_audio_enum_list);
624 adev->mode_info.audio_property =
625 drm_property_create_enum(adev->ddev, 0,
626 "audio",
627 amdgpu_audio_enum_list, sz);
628
629 sz = ARRAY_SIZE(amdgpu_dither_enum_list);
630 adev->mode_info.dither_property =
631 drm_property_create_enum(adev->ddev, 0,
632 "dither",
633 amdgpu_dither_enum_list, sz);
634
635 if (amdgpu_device_has_dc_support(adev)) {
636 adev->mode_info.abm_level_property =
637 drm_property_create_range(adev->ddev, 0,
638 "abm level", 0, 4);
639 if (!adev->mode_info.abm_level_property)
640 return -ENOMEM;
641 }
642
643 return 0;
644}
645
646void amdgpu_display_update_priority(struct amdgpu_device *adev)
647{
648
649 if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
650 adev->mode_info.disp_priority = 0;
651 else
652 adev->mode_info.disp_priority = amdgpu_disp_priority;
653
654}
655
656static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
657{
658
659 if ((mode->vdisplay == 480 && mode->hdisplay == 720) ||
660 (mode->vdisplay == 576) ||
661 (mode->vdisplay == 720) ||
662 (mode->vdisplay == 1080))
663 return true;
664 else
665 return false;
666}
667
/*
 * CRTC mode fixup: choose the RMX scaler mode from the driving encoder,
 * apply underscan borders for HDMI HDTV modes, and compute the resulting
 * vertical/horizontal scaling ratios (amdgpu_crtc->vsc/hsc).
 * Always returns true (the mode is never rejected here).
 */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* Scale only when the requested mode is smaller than the
		 * panel's native mode; otherwise leave scaling off. */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;

		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* Underscan: apply for non-interlaced modes when forced on,
		 * or automatically for HDMI monitors showing an HDTV mode.
		 * Border defaults to (hdisplay/32 + 16) pixels per side. */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	/* Fixed-point src/dst ratios for the scaler; 1.0 when scaling is off. */
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
/*
 * Read the current scanout position of @pipe and classify it relative to
 * vblank. Optionally samples ktime immediately before (@stime) and after
 * (@etime) the hardware register read so callers can bound the sample.
 *
 * On return *vpos/*hpos hold the position normalized so that 0 is the
 * start of active scanout (vpos is negative inside the vblank leading
 * into the next frame). With GET_DISTANCE_TO_VBLANKSTART, *vpos/*hpos
 * instead carry the distance to the vblank start.
 *
 * Returns a mask of DRM_SCANOUTPOS_VALID / _ACCURATE / _IN_VBLANK.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* Timestamp taken right before the position register read. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* ... and right after, bracketing the hardware access. */
	if (etime)
		*etime = ktime_get();

	/* position packs hpos in the upper 16 bits, vpos in the lower. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* vbl packs vblank end (upper 16 bits) and start (lower); non-zero
	 * means the hardware reported real vblank boundaries. */
	if (vbl > 0) {
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* No hardware info: approximate from the mode timings. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Caller wants distance to the (real) start of vblank: hpos is
	 * repurposed to carry it. */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		*hpos = *vpos - vbl_start;
	}

	/* Unless USE_REAL_VBLANKSTART, pull the vblank start earlier by the
	 * line-buffer lead lines so that region also counts as "in vblank".
	 * NOTE(review): lead-line semantics come from lb_vblank_lead_lines
	 * setup elsewhere — confirm against the display-IP code. */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Inside the active scanout area? */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Distance mode: report vpos relative to vblank start and stop. */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		*vpos -= vbl_start;
		return ret;
	}

	/* Inside vblank past its start: express the position as a negative
	 * count down to the start of active scanout of the next frame. */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* Guard against vpos >= vtotal (would go positive again). */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Normalize so 0 = start of active scanout. */
	*vpos = *vpos - vbl_end;

	return ret;
}
880
881int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
882{
883 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
884 return AMDGPU_CRTC_IRQ_NONE;
885
886 switch (crtc) {
887 case 0:
888 return AMDGPU_CRTC_IRQ_VBLANK1;
889 case 1:
890 return AMDGPU_CRTC_IRQ_VBLANK2;
891 case 2:
892 return AMDGPU_CRTC_IRQ_VBLANK3;
893 case 3:
894 return AMDGPU_CRTC_IRQ_VBLANK4;
895 case 4:
896 return AMDGPU_CRTC_IRQ_VBLANK5;
897 case 5:
898 return AMDGPU_CRTC_IRQ_VBLANK6;
899 default:
900 return AMDGPU_CRTC_IRQ_NONE;
901 }
902}
903