/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

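/* Fence callback: drop the fence reference and kick the flip worker. */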
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

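/*
 * If the fence in *f is still pending, install amdgpu_flip_callback() on
 * it and return true so the caller backs off; the callback reschedules
 * the flip work once the fence signals. Returns false if the fence was
 * NULL or has already signalled, in which case the flip can proceed.
 */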
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
				     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

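/*
 * Deferred page-flip worker: waits (via fence callbacks) for all fences
 * on the new buffer, optionally delays until the target vblank has been
 * reached, then programs the flip into the hardware.
 */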
static void amdgpu_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	if (amdgpu_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
			return;

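	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip.
	 */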
	if (amdgpu_crtc->enabled &&
	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

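	/* We borrow the event spin lock for protecting pflip_status */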
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

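/*
 * Handle unpin events outside the interrupt handler proper: unpin and
 * drop the reference on the old buffer once the flip has completed.
 */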
static void amdgpu_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	r = amdgpu_bo_reserve(work->old_abo, false);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0))
			DRM_ERROR("failed to unpin buffer after flip\n");
		amdgpu_bo_unreserve(work->old_abo);
	} else {
		DRM_ERROR("failed to reserve buffer after flip\n");
	}

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

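/*
 * Queue a page flip on @crtc to @fb, to complete no earlier than vblank
 * @target. The new buffer is pinned in VRAM and its fences collected;
 * the actual flip is deferred to amdgpu_flip_work_func() once the
 * fences have signalled.
 */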
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags, uint32_t target)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *old_amdgpu_fb;
	struct amdgpu_framebuffer *new_amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	u64 base;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
	obj = old_amdgpu_fb->obj;

	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
	obj = new_amdgpu_fb->obj;
	new_abo = gem_to_amdgpu_bo(obj);

	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
	if (unlikely(r != 0)) {
		r = -EINVAL;
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
	}

	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	work->base = base;
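
	/*
	 * Translate the absolute target from the caller's vblank counter
	 * space (drm_crtc_vblank_count()) into the hardware counter space
	 * (amdgpu_get_vblank_counter_kms()). E.g. (illustrative numbers
	 * only): with target = 102, a DRM count of 100 and a hardware
	 * count of 5000, the flip must not complete before hw vblank 5002.
	 */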
	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

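	/* We borrow the event spin lock for protecting flip_work */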
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

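	/* update crtc fb */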
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
		DRM_ERROR("failed to unpin new abo in error path\n");
unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

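/*
 * Modeset wrapper that balances a runtime PM reference against the
 * number of active crtcs: the device holds one display power reference
 * while any crtc is enabled, and drops it when the last one goes away.
 */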
int amdgpu_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;

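	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */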
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}

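	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */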
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

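	/* drop the power reference we got coming in here */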
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

void amdgpu_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

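/**
 * amdgpu_ddc_probe - probe for a sink on the connector's DDC bus
 *
 * Writes the DDC slave offset and reads back the start of the EDID
 * block, over native i2c or DP AUX depending on @use_aux, to check
 * whether a display is attached and responding.
 */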
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
		      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

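	/* on hw with routers, select the right port */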
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		return false;

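	/* Probe also for a valid EDID header, which starts with
	 * 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00. Only the first
	 * 6 bytes must match here, since drm_edid_header_is_valid() can
	 * fix up the last 2 bytes.
	 */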
	if (drm_edid_header_is_valid(buf) < 6)
		return false;

	return true;
}

static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(amdgpu_fb);
}

static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = amdgpu_user_framebuffer_destroy,
	.create_handle = amdgpu_user_framebuffer_create_handle,
};

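/* Fill in an amdgpu_framebuffer from a mode_fb_cmd2 and its backing GEM object. */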
int
amdgpu_framebuffer_init(struct drm_device *dev,
			struct amdgpu_framebuffer *rfb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;

	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

static void amdgpu_output_poll_changed(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_fb_output_poll_changed(adev);
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

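/* Create the driver-specific KMS properties exposed on connectors. */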
int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	if (adev->is_atom_bios) {
		adev->mode_info.coherent_mode_property =
			drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
		if (!adev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_update_display_priority(struct amdgpu_device *adev)
{
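	/* adjustment options for the display watermarks */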
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

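/*
 * Try to guess whether the mode is a TV-style HDTV mode (480p, 576p,
 * 720p, 1080i/p) rather than a monitor mode, so underscan borders are
 * only applied where a TV is likely.
 */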
static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) ||
	    (mode->vdisplay == 576) ||
	    (mode->vdisplay == 720) ||
	    (mode->vdisplay == 1080))
		return true;
	else
		return false;
}

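/*
 * Pick the CRTC scaler (RMX) configuration and underscan borders for
 * the requested mode, and derive the resulting v/h scaling ratios.
 */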
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;

		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

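		/* fix up for overscan on hdmi: add borders and scale the
		 * image into the reduced active area
		 */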
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

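/*
 * Retrieve the current video scanout position of the crtc, and an
 * optional accurate timestamp of when the query happened.
 *
 * @dev: drm device
 * @pipe: crtc to query
 * @flags: USE_REAL_VBLANKSTART to use the real start of vblank instead
 *         of the fudged earlier start; GET_DISTANCE_TO_VBLANKSTART to
 *         return the distance to the (fudged) vblank start in *vpos and
 *         the distance to the real vblank start in *hpos
 * @vpos: vertical scanout position (negative while inside vblank)
 * @hpos: horizontal scanout position
 * @stime: optional timestamp taken immediately before the position query
 * @etime: optional timestamp taken immediately after the position query
 * @mode: display mode the crtc is currently running
 *
 * Returns a bitmask of DRM_SCANOUTPOS_VALID, DRM_SCANOUTPOS_IN_VBLANK
 * and DRM_SCANOUTPOS_ACCURATE describing the quality of the result.
 */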
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;
	struct amdgpu_device *adev = dev->dev_private;

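	/* Get optional system timestamp before query. */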
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

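	/* Get optional system timestamp after query. */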
	if (etime)
		*etime = ktime_get();

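	/* Decode the register value into vertical and horizontal position. */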
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

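	/* Valid vblank area boundaries retrieved from the gpu? If not,
	 * fake a vblank region starting at crtc_vdisplay.
	 */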
	if (vbl > 0) {
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	if (flags & GET_DISTANCE_TO_VBLANKSTART)
		*hpos = *vpos - vbl_start;

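	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start of
	 * vblank. Some driver-internal callers need the true vblank start
	 * and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position
	 * enters the vblank, whereas the crtc scanout position naturally
	 * lags the line buffer read position.
	 */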
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

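	/* Called from driver-internal vblank counter query code?
	 * Then return the distance to the fudged vblank start in *vpos.
	 */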
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		*vpos -= vbl_start;
		return ret;
	}

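	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will be >= 0 in the active scanout area, but negative
	 * inside vblank, counting down the number of lines until start
	 * of active scanout. Also correct for the shifted end of vblank
	 * at vbl_end.
	 */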
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	*vpos = *vpos - vbl_end;

	return ret;
}

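/* Map a crtc index to the corresponding vblank interrupt type. */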
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}