1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/pci.h>
30#include <linux/pm_runtime.h>
31
32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_device.h>
34#include <drm/drm_irq.h>
35#include <drm/drm_probe_helper.h>
36#include <drm/drm_vblank.h>
37#include <drm/radeon_drm.h>
38
39#include "atom.h"
40#include "radeon.h"
41#include "radeon_kms.h"
42#include "radeon_reg.h"
43
44
45#define RADEON_WAIT_IDLE_TIMEOUT 200
46
47
48
49
50
51
52
53
54irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
55{
56 struct drm_device *dev = (struct drm_device *) arg;
57 struct radeon_device *rdev = dev->dev_private;
58 irqreturn_t ret;
59
60 ret = radeon_irq_process(rdev);
61 if (ret == IRQ_HANDLED)
62 pm_runtime_mark_last_busy(dev->dev);
63 return ret;
64}
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/**
 * radeon_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * Hot plug event work handler. Scheduled from the irq handler when a
 * hotplug interrupt fires; walks the connector list, calls the hotplug
 * handler for each connector, then sends a DRM hotplug event to alert
 * userspace.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work.work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* we can race here at startup, some boards seem to trigger
	 * hotplug irqs when they shouldn't, so bail out before the
	 * mode config is up. */
	if (!rdev->mode_info.mode_config_initialized)
		return;

	/* hold the mode config mutex while touching the connector list */
	mutex_lock(&mode_config->mutex);
	list_for_each_entry(connector, &mode_config->connector_list, head)
		radeon_connector_hotplug(connector);
	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
100
101static void radeon_dp_work_func(struct work_struct *work)
102{
103 struct radeon_device *rdev = container_of(work, struct radeon_device,
104 dp_work);
105 struct drm_device *dev = rdev->ddev;
106 struct drm_mode_config *mode_config = &dev->mode_config;
107 struct drm_connector *connector;
108
109
110 list_for_each_entry(connector, &mode_config->connector_list, head)
111 radeon_connector_hotplug(connector);
112}
113
114
115
116
117
118
119
120
/**
 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics): clears the driver's
 * software interrupt state, pushes the disabled state to the hardware,
 * and acks any interrupts that are already pending.
 */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.dpm_thermal = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	/* push the cleared state to the hardware enable registers */
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	/* Clear bits: process (and ack) anything already pending */
	radeon_irq_process(rdev);
}
144
145
146
147
148
149
150
151
152
153int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
154{
155 struct radeon_device *rdev = dev->dev_private;
156
157 if (ASIC_IS_AVIVO(rdev))
158 dev->max_vblank_count = 0x00ffffff;
159 else
160 dev->max_vblank_count = 0x001fffff;
161
162 return 0;
163}
164
165
166
167
168
169
170
171
/**
 * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics),
 * mirroring the reset done in the preinstall callback.
 */
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	if (rdev == NULL) {
		return;
	}
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.dpm_thermal = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	/* push the disabled state out to the hardware */
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
196
197
198
199
200
201
202
203
204
205
206
207static bool radeon_msi_ok(struct radeon_device *rdev)
208{
209
210 if (rdev->family < CHIP_RV380)
211 return false;
212
213
214 if (rdev->flags & RADEON_IS_AGP)
215 return false;
216
217
218
219
220
221
222 if (rdev->family < CHIP_BONAIRE) {
223 dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
224 rdev->pdev->no_64bit_msi = 1;
225 }
226
227
228 if (radeon_msi == 1)
229 return true;
230 else if (radeon_msi == 0)
231 return false;
232
233
234
235 if ((rdev->pdev->device == 0x791f) &&
236 (rdev->pdev->subsystem_vendor == 0x103c) &&
237 (rdev->pdev->subsystem_device == 0x30c2))
238 return true;
239
240
241 if ((rdev->pdev->device == 0x791f) &&
242 (rdev->pdev->subsystem_vendor == 0x1028) &&
243 (rdev->pdev->subsystem_device == 0x01fc))
244 return true;
245
246
247 if ((rdev->pdev->device == 0x791f) &&
248 (rdev->pdev->subsystem_vendor == 0x1028) &&
249 (rdev->pdev->subsystem_device == 0x01fd))
250 return true;
251
252
253 if ((rdev->pdev->device == 0x791f) &&
254 (rdev->pdev->subsystem_vendor == 0x107b) &&
255 (rdev->pdev->subsystem_device == 0x0185))
256 return true;
257
258
259 if (rdev->family == CHIP_RS690)
260 return true;
261
262
263
264
265
266 if (rdev->family == CHIP_RV515)
267 return false;
268 if (rdev->flags & RADEON_IS_IGP) {
269
270 if (rdev->family >= CHIP_PALM)
271 return true;
272
273 return false;
274 }
275
276 return true;
277}
278
279
280
281
282
283
284
285
286
/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Sets up vblank handling, MSIs, the deferred-work handlers and installs
 * the irq handler (all asics).
 * Returns 0 for success, negative error code on failure.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	spin_lock_init(&rdev->irq.lock);

	/* Disable vblank irqs aggressively for power-saving */
	rdev->ddev->vblank_disable_immediate = true;

	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}

	/* enable msi when the asic/platform supports it */
	rdev->msi_enabled = 0;

	if (radeon_msi_ok(rdev)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
	}

	/* work items must exist before the irq handler can schedule them */
	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev, rdev->pdev->irq);
	if (r) {
		/* roll back: no irq means no one will queue hotplug work */
		rdev->irq.installed = false;
		flush_delayed_work(&rdev->hotplug_work);
		return r;
	}

	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
327
328
329
330
331
332
333
334
335void radeon_irq_kms_fini(struct radeon_device *rdev)
336{
337 if (rdev->irq.installed) {
338 drm_irq_uninstall(rdev->ddev);
339 rdev->irq.installed = false;
340 if (rdev->msi_enabled)
341 pci_disable_msi(rdev->pdev);
342 flush_delayed_work(&rdev->hotplug_work);
343 }
344}
345
346
347
348
349
350
351
352
353
354
355
356void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
357{
358 unsigned long irqflags;
359
360 if (!rdev->ddev->irq_enabled)
361 return;
362
363 if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
364 spin_lock_irqsave(&rdev->irq.lock, irqflags);
365 radeon_irq_set(rdev);
366 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
367 }
368}
369
370
371
372
373
374
375
376
377
378
379
380bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
381{
382 return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
383}
384
385
386
387
388
389
390
391
392
393
394
/**
 * radeon_irq_kms_sw_irq_put - disable software interrupt
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt to disable
 *
 * Drops a reference on the software interrupt for a specific ring; the
 * hardware is only reprogrammed when the refcount hits zero. Must be
 * balanced against radeon_irq_kms_sw_irq_get{,_delayed}().
 */
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	/* only the last reference disables the interrupt in hardware */
	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
408
409
410
411
412
413
414
415
416
417
/**
 * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
 *
 * @rdev: radeon device pointer
 * @crtc: crtc whose pageflip interrupt to enable
 *
 * Takes a reference on the pageflip interrupt for a specific crtc; the
 * hardware enable register is only touched on the 0 -> 1 transition.
 */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	/* only the first reference needs to program the hardware */
	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
434
435
436
437
438
439
440
441
442
443
444void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
445{
446 unsigned long irqflags;
447
448 if (crtc < 0 || crtc >= rdev->num_crtc)
449 return;
450
451 if (!rdev->ddev->irq_enabled)
452 return;
453
454 if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
455 spin_lock_irqsave(&rdev->irq.lock, irqflags);
456 radeon_irq_set(rdev);
457 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
458 }
459}
460
461
462
463
464
465
466
467
468
/**
 * radeon_irq_kms_enable_afmt - enable audio format change interrupt
 *
 * @rdev: radeon device pointer
 * @block: afmt block whose interrupt to enable
 *
 * Enables the afmt change interrupt for a specific afmt block and
 * pushes the new state to the hardware.
 */
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

}
482
483
484
485
486
487
488
489
490
491void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
492{
493 unsigned long irqflags;
494
495 if (!rdev->ddev->irq_enabled)
496 return;
497
498 spin_lock_irqsave(&rdev->irq.lock, irqflags);
499 rdev->irq.afmt[block] = false;
500 radeon_irq_set(rdev);
501 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
502}
503
504
505
506
507
508
509
510
511
512void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
513{
514 unsigned long irqflags;
515 int i;
516
517 if (!rdev->ddev->irq_enabled)
518 return;
519
520 spin_lock_irqsave(&rdev->irq.lock, irqflags);
521 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
522 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
523 radeon_irq_set(rdev);
524 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
525}
526
527
528
529
530
531
532
533
534
/**
 * radeon_irq_kms_disable_hpd - disable hotplug detect interrupts
 *
 * @rdev: radeon device pointer
 * @hpd_mask: bitmask of hpd pins to disable (bit i == pin i)
 *
 * Disables the hotplug detect interrupt for every pin set in @hpd_mask
 * and pushes the new state to the hardware; other pins are untouched.
 */
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* hpd[i] is bool, so logical NOT works here: &= 0 clears the pin
	 * when its mask bit is set, &= 1 leaves it unchanged otherwise */
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/**
 * radeon_irq_kms_set_irq_n_enabled - helper for updating interrupt enable
 * registers
 *
 * @rdev: radeon device pointer
 * @reg: the register to write to enable/disable interrupts
 * @mask: the mask that enables the interrupts
 * @enable: whether to enable or disable the interrupt register
 * @name: the name of the interrupt register to print to the kernel log
 * @n: the number of the interrupt register to print to the kernel log
 *
 * Read-modify-write helper for interrupt enable registers. If the
 * register already matches the requested state nothing is written;
 * otherwise the mask bits are set or cleared and a debug message logged.
 */
void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
				      u32 reg, u32 mask,
				      bool enable, const char *name, unsigned n)
{
	u32 tmp = RREG32(reg);

	/* nothing to do if the bits already match the requested state */
	if (!!(tmp & mask) == enable)
		return;

	if (enable) {
		DRM_DEBUG("%s%d interrupts enabled\n", name, n);
		WREG32(reg, tmp |= mask);
	} else {
		DRM_DEBUG("%s%d interrupts disabled\n", name, n);
		WREG32(reg, tmp & ~mask);
	}
}
585