1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/radeon_drm.h>
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35#include <linux/pm_runtime.h>
36
37#define RADEON_WAIT_IDLE_TIMEOUT 200
38
39
40
41
42
43
44
45
46
47
48irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
49{
50 struct drm_device *dev = (struct drm_device *) arg;
51 struct radeon_device *rdev = dev->dev_private;
52 irqreturn_t ret;
53
54 ret = radeon_irq_process(rdev);
55 if (ret == IRQ_HANDLED)
56 pm_runtime_mark_last_busy(dev->dev);
57 return ret;
58}
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74static void radeon_hotplug_work_func(struct work_struct *work)
75{
76 struct radeon_device *rdev = container_of(work, struct radeon_device,
77 hotplug_work.work);
78 struct drm_device *dev = rdev->ddev;
79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector;
81
82
83
84 if (!rdev->mode_info.mode_config_initialized)
85 return;
86
87 mutex_lock(&mode_config->mutex);
88 if (mode_config->num_connector) {
89 list_for_each_entry(connector, &mode_config->connector_list, head)
90 radeon_connector_hotplug(connector);
91 }
92 mutex_unlock(&mode_config->mutex);
93
94 drm_helper_hpd_irq_event(dev);
95}
96
97static void radeon_dp_work_func(struct work_struct *work)
98{
99 struct radeon_device *rdev = container_of(work, struct radeon_device,
100 dp_work);
101 struct drm_device *dev = rdev->ddev;
102 struct drm_mode_config *mode_config = &dev->mode_config;
103 struct drm_connector *connector;
104
105
106 if (mode_config->num_connector) {
107 list_for_each_entry(connector, &mode_config->connector_list, head)
108 radeon_connector_hotplug(connector);
109 }
110}
111
112
113
114
115
116
117
118
119void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
120{
121 struct radeon_device *rdev = dev->dev_private;
122 unsigned long irqflags;
123 unsigned i;
124
125 spin_lock_irqsave(&rdev->irq.lock, irqflags);
126
127 for (i = 0; i < RADEON_NUM_RINGS; i++)
128 atomic_set(&rdev->irq.ring_int[i], 0);
129 rdev->irq.dpm_thermal = false;
130 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
131 rdev->irq.hpd[i] = false;
132 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
133 rdev->irq.crtc_vblank_int[i] = false;
134 atomic_set(&rdev->irq.pflip[i], 0);
135 rdev->irq.afmt[i] = false;
136 }
137 radeon_irq_set(rdev);
138 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
139
140 radeon_irq_process(rdev);
141}
142
143
144
145
146
147
148
149
150
151int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
152{
153 struct radeon_device *rdev = dev->dev_private;
154
155 if (ASIC_IS_AVIVO(rdev))
156 dev->max_vblank_count = 0x00ffffff;
157 else
158 dev->max_vblank_count = 0x001fffff;
159
160 return 0;
161}
162
163
164
165
166
167
168
169
170void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
171{
172 struct radeon_device *rdev = dev->dev_private;
173 unsigned long irqflags;
174 unsigned i;
175
176 if (rdev == NULL) {
177 return;
178 }
179 spin_lock_irqsave(&rdev->irq.lock, irqflags);
180
181 for (i = 0; i < RADEON_NUM_RINGS; i++)
182 atomic_set(&rdev->irq.ring_int[i], 0);
183 rdev->irq.dpm_thermal = false;
184 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
185 rdev->irq.hpd[i] = false;
186 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
187 rdev->irq.crtc_vblank_int[i] = false;
188 atomic_set(&rdev->irq.pflip[i], 0);
189 rdev->irq.afmt[i] = false;
190 }
191 radeon_irq_set(rdev);
192 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
193}
194
195
196
197
198
199
200
201
202
203
204
205static bool radeon_msi_ok(struct radeon_device *rdev)
206{
207
208 if (rdev->family < CHIP_RV380)
209 return false;
210
211
212 if (rdev->flags & RADEON_IS_AGP)
213 return false;
214
215
216
217
218
219
220 if (rdev->family < CHIP_BONAIRE) {
221 dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
222 rdev->pdev->no_64bit_msi = 1;
223 }
224
225
226 if (radeon_msi == 1)
227 return true;
228 else if (radeon_msi == 0)
229 return false;
230
231
232
233 if ((rdev->pdev->device == 0x791f) &&
234 (rdev->pdev->subsystem_vendor == 0x103c) &&
235 (rdev->pdev->subsystem_device == 0x30c2))
236 return true;
237
238
239 if ((rdev->pdev->device == 0x791f) &&
240 (rdev->pdev->subsystem_vendor == 0x1028) &&
241 (rdev->pdev->subsystem_device == 0x01fc))
242 return true;
243
244
245 if ((rdev->pdev->device == 0x791f) &&
246 (rdev->pdev->subsystem_vendor == 0x1028) &&
247 (rdev->pdev->subsystem_device == 0x01fd))
248 return true;
249
250
251 if ((rdev->pdev->device == 0x791f) &&
252 (rdev->pdev->subsystem_vendor == 0x107b) &&
253 (rdev->pdev->subsystem_device == 0x0185))
254 return true;
255
256
257 if (rdev->family == CHIP_RS690)
258 return true;
259
260
261
262
263
264 if (rdev->family == CHIP_RV515)
265 return false;
266 if (rdev->flags & RADEON_IS_IGP) {
267
268 if (rdev->family >= CHIP_PALM)
269 return true;
270
271 return false;
272 }
273
274 return true;
275}
276
277
278
279
280
281
282
283
284
285int radeon_irq_kms_init(struct radeon_device *rdev)
286{
287 int r = 0;
288
289 spin_lock_init(&rdev->irq.lock);
290 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
291 if (r) {
292 return r;
293 }
294 rdev->ddev->vblank_disable_allowed = true;
295
296
297 rdev->msi_enabled = 0;
298
299 if (radeon_msi_ok(rdev)) {
300 int ret = pci_enable_msi(rdev->pdev);
301 if (!ret) {
302 rdev->msi_enabled = 1;
303 dev_info(rdev->dev, "radeon: using MSI.\n");
304 }
305 }
306
307 INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
308 INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
309 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
310
311 rdev->irq.installed = true;
312 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
313 if (r) {
314 rdev->irq.installed = false;
315 flush_delayed_work(&rdev->hotplug_work);
316 return r;
317 }
318
319 DRM_INFO("radeon: irq initialized.\n");
320 return 0;
321}
322
323
324
325
326
327
328
329
330void radeon_irq_kms_fini(struct radeon_device *rdev)
331{
332 drm_vblank_cleanup(rdev->ddev);
333 if (rdev->irq.installed) {
334 drm_irq_uninstall(rdev->ddev);
335 rdev->irq.installed = false;
336 if (rdev->msi_enabled)
337 pci_disable_msi(rdev->pdev);
338 flush_delayed_work(&rdev->hotplug_work);
339 }
340}
341
342
343
344
345
346
347
348
349
350
351
352void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
353{
354 unsigned long irqflags;
355
356 if (!rdev->ddev->irq_enabled)
357 return;
358
359 if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
360 spin_lock_irqsave(&rdev->irq.lock, irqflags);
361 radeon_irq_set(rdev);
362 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
363 }
364}
365
366
367
368
369
370
371
372
373
374
375
/**
 * radeon_irq_kms_sw_irq_get_delayed - take a ring interrupt reference
 *                                     without programming the hardware
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt to reference
 *
 * Bumps the ring's interrupt refcount but does NOT call radeon_irq_set();
 * the caller is responsible for actually enabling the interrupt when this
 * returns true (i.e. on the 0 -> 1 transition).
 */
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
{
	return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
}
380
381
382
383
384
385
386
387
388
389
390
391void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
392{
393 unsigned long irqflags;
394
395 if (!rdev->ddev->irq_enabled)
396 return;
397
398 if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
399 spin_lock_irqsave(&rdev->irq.lock, irqflags);
400 radeon_irq_set(rdev);
401 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
402 }
403}
404
405
406
407
408
409
410
411
412
413
414void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
415{
416 unsigned long irqflags;
417
418 if (crtc < 0 || crtc >= rdev->num_crtc)
419 return;
420
421 if (!rdev->ddev->irq_enabled)
422 return;
423
424 if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
425 spin_lock_irqsave(&rdev->irq.lock, irqflags);
426 radeon_irq_set(rdev);
427 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
428 }
429}
430
431
432
433
434
435
436
437
438
439
440void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
441{
442 unsigned long irqflags;
443
444 if (crtc < 0 || crtc >= rdev->num_crtc)
445 return;
446
447 if (!rdev->ddev->irq_enabled)
448 return;
449
450 if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
451 spin_lock_irqsave(&rdev->irq.lock, irqflags);
452 radeon_irq_set(rdev);
453 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
454 }
455}
456
457
458
459
460
461
462
463
464
465void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
466{
467 unsigned long irqflags;
468
469 if (!rdev->ddev->irq_enabled)
470 return;
471
472 spin_lock_irqsave(&rdev->irq.lock, irqflags);
473 rdev->irq.afmt[block] = true;
474 radeon_irq_set(rdev);
475 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
476
477}
478
479
480
481
482
483
484
485
486
487void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
488{
489 unsigned long irqflags;
490
491 if (!rdev->ddev->irq_enabled)
492 return;
493
494 spin_lock_irqsave(&rdev->irq.lock, irqflags);
495 rdev->irq.afmt[block] = false;
496 radeon_irq_set(rdev);
497 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
498}
499
500
501
502
503
504
505
506
507
508void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
509{
510 unsigned long irqflags;
511 int i;
512
513 if (!rdev->ddev->irq_enabled)
514 return;
515
516 spin_lock_irqsave(&rdev->irq.lock, irqflags);
517 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
518 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
519 radeon_irq_set(rdev);
520 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
521}
522
523
524
525
526
527
528
529
530
531void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
532{
533 unsigned long irqflags;
534 int i;
535
536 if (!rdev->ddev->irq_enabled)
537 return;
538
539 spin_lock_irqsave(&rdev->irq.lock, irqflags);
540 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
541 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
542 radeon_irq_set(rdev);
543 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
544}
545
546