1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/radeon_drm.h>
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35#define RADEON_WAIT_IDLE_TIMEOUT 200
36
37
38
39
40
41
42
43
44
45
/**
 * radeon_driver_irq_handler_kms - irq handler for KMS
 *
 * @DRM_IRQ_ARGS: args
 *
 * This is the irq handler for the radeon KMS driver (all asics).
 * It dispatches to radeon_irq_process(), the per-asic interrupt
 * processing callback, and returns its irqreturn_t result.
 */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_irq_process(rdev);
}
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68static void radeon_hotplug_work_func(struct work_struct *work)
69{
70 struct radeon_device *rdev = container_of(work, struct radeon_device,
71 hotplug_work);
72 struct drm_device *dev = rdev->ddev;
73 struct drm_mode_config *mode_config = &dev->mode_config;
74 struct drm_connector *connector;
75
76 if (mode_config->num_connector) {
77 list_for_each_entry(connector, &mode_config->connector_list, head)
78 radeon_connector_hotplug(connector);
79 }
80
81 drm_helper_hpd_irq_event(dev);
82}
83
84
85
86
87
88
89
90
91
92
/**
 * radeon_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Executes a scheduled gpu reset (all asics).  Queued from
 * interrupt context so the reset itself runs in process context.
 */
static void radeon_irq_reset_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  reset_work);

	radeon_gpu_reset(rdev);
}
100
101
102
103
104
105
106
107
108
/**
 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics): disables every
 * interrupt source (rings, thermal, hpd, vblank, pageflip, audio)
 * under the irq lock, pushes that state to the hardware, then
 * processes once to acknowledge any interrupts already pending.
 */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupt sources. */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.dpm_thermal = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	/* program the disabled state into the hardware */
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	/* Clear bits that may already be pending in the status regs. */
	radeon_irq_process(rdev);
}
132
133
134
135
136
137
138
139
140
/**
 * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * 0x001fffff presumably matches the width of the hw frame counter
 * used for vblank accounting -- TODO confirm against the asic docs.
 * Returns 0 on success.
 */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	dev->max_vblank_count = 0x001fffff;
	return 0;
}
146
147
148
149
150
151
152
153
154void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
155{
156 struct radeon_device *rdev = dev->dev_private;
157 unsigned long irqflags;
158 unsigned i;
159
160 if (rdev == NULL) {
161 return;
162 }
163 spin_lock_irqsave(&rdev->irq.lock, irqflags);
164
165 for (i = 0; i < RADEON_NUM_RINGS; i++)
166 atomic_set(&rdev->irq.ring_int[i], 0);
167 rdev->irq.dpm_thermal = false;
168 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
169 rdev->irq.hpd[i] = false;
170 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
171 rdev->irq.crtc_vblank_int[i] = false;
172 atomic_set(&rdev->irq.pflip[i], 0);
173 rdev->irq.afmt[i] = false;
174 }
175 radeon_irq_set(rdev);
176 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
177}
178
179
180
181
182
183
184
185
186
187
188
189static bool radeon_msi_ok(struct radeon_device *rdev)
190{
191
192 if (rdev->family < CHIP_RV380)
193 return false;
194
195
196 if (rdev->flags & RADEON_IS_AGP)
197 return false;
198
199
200 if (radeon_msi == 1)
201 return true;
202 else if (radeon_msi == 0)
203 return false;
204
205
206
207 if ((rdev->pdev->device == 0x791f) &&
208 (rdev->pdev->subsystem_vendor == 0x103c) &&
209 (rdev->pdev->subsystem_device == 0x30c2))
210 return true;
211
212
213 if ((rdev->pdev->device == 0x791f) &&
214 (rdev->pdev->subsystem_vendor == 0x1028) &&
215 (rdev->pdev->subsystem_device == 0x01fc))
216 return true;
217
218
219 if ((rdev->pdev->device == 0x791f) &&
220 (rdev->pdev->subsystem_vendor == 0x1028) &&
221 (rdev->pdev->subsystem_device == 0x01fd))
222 return true;
223
224
225 if ((rdev->pdev->device == 0x791f) &&
226 (rdev->pdev->subsystem_vendor == 0x107b) &&
227 (rdev->pdev->subsystem_device == 0x0185))
228 return true;
229
230
231 if (rdev->family == CHIP_RS690)
232 return true;
233
234
235
236
237
238 if (rdev->family == CHIP_RV515)
239 return false;
240 if (rdev->flags & RADEON_IS_IGP) {
241
242 if (rdev->family >= CHIP_PALM)
243 return true;
244
245 return false;
246 }
247
248 return true;
249}
250
251
252
253
254
255
256
257
258
259int radeon_irq_kms_init(struct radeon_device *rdev)
260{
261 int r = 0;
262
263 spin_lock_init(&rdev->irq.lock);
264 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
265 if (r) {
266 return r;
267 }
268
269 rdev->msi_enabled = 0;
270
271 if (radeon_msi_ok(rdev)) {
272 int ret = pci_enable_msi(rdev->pdev);
273 if (!ret) {
274 rdev->msi_enabled = 1;
275 dev_info(rdev->dev, "radeon: using MSI.\n");
276 }
277 }
278
279 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
280 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
281 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
282
283 rdev->irq.installed = true;
284 r = drm_irq_install(rdev->ddev);
285 if (r) {
286 rdev->irq.installed = false;
287 flush_work(&rdev->hotplug_work);
288 return r;
289 }
290
291 DRM_INFO("radeon: irq initialized.\n");
292 return 0;
293}
294
295
296
297
298
299
300
301
/**
 * radeon_irq_kms_fini - tear down driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Tears down vblank handling, uninstalls the irq handler, disables
 * MSI if it was enabled, and flushes any outstanding hotplug work
 * (all asics).  Safe to call when the irq was never installed.
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
		/* no new hotplug irqs can arrive now; drain queued work */
		flush_work(&rdev->hotplug_work);
	}
}
313
314
315
316
317
318
319
320
321
322
323
324void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
325{
326 unsigned long irqflags;
327
328 if (!rdev->ddev->irq_enabled)
329 return;
330
331 if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
332 spin_lock_irqsave(&rdev->irq.lock, irqflags);
333 radeon_irq_set(rdev);
334 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
335 }
336}
337
338
339
340
341
342
343
344
345
346
347
348void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
349{
350 unsigned long irqflags;
351
352 if (!rdev->ddev->irq_enabled)
353 return;
354
355 if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
356 spin_lock_irqsave(&rdev->irq.lock, irqflags);
357 radeon_irq_set(rdev);
358 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
359 }
360}
361
362
363
364
365
366
367
368
369
370
371void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
372{
373 unsigned long irqflags;
374
375 if (crtc < 0 || crtc >= rdev->num_crtc)
376 return;
377
378 if (!rdev->ddev->irq_enabled)
379 return;
380
381 if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
382 spin_lock_irqsave(&rdev->irq.lock, irqflags);
383 radeon_irq_set(rdev);
384 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
385 }
386}
387
388
389
390
391
392
393
394
395
396
397void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
398{
399 unsigned long irqflags;
400
401 if (crtc < 0 || crtc >= rdev->num_crtc)
402 return;
403
404 if (!rdev->ddev->irq_enabled)
405 return;
406
407 if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
408 spin_lock_irqsave(&rdev->irq.lock, irqflags);
409 radeon_irq_set(rdev);
410 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
411 }
412}
413
414
415
416
417
418
419
420
421
422void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
423{
424 unsigned long irqflags;
425
426 if (!rdev->ddev->irq_enabled)
427 return;
428
429 spin_lock_irqsave(&rdev->irq.lock, irqflags);
430 rdev->irq.afmt[block] = true;
431 radeon_irq_set(rdev);
432 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
433
434}
435
436
437
438
439
440
441
442
443
444void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
445{
446 unsigned long irqflags;
447
448 if (!rdev->ddev->irq_enabled)
449 return;
450
451 spin_lock_irqsave(&rdev->irq.lock, irqflags);
452 rdev->irq.afmt[block] = false;
453 radeon_irq_set(rdev);
454 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
455}
456
457
458
459
460
461
462
463
464
465void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
466{
467 unsigned long irqflags;
468 int i;
469
470 if (!rdev->ddev->irq_enabled)
471 return;
472
473 spin_lock_irqsave(&rdev->irq.lock, irqflags);
474 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
475 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
476 radeon_irq_set(rdev);
477 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
478}
479
480
481
482
483
484
485
486
487
488void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
489{
490 unsigned long irqflags;
491 int i;
492
493 if (!rdev->ddev->irq_enabled)
494 return;
495
496 spin_lock_irqsave(&rdev->irq.lock, irqflags);
497 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
498 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
499 radeon_irq_set(rdev);
500 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
501}
502
503