1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/radeon_drm.h>
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35#define RADEON_WAIT_IDLE_TIMEOUT 200
36
37
38
39
40
41
42
43
44
45
46irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
47{
48 struct drm_device *dev = (struct drm_device *) arg;
49 struct radeon_device *rdev = dev->dev_private;
50
51 return radeon_irq_process(rdev);
52}
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68static void radeon_hotplug_work_func(struct work_struct *work)
69{
70 struct radeon_device *rdev = container_of(work, struct radeon_device,
71 hotplug_work);
72 struct drm_device *dev = rdev->ddev;
73 struct drm_mode_config *mode_config = &dev->mode_config;
74 struct drm_connector *connector;
75
76 if (mode_config->num_connector) {
77 list_for_each_entry(connector, &mode_config->connector_list, head)
78 radeon_connector_hotplug(connector);
79 }
80
81 drm_helper_hpd_irq_event(dev);
82}
83
84
85
86
87
88
89
90
91
92void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
93{
94 struct radeon_device *rdev = dev->dev_private;
95 unsigned long irqflags;
96 unsigned i;
97
98 spin_lock_irqsave(&rdev->irq.lock, irqflags);
99
100 for (i = 0; i < RADEON_NUM_RINGS; i++)
101 atomic_set(&rdev->irq.ring_int[i], 0);
102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
103 rdev->irq.hpd[i] = false;
104 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
105 rdev->irq.crtc_vblank_int[i] = false;
106 atomic_set(&rdev->irq.pflip[i], 0);
107 rdev->irq.afmt[i] = false;
108 }
109 radeon_irq_set(rdev);
110 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
111
112 radeon_irq_process(rdev);
113}
114
115
116
117
118
119
120
121
122
/**
 * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
 * @dev: drm dev pointer
 *
 * Called by the DRM core after the interrupt handler has been
 * installed.  Only sets the vblank counter wrap value
 * (0x001fffff = 2^21 - 1; presumably the width of the hardware
 * frame counter — confirm against the register spec).
 *
 * Returns 0 (always succeeds).
 */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	dev->max_vblank_count = 0x001fffff;
	return 0;
}
128
129
130
131
132
133
134
135
136void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
137{
138 struct radeon_device *rdev = dev->dev_private;
139 unsigned long irqflags;
140 unsigned i;
141
142 if (rdev == NULL) {
143 return;
144 }
145 spin_lock_irqsave(&rdev->irq.lock, irqflags);
146
147 for (i = 0; i < RADEON_NUM_RINGS; i++)
148 atomic_set(&rdev->irq.ring_int[i], 0);
149 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
150 rdev->irq.hpd[i] = false;
151 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
152 rdev->irq.crtc_vblank_int[i] = false;
153 atomic_set(&rdev->irq.pflip[i], 0);
154 rdev->irq.afmt[i] = false;
155 }
156 radeon_irq_set(rdev);
157 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
158}
159
160
161
162
163
164
165
166
167
168
169
170static bool radeon_msi_ok(struct radeon_device *rdev)
171{
172
173 if (rdev->family < CHIP_RV380)
174 return false;
175
176
177 if (rdev->flags & RADEON_IS_AGP)
178 return false;
179
180
181 if (radeon_msi == 1)
182 return true;
183 else if (radeon_msi == 0)
184 return false;
185
186
187
188 if ((rdev->pdev->device == 0x791f) &&
189 (rdev->pdev->subsystem_vendor == 0x103c) &&
190 (rdev->pdev->subsystem_device == 0x30c2))
191 return true;
192
193
194 if ((rdev->pdev->device == 0x791f) &&
195 (rdev->pdev->subsystem_vendor == 0x1028) &&
196 (rdev->pdev->subsystem_device == 0x01fc))
197 return true;
198
199
200 if ((rdev->pdev->device == 0x791f) &&
201 (rdev->pdev->subsystem_vendor == 0x1028) &&
202 (rdev->pdev->subsystem_device == 0x01fd))
203 return true;
204
205
206 if ((rdev->pdev->device == 0x791f) &&
207 (rdev->pdev->subsystem_vendor == 0x107b) &&
208 (rdev->pdev->subsystem_device == 0x0185))
209 return true;
210
211
212 if (rdev->family == CHIP_RS690)
213 return true;
214
215
216
217
218
219 if (rdev->family == CHIP_RV515)
220 return false;
221 if (rdev->flags & RADEON_IS_IGP) {
222
223 if (rdev->family >= CHIP_PALM)
224 return true;
225
226 return false;
227 }
228
229 return true;
230}
231
232
233
234
235
236
237
238
239
240int radeon_irq_kms_init(struct radeon_device *rdev)
241{
242 int r = 0;
243
244 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
245 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
246
247 spin_lock_init(&rdev->irq.lock);
248 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
249 if (r) {
250 return r;
251 }
252
253 rdev->msi_enabled = 0;
254
255 if (radeon_msi_ok(rdev)) {
256 int ret = pci_enable_msi(rdev->pdev);
257 if (!ret) {
258 rdev->msi_enabled = 1;
259 dev_info(rdev->dev, "radeon: using MSI.\n");
260 }
261 }
262 rdev->irq.installed = true;
263 r = drm_irq_install(rdev->ddev);
264 if (r) {
265 rdev->irq.installed = false;
266 return r;
267 }
268 DRM_INFO("radeon: irq initialized.\n");
269 return 0;
270}
271
272
273
274
275
276
277
278
279void radeon_irq_kms_fini(struct radeon_device *rdev)
280{
281 drm_vblank_cleanup(rdev->ddev);
282 if (rdev->irq.installed) {
283 drm_irq_uninstall(rdev->ddev);
284 rdev->irq.installed = false;
285 if (rdev->msi_enabled)
286 pci_disable_msi(rdev->pdev);
287 }
288 flush_work(&rdev->hotplug_work);
289}
290
291
292
293
294
295
296
297
298
299
300
301void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
302{
303 unsigned long irqflags;
304
305 if (!rdev->ddev->irq_enabled)
306 return;
307
308 if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
309 spin_lock_irqsave(&rdev->irq.lock, irqflags);
310 radeon_irq_set(rdev);
311 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
312 }
313}
314
315
316
317
318
319
320
321
322
323
324
325void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
326{
327 unsigned long irqflags;
328
329 if (!rdev->ddev->irq_enabled)
330 return;
331
332 if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
333 spin_lock_irqsave(&rdev->irq.lock, irqflags);
334 radeon_irq_set(rdev);
335 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
336 }
337}
338
339
340
341
342
343
344
345
346
347
348void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
349{
350 unsigned long irqflags;
351
352 if (crtc < 0 || crtc >= rdev->num_crtc)
353 return;
354
355 if (!rdev->ddev->irq_enabled)
356 return;
357
358 if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
359 spin_lock_irqsave(&rdev->irq.lock, irqflags);
360 radeon_irq_set(rdev);
361 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
362 }
363}
364
365
366
367
368
369
370
371
372
373
374void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
375{
376 unsigned long irqflags;
377
378 if (crtc < 0 || crtc >= rdev->num_crtc)
379 return;
380
381 if (!rdev->ddev->irq_enabled)
382 return;
383
384 if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
385 spin_lock_irqsave(&rdev->irq.lock, irqflags);
386 radeon_irq_set(rdev);
387 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
388 }
389}
390
391
392
393
394
395
396
397
398
399void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
400{
401 unsigned long irqflags;
402
403 if (!rdev->ddev->irq_enabled)
404 return;
405
406 spin_lock_irqsave(&rdev->irq.lock, irqflags);
407 rdev->irq.afmt[block] = true;
408 radeon_irq_set(rdev);
409 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
410
411}
412
413
414
415
416
417
418
419
420
421void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
422{
423 unsigned long irqflags;
424
425 if (!rdev->ddev->irq_enabled)
426 return;
427
428 spin_lock_irqsave(&rdev->irq.lock, irqflags);
429 rdev->irq.afmt[block] = false;
430 radeon_irq_set(rdev);
431 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
432}
433
434
435
436
437
438
439
440
441
442void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
443{
444 unsigned long irqflags;
445 int i;
446
447 if (!rdev->ddev->irq_enabled)
448 return;
449
450 spin_lock_irqsave(&rdev->irq.lock, irqflags);
451 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
452 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
453 radeon_irq_set(rdev);
454 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
455}
456
457
458
459
460
461
462
463
464
465void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
466{
467 unsigned long irqflags;
468 int i;
469
470 if (!rdev->ddev->irq_enabled)
471 return;
472
473 spin_lock_irqsave(&rdev->irq.lock, irqflags);
474 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
475 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
476 radeon_irq_set(rdev);
477 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
478}
479
480