1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/radeon_drm.h>
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35#include <linux/pm_runtime.h>
36
37#define RADEON_WAIT_IDLE_TIMEOUT 200
38
39
40
41
42
43
44
45
46
47
48irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
49{
50 struct drm_device *dev = (struct drm_device *) arg;
51 struct radeon_device *rdev = dev->dev_private;
52 irqreturn_t ret;
53
54 ret = radeon_irq_process(rdev);
55 if (ret == IRQ_HANDLED)
56 pm_runtime_mark_last_busy(dev->dev);
57 return ret;
58}
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74static void radeon_hotplug_work_func(struct work_struct *work)
75{
76 struct radeon_device *rdev = container_of(work, struct radeon_device,
77 hotplug_work);
78 struct drm_device *dev = rdev->ddev;
79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector;
81
82 if (mode_config->num_connector) {
83 list_for_each_entry(connector, &mode_config->connector_list, head)
84 radeon_connector_hotplug(connector);
85 }
86
87 drm_helper_hpd_irq_event(dev);
88}
89
90
91
92
93
94
95
96
97
98
/**
 * radeon_irq_reset_work_func - GPU reset work handler
 *
 * @work: work struct embedded in struct radeon_device (reset_work)
 *
 * Runs in process context when a reset has been scheduled from
 * interrupt context; simply invokes the full GPU reset path.
 */
static void radeon_irq_reset_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  reset_work);

	radeon_gpu_reset(rdev);
}
106
107
108
109
110
111
112
113
114
115void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
116{
117 struct radeon_device *rdev = dev->dev_private;
118 unsigned long irqflags;
119 unsigned i;
120
121 spin_lock_irqsave(&rdev->irq.lock, irqflags);
122
123 for (i = 0; i < RADEON_NUM_RINGS; i++)
124 atomic_set(&rdev->irq.ring_int[i], 0);
125 rdev->irq.dpm_thermal = false;
126 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
127 rdev->irq.hpd[i] = false;
128 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
129 rdev->irq.crtc_vblank_int[i] = false;
130 atomic_set(&rdev->irq.pflip[i], 0);
131 rdev->irq.afmt[i] = false;
132 }
133 radeon_irq_set(rdev);
134 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
135
136 radeon_irq_process(rdev);
137}
138
139
140
141
142
143
144
145
146
/**
 * radeon_driver_irq_postinstall_kms - drm callback after IRQ install
 *
 * @dev: drm dev pointer
 *
 * Tells the drm core how wide the hardware vblank counter is so it can
 * handle counter wrap-around correctly.
 *
 * Returns 0 (always succeeds).
 */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	/* 0x001fffff = 2^21 - 1: the counter wraps at 21 bits.
	 * NOTE(review): presumably matches the hw frame-counter register
	 * width — confirm against the register spec. */
	dev->max_vblank_count = 0x001fffff;
	return 0;
}
152
153
154
155
156
157
158
159
160void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
161{
162 struct radeon_device *rdev = dev->dev_private;
163 unsigned long irqflags;
164 unsigned i;
165
166 if (rdev == NULL) {
167 return;
168 }
169 spin_lock_irqsave(&rdev->irq.lock, irqflags);
170
171 for (i = 0; i < RADEON_NUM_RINGS; i++)
172 atomic_set(&rdev->irq.ring_int[i], 0);
173 rdev->irq.dpm_thermal = false;
174 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
175 rdev->irq.hpd[i] = false;
176 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
177 rdev->irq.crtc_vblank_int[i] = false;
178 atomic_set(&rdev->irq.pflip[i], 0);
179 rdev->irq.afmt[i] = false;
180 }
181 radeon_irq_set(rdev);
182 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
183}
184
185
186
187
188
189
190
191
192
193
194
195static bool radeon_msi_ok(struct radeon_device *rdev)
196{
197
198 if (rdev->family < CHIP_RV380)
199 return false;
200
201
202 if (rdev->flags & RADEON_IS_AGP)
203 return false;
204
205
206 if (radeon_msi == 1)
207 return true;
208 else if (radeon_msi == 0)
209 return false;
210
211
212
213 if ((rdev->pdev->device == 0x791f) &&
214 (rdev->pdev->subsystem_vendor == 0x103c) &&
215 (rdev->pdev->subsystem_device == 0x30c2))
216 return true;
217
218
219 if ((rdev->pdev->device == 0x791f) &&
220 (rdev->pdev->subsystem_vendor == 0x1028) &&
221 (rdev->pdev->subsystem_device == 0x01fc))
222 return true;
223
224
225 if ((rdev->pdev->device == 0x791f) &&
226 (rdev->pdev->subsystem_vendor == 0x1028) &&
227 (rdev->pdev->subsystem_device == 0x01fd))
228 return true;
229
230
231 if ((rdev->pdev->device == 0x791f) &&
232 (rdev->pdev->subsystem_vendor == 0x107b) &&
233 (rdev->pdev->subsystem_device == 0x0185))
234 return true;
235
236
237 if (rdev->family == CHIP_RS690)
238 return true;
239
240
241
242
243
244 if (rdev->family == CHIP_RV515)
245 return false;
246 if (rdev->flags & RADEON_IS_IGP) {
247
248 if (rdev->family >= CHIP_PALM)
249 return true;
250
251 return false;
252 }
253
254 return true;
255}
256
257
258
259
260
261
262
263
264
/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Initializes the irq spinlock and drm vblank support, enables MSI when
 * radeon_msi_ok() approves it (MSI failure is non-fatal — the driver
 * falls back to legacy interrupts), sets up the hotplug/audio/reset
 * work items, and finally installs the IRQ handler via the drm core.
 *
 * Returns 0 on success, a negative error code on failure.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	spin_lock_init(&rdev->irq.lock);
	/* one vblank counter per crtc */
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	rdev->msi_enabled = 0;

	if (radeon_msi_ok(rdev)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
		/* ret != 0 is intentionally ignored: fall back to INTx */
	}

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
	INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);

	/* installed must be set before drm_irq_install() so an interrupt
	 * firing immediately sees consistent state */
	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
	if (r) {
		rdev->irq.installed = false;
		/* NOTE(review): only hotplug_work is flushed on this error
		 * path; audio_work and reset_work are presumably not yet
		 * schedulable here — confirm before relying on it */
		flush_work(&rdev->hotplug_work);
		return r;
	}

	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
300
301
302
303
304
305
306
307
308void radeon_irq_kms_fini(struct radeon_device *rdev)
309{
310 drm_vblank_cleanup(rdev->ddev);
311 if (rdev->irq.installed) {
312 drm_irq_uninstall(rdev->ddev);
313 rdev->irq.installed = false;
314 if (rdev->msi_enabled)
315 pci_disable_msi(rdev->pdev);
316 flush_work(&rdev->hotplug_work);
317 }
318}
319
320
321
322
323
324
325
326
327
328
329
330void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
331{
332 unsigned long irqflags;
333
334 if (!rdev->ddev->irq_enabled)
335 return;
336
337 if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
338 spin_lock_irqsave(&rdev->irq.lock, irqflags);
339 radeon_irq_set(rdev);
340 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
341 }
342}
343
344
345
346
347
348
349
350
351
352
353
354void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
355{
356 unsigned long irqflags;
357
358 if (!rdev->ddev->irq_enabled)
359 return;
360
361 if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
362 spin_lock_irqsave(&rdev->irq.lock, irqflags);
363 radeon_irq_set(rdev);
364 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
365 }
366}
367
368
369
370
371
372
373
374
375
376
377void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
378{
379 unsigned long irqflags;
380
381 if (crtc < 0 || crtc >= rdev->num_crtc)
382 return;
383
384 if (!rdev->ddev->irq_enabled)
385 return;
386
387 if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
388 spin_lock_irqsave(&rdev->irq.lock, irqflags);
389 radeon_irq_set(rdev);
390 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
391 }
392}
393
394
395
396
397
398
399
400
401
402
403void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
404{
405 unsigned long irqflags;
406
407 if (crtc < 0 || crtc >= rdev->num_crtc)
408 return;
409
410 if (!rdev->ddev->irq_enabled)
411 return;
412
413 if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
414 spin_lock_irqsave(&rdev->irq.lock, irqflags);
415 radeon_irq_set(rdev);
416 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
417 }
418}
419
420
421
422
423
424
425
426
427
428void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
429{
430 unsigned long irqflags;
431
432 if (!rdev->ddev->irq_enabled)
433 return;
434
435 spin_lock_irqsave(&rdev->irq.lock, irqflags);
436 rdev->irq.afmt[block] = true;
437 radeon_irq_set(rdev);
438 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
439
440}
441
442
443
444
445
446
447
448
449
450void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
451{
452 unsigned long irqflags;
453
454 if (!rdev->ddev->irq_enabled)
455 return;
456
457 spin_lock_irqsave(&rdev->irq.lock, irqflags);
458 rdev->irq.afmt[block] = false;
459 radeon_irq_set(rdev);
460 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
461}
462
463
464
465
466
467
468
469
470
471void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
472{
473 unsigned long irqflags;
474 int i;
475
476 if (!rdev->ddev->irq_enabled)
477 return;
478
479 spin_lock_irqsave(&rdev->irq.lock, irqflags);
480 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
481 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
482 radeon_irq_set(rdev);
483 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
484}
485
486
487
488
489
490
491
492
493
494void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
495{
496 unsigned long irqflags;
497 int i;
498
499 if (!rdev->ddev->irq_enabled)
500 return;
501
502 spin_lock_irqsave(&rdev->irq.lock, irqflags);
503 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
504 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
505 radeon_irq_set(rdev);
506 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
507}
508
509