#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"

#include <linux/pm_runtime.h>

#define AMDGPU_WAIT_IDLE_TIMEOUT 200
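
/**
 * amdgpu_hotplug_work_func - work handler for display hotplug events
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).  The work gets
 * scheduled from the IRQ handler if there was a hotplug interrupt; it
 * walks the connector table, calls the hotplug handler for each
 * connector, and then sends a DRM hotplug event to alert userspace.
 */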
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			amdgpu_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);

	drm_helper_hpd_irq_event(dev);
}
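
/**
 * amdgpu_irq_reset_work_func - GPU reset work handler
 *
 * @work: work struct pointer
 *
 * Performs the actual GPU reset from process context, since the reset
 * cannot be done from the interrupt handler that detected the hang.
 */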
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	amdgpu_gpu_reset(adev);
}
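
/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Clears the software enable refcounts and disables every type of
 * every registered interrupt source, under the irq lock.
 */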
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src || !src->funcs->set || !src->num_types)
			continue;

		for (j = 0; j < src->num_types; ++j) {
			atomic_set(&src->enabled_types[j], 0);
			r = src->funcs->set(adev, src, j,
					    AMDGPU_IRQ_STATE_DISABLE);
			if (r)
				DRM_ERROR("error disabling interrupt (%d)\n",
					  r);
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
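
/**
 * amdgpu_irq_preinstall - DRM IRQ preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hardware ready to enable IRQs (all ASICs).
 */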
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear any pending interrupts by draining the IH ring once */
	amdgpu_ih_process(adev);
}
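
/**
 * amdgpu_irq_postinstall - DRM IRQ postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles the work to be done after enabling IRQs (all ASICs).
 * Returns 0 on success.
 */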
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	/* The hardware frame counter is 24 bits wide */
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}
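
/**
 * amdgpu_irq_uninstall - DRM IRQ uninstall callback
 *
 * @dev: drm dev pointer
 *
 * Tears down the IRQ setup (all ASICs) by disabling all interrupt
 * sources.
 */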
void amdgpu_irq_uninstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;
	amdgpu_irq_disable_all(adev);
}
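
/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: drm dev pointer
 *
 * Top-level IRQ handler for the amdgpu driver (all ASICs); processes
 * the IH ring buffer and returns the result of that processing.
 */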
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}
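
/**
 * amdgpu_msi_ok - check whether MSI should be used
 *
 * @adev: amdgpu device pointer
 *
 * Honors the amdgpu.msi module parameter: 1 forces MSI on, 0 forces it
 * off, and any other value (the auto default) enables MSI.
 */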
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* Respect a forced setting from the module parameter */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	/* Default (auto): use MSI */
	return true;
}
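
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the irq lock, vblank handling, MSI, the hotplug and reset
 * work handlers, and installs the DRM IRQ handler (all ASICs).
 * Returns 0 for success, negative error code on failure.
 */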
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
	if (r)
		return r;

	/* Enable MSI if supported/requested */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_info(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		flush_work(&adev->hotplug_work);
		return r;
	}

	DRM_INFO("amdgpu: irq initialized.\n");
	return 0;
}
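
/**
 * amdgpu_irq_fini - tear down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down vblank handling, the DRM IRQ handler, MSI, and the work
 * handlers (all ASICs), and frees the per-source state allocated by
 * amdgpu_irq_add_id().
 */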
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		kfree(src->enabled_types);
		src->enabled_types = NULL;
		if (src->data) {
			kfree(src->data);
			/* The source itself is owned by the IP block that
			 * registered it, so only clear the data pointer.
			 */
			src->data = NULL;
		}
	}
}
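
/**
 * amdgpu_irq_add_id - register an IRQ source
 *
 * @adev: amdgpu device pointer
 * @src_id: source id for this source
 * @source: IRQ source pointer
 *
 * Registers an IRQ source with the given source id and allocates its
 * per-type enable refcounts if it declares any interrupt types.
 * Returns 0 on success, -EINVAL if the id is invalid or already taken,
 * or -ENOMEM on allocation failure.
 */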
int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (adev->irq.sources[src_id] != NULL)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.sources[src_id] = source;

	return 0;
}
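
/**
 * amdgpu_irq_dispatch - dispatch an IRQ to its registered source
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector entry
 *
 * Dispatches the IV entry to the matching virtual IRQ if one has been
 * mapped for this source id, otherwise to the registered IRQ source's
 * process callback.
 */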
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		src = adev->irq.sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}
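
/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Programs the hardware enable state for a specific source and type
 * based on the source's software enable refcount (all ASICs).
 */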
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the enabled state after taking the lock,
	 * otherwise we might disable a just-enabled interrupt again.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}
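
/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Takes a reference on the given interrupt type; the hardware is only
 * programmed when the refcount goes from 0 to 1.  Returns 0 on
 * success, negative error code on failure.
 */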
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
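
/**
 * amdgpu_irq_get_delayed - take an IRQ refcount without touching hardware
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Increments the enable refcount only; returns true when this was the
 * first reference, in which case the caller is responsible for
 * actually enabling the source later.
 */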
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type)
{
	if ((type >= src->num_types) || !src->enabled_types)
		return false;
	return atomic_inc_return(&src->enabled_types[type]) == 1;
}
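
/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Drops a reference on the given interrupt type; the hardware is only
 * programmed when the refcount reaches 0.  Returns 0 on success,
 * negative error code on failure.
 */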
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
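
/**
 * amdgpu_irq_enabled - check whether an interrupt is enabled
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Returns true if the interrupt type is enabled for the given source,
 * i.e. its enable refcount is non-zero.
 */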
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}
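
/* Dummy irq_chip for the GPU irqdomain below.  The empty mask/unmask
 * callbacks satisfy the irq_chip interface; amdgpu interrupt sources
 * are enabled and disabled through amdgpu_irq_get()/amdgpu_irq_put()
 * instead.
 */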
static void amdgpu_irq_mask(struct irq_data *irqd)
{
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
}

static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
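
/**
 * amdgpu_irqdomain_map - create a mapping between a domain irq and a hw irq
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware irq number (IH source id)
 *
 * Assigns the simple interrupt handler and the amdgpu irq_chip to the
 * given virtual IRQ.
 */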
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
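
/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources that may be handled
 * by another driver through the Linux IRQ subsystem.  Returns 0 on
 * success, -ENODEV on failure.
 */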
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}
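
/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain created by amdgpu_irq_add_domain(), if any.
 */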
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}
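
/**
 * amdgpu_irq_create_mapping - map an IH src_id to a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src_id) and a Linux
 * IRQ.  Use this for components that generate a GPU interrupt but are
 * driven by a different driver.  Returns the Linux IRQ number.
 */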
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}