1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/irq.h>
29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/amdgpu_drm.h>
32#include "amdgpu.h"
33#include "amdgpu_ih.h"
34#include "atom.h"
35#include "amdgpu_connectors.h"
36#include "amdgpu_trace.h"
37
38#include <linux/pm_runtime.h>
39
40#define AMDGPU_WAIT_IDLE_TIMEOUT 200
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56static void amdgpu_hotplug_work_func(struct work_struct *work)
57{
58 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
59 hotplug_work);
60 struct drm_device *dev = adev->ddev;
61 struct drm_mode_config *mode_config = &dev->mode_config;
62 struct drm_connector *connector;
63
64 mutex_lock(&mode_config->mutex);
65 list_for_each_entry(connector, &mode_config->connector_list, head)
66 amdgpu_connector_hotplug(connector);
67 mutex_unlock(&mode_config->mutex);
68
69 drm_helper_hpd_irq_event(dev);
70}
71
72
73
74
75
76
77
78
79
80
81static void amdgpu_irq_reset_work_func(struct work_struct *work)
82{
83 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
84 reset_work);
85
86 if (!amdgpu_sriov_vf(adev))
87 amdgpu_gpu_reset(adev);
88}
89
90
/**
 * amdgpu_irq_disable_all - disable every registered interrupt source
 * @adev: amdgpu device pointer
 *
 * Walks all clients and all source ids, zeroes each source's software
 * enable refcounts and programs the hardware state to DISABLE via the
 * source's ->set() callback.  Errors from ->set() are logged but do not
 * stop the sweep.  The whole walk runs under the irq spinlock so it is
 * atomic with respect to amdgpu_irq_update().
 */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		/* clients with no registered sources are skipped */
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			/* sources without a ->set hook or any types cannot be disabled */
			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				/* drop the software refcount first, then program hw */
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
120
121
122
123
124
125
126
127
128
/**
 * amdgpu_irq_preinstall - DRM irq preinstall callback
 * @dev: DRM device pointer
 *
 * Quiesces the hardware before the kernel IRQ handler is hooked up:
 * disables every registered interrupt source, then drains anything
 * already queued in the IH ring so no stale entries fire later.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* drain whatever is still pending in the IH ring */
	amdgpu_ih_process(adev);
}
138
139
140
141
142
143
144
145
146
/**
 * amdgpu_irq_postinstall - DRM irq post-install callback
 * @dev: DRM device pointer
 *
 * Called by the DRM core after the interrupt handler is installed.
 *
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	/* limit the vblank counter to 24 bits (wraps at 0x00ffffff) */
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}
152
153
154
155
156
157
158
159
160void amdgpu_irq_uninstall(struct drm_device *dev)
161{
162 struct amdgpu_device *adev = dev->dev_private;
163
164 if (adev == NULL) {
165 return;
166 }
167 amdgpu_irq_disable_all(adev);
168}
169
170
171
172
173
174
175
176
177irqreturn_t amdgpu_irq_handler(int irq, void *arg)
178{
179 struct drm_device *dev = (struct drm_device *) arg;
180 struct amdgpu_device *adev = dev->dev_private;
181 irqreturn_t ret;
182
183 ret = amdgpu_ih_process(adev);
184 if (ret == IRQ_HANDLED)
185 pm_runtime_mark_last_busy(dev->dev);
186 return ret;
187}
188
189
190
191
192
193
194
195
196
197
198
199static bool amdgpu_msi_ok(struct amdgpu_device *adev)
200{
201
202 if (amdgpu_msi == 1)
203 return true;
204 else if (amdgpu_msi == 0)
205 return false;
206
207 return true;
208}
209
210
211
212
213
214
215
216
217
218int amdgpu_irq_init(struct amdgpu_device *adev)
219{
220 int r = 0;
221
222 spin_lock_init(&adev->irq.lock);
223 r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
224 if (r) {
225 return r;
226 }
227
228
229 adev->irq.msi_enabled = false;
230
231 if (amdgpu_msi_ok(adev)) {
232 int ret = pci_enable_msi(adev->pdev);
233 if (!ret) {
234 adev->irq.msi_enabled = true;
235 dev_info(adev->dev, "amdgpu: using MSI.\n");
236 }
237 }
238
239 INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
240 INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
241
242 adev->irq.installed = true;
243 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
244 if (r) {
245 adev->irq.installed = false;
246 flush_work(&adev->hotplug_work);
247 cancel_work_sync(&adev->reset_work);
248 return r;
249 }
250
251 DRM_INFO("amdgpu: irq initialized.\n");
252 return 0;
253}
254
255
256
257
258
259
260
261
/**
 * amdgpu_irq_fini - tear down driver interrupt handling
 * @adev: amdgpu device pointer
 *
 * Uninstalls the DRM irq handler (if installed), disables MSI, flushes
 * the pending work items, then frees the per-source bookkeeping.  Note
 * the asymmetric ownership: every source's enabled_types array is freed
 * here, but the amdgpu_irq_src struct itself is only freed when it has
 * a ->data payload — sources without ->data are presumably embedded in
 * their owning IP block structs and not individually allocated
 * (TODO(review): confirm against the registration sites).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		/* make sure no hotplug/reset work is still running */
		flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			/* only dynamically created sources carry ->data */
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		kfree(adev->irq.client[i].sources);
	}
}
297
298
299
300
301
302
303
304
305
306int amdgpu_irq_add_id(struct amdgpu_device *adev,
307 unsigned client_id, unsigned src_id,
308 struct amdgpu_irq_src *source)
309{
310 if (client_id >= AMDGPU_IH_CLIENTID_MAX)
311 return -EINVAL;
312
313 if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
314 return -EINVAL;
315
316 if (!source->funcs)
317 return -EINVAL;
318
319 if (!adev->irq.client[client_id].sources) {
320 adev->irq.client[client_id].sources =
321 kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
322 sizeof(struct amdgpu_irq_src *),
323 GFP_KERNEL);
324 if (!adev->irq.client[client_id].sources)
325 return -ENOMEM;
326 }
327
328 if (adev->irq.client[client_id].sources[src_id] != NULL)
329 return -EINVAL;
330
331 if (source->num_types && !source->enabled_types) {
332 atomic_t *types;
333
334 types = kcalloc(source->num_types, sizeof(atomic_t),
335 GFP_KERNEL);
336 if (!types)
337 return -ENOMEM;
338
339 source->enabled_types = types;
340 }
341
342 adev->irq.client[client_id].sources[src_id] = source;
343 return 0;
344}
345
346
347
348
349
350
351
352
353
354void amdgpu_irq_dispatch(struct amdgpu_device *adev,
355 struct amdgpu_iv_entry *entry)
356{
357 unsigned client_id = entry->client_id;
358 unsigned src_id = entry->src_id;
359 struct amdgpu_irq_src *src;
360 int r;
361
362 trace_amdgpu_iv(entry);
363
364 if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
365 DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
366 return;
367 }
368
369 if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
370 DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
371 return;
372 }
373
374 if (adev->irq.virq[src_id]) {
375 generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
376 } else {
377 if (!adev->irq.client[client_id].sources) {
378 DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
379 client_id, src_id);
380 return;
381 }
382
383 src = adev->irq.client[client_id].sources[src_id];
384 if (!src) {
385 DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
386 return;
387 }
388
389 r = src->funcs->process(adev, src, entry);
390 if (r)
391 DRM_ERROR("error processing interrupt (%d)\n", r);
392 }
393}
394
395
396
397
398
399
400
401
402
403
404int amdgpu_irq_update(struct amdgpu_device *adev,
405 struct amdgpu_irq_src *src, unsigned type)
406{
407 unsigned long irqflags;
408 enum amdgpu_interrupt_state state;
409 int r;
410
411 spin_lock_irqsave(&adev->irq.lock, irqflags);
412
413
414
415 if (amdgpu_irq_enabled(adev, src, type))
416 state = AMDGPU_IRQ_STATE_ENABLE;
417 else
418 state = AMDGPU_IRQ_STATE_DISABLE;
419
420 r = src->funcs->set(adev, src, type, state);
421 spin_unlock_irqrestore(&adev->irq.lock, irqflags);
422 return r;
423}
424
/**
 * amdgpu_irq_gpu_reset_resume_helper - reprogram all irq state after reset
 * @adev: amdgpu device pointer
 *
 * A GPU reset clobbers the hardware interrupt-enable registers; walk
 * every registered source/type and re-apply the software refcounted
 * state via amdgpu_irq_update().
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}
443
444
445
446
447
448
449
450
451
452
453int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
454 unsigned type)
455{
456 if (!adev->ddev->irq_enabled)
457 return -ENOENT;
458
459 if (type >= src->num_types)
460 return -EINVAL;
461
462 if (!src->enabled_types || !src->funcs->set)
463 return -EINVAL;
464
465 if (atomic_inc_return(&src->enabled_types[type]) == 1)
466 return amdgpu_irq_update(adev, src, type);
467
468 return 0;
469}
470
471
472
473
474
475
476
477
478
479
480int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
481 unsigned type)
482{
483 if (!adev->ddev->irq_enabled)
484 return -ENOENT;
485
486 if (type >= src->num_types)
487 return -EINVAL;
488
489 if (!src->enabled_types || !src->funcs->set)
490 return -EINVAL;
491
492 if (atomic_dec_and_test(&src->enabled_types[type]))
493 return amdgpu_irq_update(adev, src, type);
494
495 return 0;
496}
497
498
499
500
501
502
503
504
505
506bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
507 unsigned type)
508{
509 if (!adev->ddev->irq_enabled)
510 return false;
511
512 if (type >= src->num_types)
513 return false;
514
515 if (!src->enabled_types || !src->funcs->set)
516 return false;
517
518 return !!atomic_read(&src->enabled_types[type]);
519}
520
521
/* irq_chip ->irq_mask hook: intentionally a no-op — per-source enable
 * state is managed through amdgpu_irq_get()/put() instead. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* nothing to do */
}
526
/* irq_chip ->irq_unmask hook: intentionally a no-op — per-source enable
 * state is managed through amdgpu_irq_get()/put() instead. */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* nothing to do */
}
531
/* minimal irq_chip backing the amdgpu irq domain (mask/unmask are stubs) */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
537
538static int amdgpu_irqdomain_map(struct irq_domain *d,
539 unsigned int irq, irq_hw_number_t hwirq)
540{
541 if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
542 return -EPERM;
543
544 irq_set_chip_and_handler(irq,
545 &amdgpu_irq_chip, handle_simple_irq);
546 return 0;
547}
548
/* ops for the amdgpu irq domain — only ->map is needed */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
552
553
554
555
556
557
558
559
560
/**
 * amdgpu_irq_add_domain - create the irq domain for GPU interrupt sources
 * @adev: amdgpu device pointer
 *
 * Adds a linear irq domain covering all AMDGPU_MAX_IRQ_SRC_ID source
 * ids so other in-kernel drivers (e.g. audio) can claim GPU interrupt
 * sources as regular Linux irqs.
 *
 * Returns 0 on success, -ENODEV if domain creation failed.
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}
572
573
574
575
576
577
578
579
580
581void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
582{
583 if (adev->irq.domain) {
584 irq_domain_remove(adev->irq.domain);
585 adev->irq.domain = NULL;
586 }
587}
588
589
590
591
592
593
594
595
596
597
598
599
600
601unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
602{
603 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
604
605 return adev->irq.virq[src_id];
606}
607