1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/amdgpu_drm.h>
31#include "amdgpu.h"
32#include "amdgpu_ih.h"
33#include "atom.h"
34#include "amdgpu_connectors.h"
35
36#include <linux/pm_runtime.h>
37
38#define AMDGPU_WAIT_IDLE_TIMEOUT 200
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54static void amdgpu_hotplug_work_func(struct work_struct *work)
55{
56 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
57 hotplug_work);
58 struct drm_device *dev = adev->ddev;
59 struct drm_mode_config *mode_config = &dev->mode_config;
60 struct drm_connector *connector;
61
62 mutex_lock(&mode_config->mutex);
63 if (mode_config->num_connector) {
64 list_for_each_entry(connector, &mode_config->connector_list, head)
65 amdgpu_connector_hotplug(connector);
66 }
67 mutex_unlock(&mode_config->mutex);
68
69 drm_helper_hpd_irq_event(dev);
70}
71
72
73
74
75
76
77
78
79
80
/**
 * amdgpu_irq_reset_work_func - execute a deferred GPU reset
 * @work: work struct embedded in struct amdgpu_device
 *
 * Runs amdgpu_gpu_reset() from process context; the reset is deferred
 * to a work item because it cannot be performed from the interrupt
 * handler that detected the need for it.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	amdgpu_gpu_reset(adev);
}
88
89
90static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
91{
92 unsigned long irqflags;
93 unsigned i, j;
94 int r;
95
96 spin_lock_irqsave(&adev->irq.lock, irqflags);
97 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
98 struct amdgpu_irq_src *src = adev->irq.sources[i];
99
100 if (!src || !src->funcs->set || !src->num_types)
101 continue;
102
103 for (j = 0; j < src->num_types; ++j) {
104 atomic_set(&src->enabled_types[j], 0);
105 r = src->funcs->set(adev, src, j,
106 AMDGPU_IRQ_STATE_DISABLE);
107 if (r)
108 DRM_ERROR("error disabling interrupt (%d)\n",
109 r);
110 }
111 }
112 spin_unlock_irqrestore(&adev->irq.lock, irqflags);
113}
114
115
116
117
118
119
120
121
122
/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 * @dev: drm dev pointer
 *
 * Disables all interrupt sources, then runs one pass of IH ring
 * processing to consume anything already latched, so the handler
 * starts from a clean state.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear bits */
	amdgpu_ih_process(adev);
}
132
133
134
135
136
137
138
139
140
/**
 * amdgpu_irq_postinstall - drm irq post-install callback
 * @dev: drm dev pointer
 *
 * Called after the DRM interrupt handler has been installed. Sets the
 * maximum hardware vblank counter value (0x001fffff: the counter is
 * 21 bits wide and wraps there).
 *
 * Returns 0 (always succeeds).
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	dev->max_vblank_count = 0x001fffff;
	return 0;
}
146
147
148
149
150
151
152
153
154void amdgpu_irq_uninstall(struct drm_device *dev)
155{
156 struct amdgpu_device *adev = dev->dev_private;
157
158 if (adev == NULL) {
159 return;
160 }
161 amdgpu_irq_disable_all(adev);
162}
163
164
165
166
167
168
169
170
171irqreturn_t amdgpu_irq_handler(int irq, void *arg)
172{
173 struct drm_device *dev = (struct drm_device *) arg;
174 struct amdgpu_device *adev = dev->dev_private;
175 irqreturn_t ret;
176
177 ret = amdgpu_ih_process(adev);
178 if (ret == IRQ_HANDLED)
179 pm_runtime_mark_last_busy(dev->dev);
180 return ret;
181}
182
183
184
185
186
187
188
189
190
191
192
193static bool amdgpu_msi_ok(struct amdgpu_device *adev)
194{
195
196 if (amdgpu_msi == 1)
197 return true;
198 else if (amdgpu_msi == 0)
199 return false;
200
201 return true;
202}
203
204
205
206
207
208
209
210
211
212int amdgpu_irq_init(struct amdgpu_device *adev)
213{
214 int r = 0;
215
216 spin_lock_init(&adev->irq.lock);
217 r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
218 if (r) {
219 return r;
220 }
221
222 adev->irq.msi_enabled = false;
223
224 if (amdgpu_msi_ok(adev)) {
225 int ret = pci_enable_msi(adev->pdev);
226 if (!ret) {
227 adev->irq.msi_enabled = true;
228 dev_info(adev->dev, "amdgpu: using MSI.\n");
229 }
230 }
231
232 INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
233 INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
234
235 adev->irq.installed = true;
236 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
237 if (r) {
238 adev->irq.installed = false;
239 flush_work(&adev->hotplug_work);
240 return r;
241 }
242
243 DRM_INFO("amdgpu: irq initialized.\n");
244 return 0;
245}
246
247
248
249
250
251
252
253
/**
 * amdgpu_irq_fini - tear down interrupt handling
 * @adev: amdgpu device pointer
 *
 * Undoes amdgpu_irq_init(): cleans up vblank support, uninstalls the
 * DRM interrupt handler, disables MSI, flushes the pending hotplug
 * work, then frees the per-source enabled-type counters that were
 * allocated by amdgpu_irq_add_id(). Used on driver unload.
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		/* hotplug work may still be queued from the last HPD irq */
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		/* counters were kcalloc'ed in amdgpu_irq_add_id() */
		kfree(src->enabled_types);
		src->enabled_types = NULL;
	}
}
277
278
279
280
281
282
283
284
285
286int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
287 struct amdgpu_irq_src *source)
288{
289 if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
290 return -EINVAL;
291
292 if (adev->irq.sources[src_id] != NULL)
293 return -EINVAL;
294
295 if (!source->funcs)
296 return -EINVAL;
297
298 if (source->num_types && !source->enabled_types) {
299 atomic_t *types;
300
301 types = kcalloc(source->num_types, sizeof(atomic_t),
302 GFP_KERNEL);
303 if (!types)
304 return -ENOMEM;
305
306 source->enabled_types = types;
307 }
308
309 adev->irq.sources[src_id] = source;
310 return 0;
311}
312
313
314
315
316
317
318
319
320
321void amdgpu_irq_dispatch(struct amdgpu_device *adev,
322 struct amdgpu_iv_entry *entry)
323{
324 unsigned src_id = entry->src_id;
325 struct amdgpu_irq_src *src;
326 int r;
327
328 if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
329 DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
330 return;
331 }
332
333 src = adev->irq.sources[src_id];
334 if (!src) {
335 DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
336 return;
337 }
338
339 r = src->funcs->process(adev, src, entry);
340 if (r)
341 DRM_ERROR("error processing interrupt (%d)\n", r);
342}
343
344
345
346
347
348
349
350
351
352
/**
 * amdgpu_irq_update - program the hw state for one source/type
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: interrupt type within the source
 *
 * Takes the irq lock and programs the source to ENABLE or DISABLE
 * according to the current software refcount (amdgpu_irq_enabled()).
 *
 * Returns the result of the source's ->set() callback (0 on success).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* The refcount must be re-read *after* taking the lock;
	 * otherwise a concurrent get/put could make us program a
	 * state that is already stale.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}
373
374
375
376
377
378
379
380
381
382
383int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
384 unsigned type)
385{
386 if (!adev->ddev->irq_enabled)
387 return -ENOENT;
388
389 if (type >= src->num_types)
390 return -EINVAL;
391
392 if (!src->enabled_types || !src->funcs->set)
393 return -EINVAL;
394
395 if (atomic_inc_return(&src->enabled_types[type]) == 1)
396 return amdgpu_irq_update(adev, src, type);
397
398 return 0;
399}
400
401bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
402 struct amdgpu_irq_src *src,
403 unsigned type)
404{
405 if ((type >= src->num_types) || !src->enabled_types)
406 return false;
407 return atomic_inc_return(&src->enabled_types[type]) == 1;
408}
409
410
411
412
413
414
415
416
417
418
419int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
420 unsigned type)
421{
422 if (!adev->ddev->irq_enabled)
423 return -ENOENT;
424
425 if (type >= src->num_types)
426 return -EINVAL;
427
428 if (!src->enabled_types || !src->funcs->set)
429 return -EINVAL;
430
431 if (atomic_dec_and_test(&src->enabled_types[type]))
432 return amdgpu_irq_update(adev, src, type);
433
434 return 0;
435}
436
437
438
439
440
441
442
443
444
445bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
446 unsigned type)
447{
448 if (!adev->ddev->irq_enabled)
449 return false;
450
451 if (type >= src->num_types)
452 return false;
453
454 if (!src->enabled_types || !src->funcs->set)
455 return false;
456
457 return !!atomic_read(&src->enabled_types[type]);
458}
459