1
2
3
4
5
6
7
8
9#include <linux/pci.h>
10#include <linux/interrupt.h>
11
12#include "../common/mic_dev.h"
13#include "mic_device.h"
14
/*
 * mic_thread_fn - Threaded (bottom half) handler for the shared MSI/INTx
 * interrupt.
 *
 * Services every doorbell source whose bit was set in irq_info->mask by
 * mic_interrupt(), invoking each registered thread_fn callback for that
 * source. Runs under mic_thread_lock to serialize against callback
 * registration/unregistration (mic_register_intr_callback() and friends
 * take the same lock).
 *
 * NOTE(review): the loop bound is intr_len[MIC_INTR_DB], not
 * intr_start_idx[MIC_INTR_DB] + intr_len[MIC_INTR_DB]; this only walks all
 * doorbell sources if the doorbell start index is 0 — confirm against the
 * hardware-specific mic_intr_info tables.
 */
static irqreturn_t mic_thread_fn(int irq, void *dev)
{
	struct mic_device *mdev = dev;
	struct mic_intr_info *intr_info = mdev->intr_info;
	struct mic_irq_info *irq_info = &mdev->irq_info;
	struct mic_intr_cb *intr_cb;
	struct pci_dev *pdev = mdev->pdev;
	int i;

	spin_lock(&irq_info->mic_thread_lock);
	for (i = intr_info->intr_start_idx[MIC_INTR_DB];
			i < intr_info->intr_len[MIC_INTR_DB]; i++)
		/* pairs with set_bit() in mic_interrupt(); clearing here
		 * consumes the pending work for source i */
		if (test_and_clear_bit(i, &irq_info->mask)) {
			list_for_each_entry(intr_cb, &irq_info->cb_list[i],
					list)
				if (intr_cb->thread_fn)
					intr_cb->thread_fn(pdev->irq,
							intr_cb->data);
		}
	spin_unlock(&irq_info->mic_thread_lock);
	return IRQ_HANDLED;
}
37
38
39
40
41
42
/*
 * mic_interrupt - Hard (top half) handler for the shared MSI/INTx interrupt.
 *
 * Acks the interrupt via the hardware ops and reads back the mask of
 * pending doorbell sources. For each pending source, runs the fast
 * (non-threaded) handlers inline and records the source in
 * irq_info->mask so that mic_thread_fn() can run the threaded handlers.
 *
 * Returns IRQ_NONE when no source was pending (shared-IRQ etiquette),
 * otherwise IRQ_WAKE_THREAD to schedule mic_thread_fn().
 *
 * NOTE(review): same loop-bound caveat as mic_thread_fn() — the upper
 * bound ignores intr_start_idx; correct only if the doorbell block
 * starts at bit 0 — confirm.
 */
static irqreturn_t mic_interrupt(int irq, void *dev)
{
	struct mic_device *mdev = dev;
	struct mic_intr_info *intr_info = mdev->intr_info;
	struct mic_irq_info *irq_info = &mdev->irq_info;
	struct mic_intr_cb *intr_cb;
	struct pci_dev *pdev = mdev->pdev;
	u32 mask;
	int i;

	mask = mdev->ops->ack_interrupt(mdev);
	if (!mask)
		return IRQ_NONE;

	spin_lock(&irq_info->mic_intr_lock);
	for (i = intr_info->intr_start_idx[MIC_INTR_DB];
			i < intr_info->intr_len[MIC_INTR_DB]; i++)
		if (mask & BIT(i)) {
			list_for_each_entry(intr_cb, &irq_info->cb_list[i],
					list)
				if (intr_cb->handler)
					intr_cb->handler(pdev->irq,
							intr_cb->data);
			/* defer thread_fn callbacks to mic_thread_fn() */
			set_bit(i, &irq_info->mask);
		}
	spin_unlock(&irq_info->mic_intr_lock);
	return IRQ_WAKE_THREAD;
}
71
72
73static u16 mic_map_src_to_offset(struct mic_device *mdev,
74 int intr_src, enum mic_intr_type type)
75{
76 if (type >= MIC_NUM_INTR_TYPES)
77 return MIC_NUM_OFFSETS;
78 if (intr_src >= mdev->intr_info->intr_len[type])
79 return MIC_NUM_OFFSETS;
80
81 return mdev->intr_info->intr_start_idx[type] + intr_src;
82}
83
84
85static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
86{
87 int i;
88 struct mic_irq_info *info = &mdev->irq_info;
89
90 for (i = 0; i < info->num_vectors; i++)
91 if (!info->mic_msi_map[i])
92 return &info->msix_entries[i];
93 return NULL;
94}
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
110 u8 idx, irq_handler_t handler, irq_handler_t thread_fn,
111 void *data)
112{
113 struct mic_intr_cb *intr_cb;
114 unsigned long flags;
115 int rc;
116 intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL);
117
118 if (!intr_cb)
119 return ERR_PTR(-ENOMEM);
120
121 intr_cb->handler = handler;
122 intr_cb->thread_fn = thread_fn;
123 intr_cb->data = data;
124 intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
125 0, 0, GFP_KERNEL);
126 if (intr_cb->cb_id < 0) {
127 rc = intr_cb->cb_id;
128 goto ida_fail;
129 }
130
131 spin_lock(&mdev->irq_info.mic_thread_lock);
132 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
133 list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
134 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
135 spin_unlock(&mdev->irq_info.mic_thread_lock);
136
137 return intr_cb;
138ida_fail:
139 kfree(intr_cb);
140 return ERR_PTR(rc);
141}
142
143
144
145
146
147
148
149
150
151
152static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
153{
154 struct list_head *pos, *tmp;
155 struct mic_intr_cb *intr_cb;
156 unsigned long flags;
157 int i;
158
159 spin_lock(&mdev->irq_info.mic_thread_lock);
160 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
161 for (i = 0; i < MIC_NUM_OFFSETS; i++) {
162 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
163 intr_cb = list_entry(pos, struct mic_intr_cb, list);
164 if (intr_cb->cb_id == idx) {
165 list_del(pos);
166 ida_simple_remove(&mdev->irq_info.cb_ida,
167 intr_cb->cb_id);
168 kfree(intr_cb);
169 spin_unlock_irqrestore(
170 &mdev->irq_info.mic_intr_lock, flags);
171 spin_unlock(&mdev->irq_info.mic_thread_lock);
172 return i;
173 }
174 }
175 }
176 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
177 spin_unlock(&mdev->irq_info.mic_thread_lock);
178 return MIC_NUM_OFFSETS;
179}
180
181
182
183
184
185
186
187
188
189static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
190{
191 int rc, i;
192 int entry_size = sizeof(*mdev->irq_info.msix_entries);
193
194 mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX,
195 entry_size, GFP_KERNEL);
196 if (!mdev->irq_info.msix_entries) {
197 rc = -ENOMEM;
198 goto err_nomem1;
199 }
200
201 for (i = 0; i < MIC_MIN_MSIX; i++)
202 mdev->irq_info.msix_entries[i].entry = i;
203
204 rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
205 MIC_MIN_MSIX);
206 if (rc) {
207 dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
208 goto err_enable_msix;
209 }
210
211 mdev->irq_info.num_vectors = MIC_MIN_MSIX;
212 mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
213 mdev->irq_info.num_vectors), GFP_KERNEL);
214
215 if (!mdev->irq_info.mic_msi_map) {
216 rc = -ENOMEM;
217 goto err_nomem2;
218 }
219
220 dev_dbg(&mdev->pdev->dev,
221 "%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
222 return 0;
223err_nomem2:
224 pci_disable_msix(pdev);
225err_enable_msix:
226 kfree(mdev->irq_info.msix_entries);
227err_nomem1:
228 mdev->irq_info.num_vectors = 0;
229 return rc;
230}
231
232
233
234
235
236
237
238static int mic_setup_callbacks(struct mic_device *mdev)
239{
240 int i;
241
242 mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS,
243 sizeof(*mdev->irq_info.cb_list),
244 GFP_KERNEL);
245 if (!mdev->irq_info.cb_list)
246 return -ENOMEM;
247
248 for (i = 0; i < MIC_NUM_OFFSETS; i++)
249 INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
250 ida_init(&mdev->irq_info.cb_ida);
251 spin_lock_init(&mdev->irq_info.mic_intr_lock);
252 spin_lock_init(&mdev->irq_info.mic_thread_lock);
253 return 0;
254}
255
256
257
258
259
260
261
262static void mic_release_callbacks(struct mic_device *mdev)
263{
264 unsigned long flags;
265 struct list_head *pos, *tmp;
266 struct mic_intr_cb *intr_cb;
267 int i;
268
269 spin_lock(&mdev->irq_info.mic_thread_lock);
270 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
271 for (i = 0; i < MIC_NUM_OFFSETS; i++) {
272 if (list_empty(&mdev->irq_info.cb_list[i]))
273 break;
274
275 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
276 intr_cb = list_entry(pos, struct mic_intr_cb, list);
277 list_del(pos);
278 ida_simple_remove(&mdev->irq_info.cb_ida,
279 intr_cb->cb_id);
280 kfree(intr_cb);
281 }
282 }
283 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
284 spin_unlock(&mdev->irq_info.mic_thread_lock);
285 ida_destroy(&mdev->irq_info.cb_ida);
286 kfree(mdev->irq_info.cb_list);
287}
288
289
290
291
292
293
294
295
296
297static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
298{
299 int rc;
300
301 rc = pci_enable_msi(pdev);
302 if (rc) {
303 dev_dbg(&pdev->dev, "Error enabling MSI. rc = %d\n", rc);
304 return rc;
305 }
306
307 mdev->irq_info.num_vectors = 1;
308 mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
309 mdev->irq_info.num_vectors), GFP_KERNEL);
310
311 if (!mdev->irq_info.mic_msi_map) {
312 rc = -ENOMEM;
313 goto err_nomem1;
314 }
315
316 rc = mic_setup_callbacks(mdev);
317 if (rc) {
318 dev_err(&pdev->dev, "Error setting up callbacks\n");
319 goto err_nomem2;
320 }
321
322 rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
323 0, "mic-msi", mdev);
324 if (rc) {
325 dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
326 goto err_irq_req_fail;
327 }
328
329 dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
330 return 0;
331err_irq_req_fail:
332 mic_release_callbacks(mdev);
333err_nomem2:
334 kfree(mdev->irq_info.mic_msi_map);
335err_nomem1:
336 pci_disable_msi(pdev);
337 mdev->irq_info.num_vectors = 0;
338 return rc;
339}
340
341
342
343
344
345
346
347
348
349static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
350{
351 int rc;
352
353
354 pci_intx(pdev, 1);
355 rc = mic_setup_callbacks(mdev);
356 if (rc) {
357 dev_err(&pdev->dev, "Error setting up callbacks\n");
358 goto err_nomem;
359 }
360
361 rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
362 IRQF_SHARED, "mic-intx", mdev);
363 if (rc)
364 goto err;
365
366 dev_dbg(&pdev->dev, "intx irq setup\n");
367 return 0;
368err:
369 mic_release_callbacks(mdev);
370err_nomem:
371 return rc;
372}
373
374
375
376
377
378
379
380
381
382
383int mic_next_db(struct mic_device *mdev)
384{
385 int next_db;
386
387 next_db = mdev->irq_info.next_avail_src %
388 mdev->intr_info->intr_len[MIC_INTR_DB];
389 mdev->irq_info.next_avail_src++;
390 return next_db;
391}
392
/*
 * Opaque irq cookie layout: low 16 bits hold the MSI-x table entry (or 0
 * for MSI/INTx), the high bits hold the doorbell offset (MSI-x path) or
 * the callback id (MSI/INTx path). See mic_request_threaded_irq() and
 * mic_free_irq().
 */
#define COOKIE_ID_SHIFT 16
#define GET_ENTRY(cookie) ((cookie) & 0xFFFF)
#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT)
#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421struct mic_irq *
422mic_request_threaded_irq(struct mic_device *mdev,
423 irq_handler_t handler, irq_handler_t thread_fn,
424 const char *name, void *data, int intr_src,
425 enum mic_intr_type type)
426{
427 u16 offset;
428 int rc = 0;
429 struct msix_entry *msix = NULL;
430 unsigned long cookie = 0;
431 u16 entry;
432 struct mic_intr_cb *intr_cb;
433 struct pci_dev *pdev = mdev->pdev;
434
435 offset = mic_map_src_to_offset(mdev, intr_src, type);
436 if (offset >= MIC_NUM_OFFSETS) {
437 dev_err(&mdev->pdev->dev,
438 "Error mapping index %d to a valid source id.\n",
439 intr_src);
440 rc = -EINVAL;
441 goto err;
442 }
443
444 if (mdev->irq_info.num_vectors > 1) {
445 msix = mic_get_available_vector(mdev);
446 if (!msix) {
447 dev_err(&mdev->pdev->dev,
448 "No MSIx vectors available for use.\n");
449 rc = -ENOSPC;
450 goto err;
451 }
452
453 rc = request_threaded_irq(msix->vector, handler, thread_fn,
454 0, name, data);
455 if (rc) {
456 dev_dbg(&mdev->pdev->dev,
457 "request irq failed rc = %d\n", rc);
458 goto err;
459 }
460 entry = msix->entry;
461 mdev->irq_info.mic_msi_map[entry] |= BIT(offset);
462 mdev->intr_ops->program_msi_to_src_map(mdev,
463 entry, offset, true);
464 cookie = MK_COOKIE(entry, offset);
465 dev_dbg(&mdev->pdev->dev, "irq: %d assigned for src: %d\n",
466 msix->vector, intr_src);
467 } else {
468 intr_cb = mic_register_intr_callback(mdev, offset, handler,
469 thread_fn, data);
470 if (IS_ERR(intr_cb)) {
471 dev_err(&mdev->pdev->dev,
472 "No available callback entries for use\n");
473 rc = PTR_ERR(intr_cb);
474 goto err;
475 }
476
477 entry = 0;
478 if (pci_dev_msi_enabled(pdev)) {
479 mdev->irq_info.mic_msi_map[entry] |= (1 << offset);
480 mdev->intr_ops->program_msi_to_src_map(mdev,
481 entry, offset, true);
482 }
483 cookie = MK_COOKIE(entry, intr_cb->cb_id);
484 dev_dbg(&mdev->pdev->dev, "callback %d registered for src: %d\n",
485 intr_cb->cb_id, intr_src);
486 }
487 return (struct mic_irq *)cookie;
488err:
489 return ERR_PTR(rc);
490}
491
492
493
494
495
496
497
498
499
500
501
502
/*
 * mic_free_irq - Free an irq obtained from mic_request_threaded_irq().
 * @mdev: pointer to mic_device instance
 * @cookie: opaque cookie returned by mic_request_threaded_irq()
 * @data: same cookie data that was passed when requesting the irq
 *
 * MSI-x path: frees the dedicated vector and clears the source mapping.
 * MSI/INTx path: unregisters the callback. Note that in this path the
 * cookie's "offset" field actually carries the callback id (see
 * MK_COOKIE usage in mic_request_threaded_irq()), and the real doorbell
 * offset is recovered as the return value of
 * mic_unregister_intr_callback().
 */
void mic_free_irq(struct mic_device *mdev,
		  struct mic_irq *cookie, void *data)
{
	u32 offset;
	u32 entry;
	u8 src_id;
	unsigned int irq;
	struct pci_dev *pdev = mdev->pdev;

	entry = GET_ENTRY((unsigned long)cookie);
	offset = GET_OFFSET((unsigned long)cookie);
	if (mdev->irq_info.num_vectors > 1) {
		if (entry >= mdev->irq_info.num_vectors) {
			dev_warn(&mdev->pdev->dev,
				 "entry %d should be < num_irq %d\n",
				entry, mdev->irq_info.num_vectors);
			return;
		}
		irq = mdev->irq_info.msix_entries[entry].vector;
		free_irq(irq, data);
		mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset));
		mdev->intr_ops->program_msi_to_src_map(mdev,
			entry, offset, false);

		dev_dbg(&mdev->pdev->dev, "irq: %d freed\n", irq);
	} else {
		irq = pdev->irq;
		/* here "offset" is the cb_id; src_id is the real offset */
		src_id = mic_unregister_intr_callback(mdev, offset);
		if (src_id >= MIC_NUM_OFFSETS) {
			dev_warn(&mdev->pdev->dev, "Error unregistering callback\n");
			return;
		}
		if (pci_dev_msi_enabled(pdev)) {
			mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id));
			mdev->intr_ops->program_msi_to_src_map(mdev,
				entry, src_id, false);
		}
		dev_dbg(&mdev->pdev->dev, "callback %d unregistered for src: %d\n",
			offset, src_id);
	}
}
544
545
546
547
548
549
550
551
552
553int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
554{
555 int rc;
556
557 rc = mic_setup_msix(mdev, pdev);
558 if (!rc)
559 goto done;
560
561 rc = mic_setup_msi(mdev, pdev);
562 if (!rc)
563 goto done;
564
565 rc = mic_setup_intx(mdev, pdev);
566 if (rc) {
567 dev_err(&mdev->pdev->dev, "no usable interrupts\n");
568 return rc;
569 }
570done:
571 mdev->intr_ops->enable_interrupts(mdev);
572 return 0;
573}
574
575
576
577
578
579
580
581
582
583void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
584{
585 int i;
586
587 mdev->intr_ops->disable_interrupts(mdev);
588 if (mdev->irq_info.num_vectors > 1) {
589 for (i = 0; i < mdev->irq_info.num_vectors; i++) {
590 if (mdev->irq_info.mic_msi_map[i])
591 dev_warn(&pdev->dev, "irq %d may still be in use.\n",
592 mdev->irq_info.msix_entries[i].vector);
593 }
594 kfree(mdev->irq_info.mic_msi_map);
595 kfree(mdev->irq_info.msix_entries);
596 pci_disable_msix(pdev);
597 } else {
598 if (pci_dev_msi_enabled(pdev)) {
599 free_irq(pdev->irq, mdev);
600 kfree(mdev->irq_info.mic_msi_map);
601 pci_disable_msi(pdev);
602 } else {
603 free_irq(pdev->irq, mdev);
604 }
605 mic_release_callbacks(mdev);
606 }
607}
608
609
610
611
612
613
614
615
616
617
618
619
620void mic_intr_restore(struct mic_device *mdev)
621{
622 int entry, offset;
623 struct pci_dev *pdev = mdev->pdev;
624
625 if (!pci_dev_msi_enabled(pdev))
626 return;
627
628 for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) {
629 for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) {
630 if (mdev->irq_info.mic_msi_map[entry] & BIT(offset))
631 mdev->intr_ops->program_msi_to_src_map(mdev,
632 entry, offset, true);
633 }
634 }
635}
636