/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

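/*
 * csio_nondata_isr() - ISR for non-data (slow-path and mailbox) interrupts.
 * @irq: Interrupt number.
 * @dev_id: The HW module (struct csio_hw) registered with request_irq().
 *
 * Runs the slow-path and mailbox interrupt handlers under the HW lock and,
 * if new firmware events arrived, schedules the event worker exactly once
 * by setting CSIO_HWF_FWEVT_PENDING.
 */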
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        int rv;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        spin_lock_irqsave(&hw->lock, flags);
        csio_hw_slow_intr_handler(hw);
        rv = csio_mb_isr_handler(hw);

        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common firmware event handler routine.
 * @hw: HW module.
 *
 * Processes the firmware event queue. Shared between the MSI-X and
 * INTx paths.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
        int rv;
        unsigned long flags;

        rv = csio_fwevtq_handler(hw);

        spin_lock_irqsave(&hw->lock, flags);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - Firmware event MSI-X ISR.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Processes work requests on the firmware event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_fwevt_handler(hw);

        return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - Firmware event INTx handler routine.
 * @hw: HW module.
 *
 * Called from the INTx forward-interrupt path.
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *priv)
{
        csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI work request completion.
 * @hw: HW module.
 * @wr: The completed work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue: completed ioreqs are added here for the
 *          caller to invoke their callbacks.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *cbfn_q)
{
        struct csio_ioreq *ioreq;
        uint8_t *scsiwr;
        uint8_t subop;
        void *cmnd;
        unsigned long flags;

        ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
        if (likely(ioreq)) {
                if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
                        subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
                                        ((struct fw_scsi_abrt_cls_wr *)
                                            scsiwr)->sub_opcode_to_chk_all_io);

                        csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
                                 subop ? "Close" : "Abort",
                                 ioreq, ioreq->wr_status);

                        spin_lock_irqsave(&hw->lock, flags);
                        if (subop)
                                csio_scsi_closed(ioreq,
                                                 (struct list_head *)cbfn_q);
                        else
                                csio_scsi_aborted(ioreq,
                                                  (struct list_head *)cbfn_q);

                        /*
                         * If the command has already been returned to the
                         * mid-layer (csio_scsi_cmnd() is NULL, e.g. when an
                         * abort timed out and the driver cleaned up the
                         * command before this FW completion arrived), there
                         * is no callback left to run: take the ioreq off the
                         * completion queue and free it back to the pool.
                         */
                        cmnd = csio_scsi_cmnd(ioreq);
                        if (unlikely(cmnd == NULL))
                                list_del_init(&ioreq->sm.sm_list);

                        spin_unlock_irqrestore(&hw->lock, flags);

                        if (unlikely(cmnd == NULL))
                                csio_put_scsi_ioreq_lock(hw,
                                                csio_hw_to_scsim(hw), ioreq);
                } else {
                        spin_lock_irqsave(&hw->lock, flags);
                        csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
                        spin_unlock_irqrestore(&hw->lock, flags);
                }
        }
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given ingress queue, gathering the
 * completed ioreqs onto a local callback queue. The completion callbacks
 * are then invoked outside the HW lock, and the ioreqs are returned to
 * the free list. Shared between the MSI-X and INTx paths.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
        struct csio_hw *hw = (struct csio_hw *)iq->owner;
        LIST_HEAD(cbfn_q);
        struct list_head *tmp;
        struct csio_scsim *scm;
        struct csio_ioreq *ioreq;
        int isr_completions = 0;

        scm = csio_hw_to_scsim(hw);

        if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
                                        &cbfn_q) != 0))
                return IRQ_NONE;

        /* Call back the completion routines */
        list_for_each(tmp, &cbfn_q) {
                ioreq = (struct csio_ioreq *)tmp;
                isr_completions++;
                ioreq->io_cbfn(hw, ioreq);
                /* Release DDP buffer if used for this I/O */
                if (unlikely(ioreq->dcopy))
                        csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
                                                    ioreq->nsge);
        }

        if (isr_completions) {
                /* Return the ioreqs back to ioreq->freelist */
                csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
                                              isr_completions);
        }

        return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSI-X handler.
 * @irq: Interrupt number.
 * @dev_id: The ingress queue registered with request_irq().
 *
 * Top-level SCSI MSI-X handler; defers to csio_scsi_isr_handler().
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
        struct csio_q *iq = (struct csio_q *) dev_id;
        struct csio_hw *hw;

        if (unlikely(!iq))
                return IRQ_NONE;

        hw = (struct csio_hw *)iq->owner;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_scsi_isr_handler(iq);

        return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler.
 * @hw: HW module.
 * @priv: The ingress queue on which completions arrived.
 *
 * Top-level SCSI INTx handler; defers to csio_scsi_isr_handler().
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *priv)
{
        struct csio_q *iq = priv;

        csio_scsi_isr_handler(iq);
} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Single interrupt line: handles slow-path interrupts, the INTx forward
 * interrupt queue, and mailbox/firmware events.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        struct csio_q *intx_q = NULL;
        int rv;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        /* Disable the interrupt for this PCI function */
        if (hw->intr_mode == CSIO_IM_INTX)
                csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

        /*
         * The read in the following function will flush the
         * above write.
         */
        if (csio_hw_slow_intr_handler(hw))
                ret = IRQ_HANDLED;

        /* Get the INTx Forward interrupt IQ. */
        intx_q = csio_get_q(hw, hw->intr_iq_idx);

        CSIO_DB_ASSERT(intx_q);

        /* IQ handler is not possible for intx_q, hence pass in NULL */
        if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
                ret = IRQ_HANDLED;

        spin_lock_irqsave(&hw->lock, flags);
        rv = csio_mb_isr_handler(hw);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return ret;
}

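/*
 * csio_add_msix_desc() - Fill in human-readable names for the MSI-X
 * vectors: one "nondata" and one "fwevt" vector, followed by one "scsi"
 * vector per SCSI queue set. These names are later passed to request_irq()
 * and show up in /proc/interrupts.
 */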
static void
csio_add_msix_desc(struct csio_hw *hw)
{
        int i;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        int k = CSIO_EXTRA_VECS;
        int len = sizeof(entryp->desc) - 1;
        int cnt = hw->num_sqsets + k;

        /* Non-data vector */
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

        /* Firmware event vector */
        entryp++;
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
        entryp++;

        /* Name SCSI vectors */
        for (i = k; i < cnt; i++, entryp++) {
                memset(entryp->desc, 0, len + 1);
                snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
                         CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
                         CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
        }
}

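/*
 * csio_request_irqs() - Request interrupt lines from the kernel.
 *
 * In MSI/INTx mode a single (possibly shared) line is requested with
 * csio_fcoe_isr(). In MSI-X mode, separate vectors are requested for the
 * non-data and firmware-event interrupts, then one per SCSI queue set.
 * On failure, every IRQ acquired so far is freed and MSI-X is disabled.
 */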
int
csio_request_irqs(struct csio_hw *hw)
{
        int rv, i, j, k = 0;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        struct csio_scsi_cpu_info *info;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
                                 (hw->intr_mode == CSIO_IM_MSI) ?
                                                        0 : IRQF_SHARED,
                                 KBUILD_MODNAME, hw);
                if (rv) {
                        if (hw->intr_mode == CSIO_IM_MSI)
                                pci_disable_msi(hw->pdev);
                        csio_err(hw, "Failed to allocate interrupt line.\n");
                        return -EINVAL;
                }

                goto out;
        }

        /* Add the MSI-X vector descriptions */
        csio_add_msix_desc(hw);

        rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        /* Allocate IRQs for SCSI */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];
                for (j = 0; j < info->max_cpus; j++, k++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                        struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

                        rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
                                         entryp[k].desc, q);
                        if (rv) {
                                csio_err(hw,
                                     "IRQ request failed for vec %d err:%d\n",
                                     entryp[k].vector, rv);
                                goto err;
                        }

                        entryp[k].dev_id = (void *)q;

                } /* for all SCSI CPUs */
        } /* for all ports */

out:
        hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
        return 0;

err:
        for (i = 0; i < k; i++) {
                entryp = &hw->msix_entries[i];
                free_irq(entryp->vector, entryp->dev_id);
        }
        pci_disable_msix(hw->pdev);

        return -EINVAL;
}

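/*
 * csio_disable_msix() - Disable MSI-X on the PCI function.
 * @free: If true, free_irq() each requested vector first.
 */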
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
        int i;
        struct csio_msix_entries *entryp;
        int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

        if (free) {
                for (i = 0; i < cnt; i++) {
                        entryp = &hw->msix_entries[i];
                        free_irq(entryp->vector, entryp->dev_id);
                }
        }
        pci_disable_msix(hw->pdev);
}

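/*
 * csio_reduce_sqsets() - Reduce the number of SCSI queue sets to @cnt by
 * trimming the per-port CPU counts round-robin, so that no port drops
 * below one queue set.
 */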
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
        int i;
        struct csio_scsi_cpu_info *info;

        while (cnt < hw->num_sqsets) {
                for (i = 0; i < hw->num_pports; i++) {
                        info = &hw->scsi_cpu_info[i];
                        if (info->max_cpus > 1) {
                                info->max_cpus--;
                                hw->num_sqsets--;
                                if (hw->num_sqsets <= cnt)
                                        break;
                        }
                }
        }

        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

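/*
 * csio_enable_msix() - Allocate a range of MSI-X vectors and distribute
 * them: entry 0 serves the non-data and mailbox interrupts, entry 1 the
 * firmware events, and the remainder is spread across the per-port SCSI
 * queue sets. If the kernel grants fewer vectors than requested, the
 * queue sets are reduced to match.
 */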
static int
csio_enable_msix(struct csio_hw *hw)
{
        int i, j, k, n, min, cnt;
        struct csio_msix_entries *entryp;
        struct msix_entry *entries;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;

        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;

        /* Max vectors required based on #niqs configured in fw */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                cnt = min_t(uint8_t, hw->cfg_niq, cnt);

        entries = kcalloc(cnt, sizeof(struct msix_entry), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < cnt; i++)
                entries[i].entry = (uint16_t)i;

        csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

        cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
        if (cnt < 0) {
                kfree(entries);
                return cnt;
        }

        if (cnt < (hw->num_sqsets + extra)) {
                csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                csio_reduce_sqsets(hw, cnt - extra);
        }

        /* Save off vectors */
        for (i = 0; i < cnt; i++) {
                entryp = &hw->msix_entries[i];
                entryp->vector = entries[i].vector;
        }

        /* Distribute vectors */
        k = 0;
        csio_set_nondata_intr_idx(hw, entries[k].entry);
        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
        csio_set_fwevt_intr_idx(hw, entries[k++].entry);

        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        n = (j % info->max_cpus) + k;
                        hw->sqset[i][j].intr_idx = entries[n].entry;
                }

                k += info->max_cpus;
        }

        kfree(entries);
        return 0;
}

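/*
 * csio_intr_enable() - Select and enable an interrupt mode.
 *
 * Tries MSI-X first (when the csio_msi setting is 2), then MSI (when it
 * is 1), and falls back to INTx. In the MSI/INTx case the queue sets may
 * need to be reduced to fit the firmware-configured IQ count.
 */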
void
csio_intr_enable(struct csio_hw *hw)
{
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

        if ((csio_msi == 2) && !csio_enable_msix(hw))
                hw->intr_mode = CSIO_IM_MSIX;
        else {
                /* Max iqs required based on #niqs configured in fw */
                if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
                    !csio_is_hw_master(hw)) {
                        int extra = CSIO_EXTRA_MSI_IQS;

                        if (hw->cfg_niq < (hw->num_sqsets + extra)) {
                                csio_dbg(hw, "Reducing sqsets to %d\n",
                                         hw->cfg_niq - extra);
                                csio_reduce_sqsets(hw, hw->cfg_niq - extra);
                        }
                }

                if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
                        hw->intr_mode = CSIO_IM_MSI;
                else
                        hw->intr_mode = CSIO_IM_INTX;
        }

        csio_dbg(hw, "Using %s interrupt mode.\n",
                 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
                 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

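/*
 * csio_intr_disable() - Disable interrupts at the hardware, then tear
 * down the host-side lines for the current interrupt mode.
 * @free: If true, free_irq() the line(s) before disabling MSI/MSI-X.
 */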
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
        csio_hw_intr_disable(hw);

        switch (hw->intr_mode) {
        case CSIO_IM_MSIX:
                csio_disable_msix(hw, free);
                break;
        case CSIO_IM_MSI:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                pci_disable_msi(hw->pdev);
                break;
        case CSIO_IM_INTX:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                break;
        default:
                break;
        }
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}