#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
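
/*
 * SDMA descriptor layout, first quadword: bits 0-10 hold the dword
 * offset into the send buffer, bits 11-15 the flag bits below, bits
 * 16-26 the dword count, bits 30-31 the generation, and bits 32-63 the
 * low 32 bits of the DMA address (the upper address bits go in the
 * second quadword); see make_sdma_desc().
 */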
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30

static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);
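
/*
 * The sdma_state kref counts users of the SDMA state machine; the final
 * sdma_put() completes ss->comp, which sdma_finalput() waits on so that
 * teardown only proceeds once the engine is fully quiesced.
 */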
static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
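
/*
 * Complete every request on the SDMA active list with status
 * QIB_SDMA_TXREQ_S_ABORTED, unmapping a request's descriptors first
 * when it asked for that via QIB_SDMA_TXREQ_F_FREEDESC.
 */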
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point the engine is halted and we hold sdma_lock, so
	 * nothing else is retiring or submitting descriptors; the
	 * descriptor queue and active list are ours to clean up.
	 */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Everything outstanding has been retired or aborted, so mark
	 * the descriptor queue as empty again.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/* reset head, tail, head writeback and generation for a restart */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
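
/*
 * Disarm all send buffers used for SDMA on this port, then let the
 * hardware-specific code restart the engine.
 */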
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}
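
/*
 * Record the state transition and program the send-control operations
 * that the per-state action table requests for the new state.
 */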
static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}
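
/*
 * Decode the DMA address and byte length from the descriptor at "head"
 * and unmap the buffer it describes.
 */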
static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
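
/*
 * Allocate the descriptor ring and the DMA-coherent page the hardware
 * uses to report its current head index.
 */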
static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* memory for the SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* memory the engine DMAs its current head index into */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}
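
/*
 * Build the two quadwords of an SDMA descriptor from a bus address,
 * dword length and dword offset, using the field layout defined above.
 */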
static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* upper 32 bits of the DMA address */
	sdmadesc[1] = addr >> 32;
	/* lower 32 bits of the DMA address, word aligned */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* 2-bit generation */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* dword count */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* dword offset into the send buffer */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
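
/*
 * Advance the software head to the hardware's current head, unmapping
 * descriptors and completing requests whose last descriptor has been
 * retired.  Called with sdma_lock held; returns nonzero if any progress
 * was made.
 */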
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/*
	 * Not every descriptor belongs to a txreq, so walk descriptors
	 * from the software head to the hardware head and only unmap or
	 * complete when we reach the range owned by the txreq at the
	 * front of the active list.
	 */
	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if this descriptor is part of the current txp, unmap it */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* count the descriptor as dequeued */
		ppd->sdma_descq_removed++;

		/* advance the head, wrapping if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descriptors, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from the active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}
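
/*
 * Interrupt-context entry point: take sdma_lock, then retire completed
 * descriptors and push any pending user SDMA work.
 */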
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd)) {
		qib_sdma_make_progress(ppd);
		if (!list_empty(&ppd->sdma_userpending))
			qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
	}
}
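
/*
 * Allocate SDMA resources for a port, put the state machine into
 * s00_hw_down, and then kick it with the e10_go_hw_start event.
 */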
int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* put the engine into a known initial state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting for the state machine */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	ppd->sdma_intrequest = 0;
	INIT_LIST_HEAD(&ppd->sdma_userpending);

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine's references to drain, so it
	 * is not necessary to kill sdma_sw_clean_up_task to be sure it
	 * is no longer running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
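
/*
 * Complete a request when SDMA is not running: queue it on the active
 * list and then flush the whole list with an ABORTED status via
 * clear_sdma_activelist().  Called with sdma_lock held.
 */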
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no descriptors were written, so there is nothing to unmap */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}
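
/*
 * Queue one IB packet onto the send DMA descriptor queue.
 *
 * The header at tx->txreq.addr goes into the first descriptor, followed
 * by one descriptor per mapped chunk of the SGE list.  If the engine is
 * not running, the request is completed with an ABORTED status; if
 * there is not enough descriptor space, the QP may be queued on dmawait
 * and -EBUSY returned.
 */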
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct rvt_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct rvt_sge *sge;
	struct rvt_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;
	struct qib_qp_priv *priv;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	/* first descriptor covers the packet header */
	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail, wrapping and bumping the generation if needed */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
			ret = -ENOMEM;
			goto unmap;
		}
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail, wrapping and bumping the generation */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	/* set the final flags on the last descriptor just written */
	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
	priv = tx->qp->priv;
	atomic_inc(&priv->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	/* back out the descriptors written so far and unmap them */
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	priv = qp->priv;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
			rvt_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	priv = qp->priv;
	spin_lock(&qp->s_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		priv->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}
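
/*
 * Dump the descriptor queue and active list for debugging; intended to
 * be called with sdma_lock held.
 */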
void dump_sdma_state(struct qib_pportdata *ppd)
{
	struct qib_sdma_desc *descq;
	struct qib_sdma_txreq *txp, *txpnext;
	__le64 *descqp;
	u64 desc[2];
	u64 addr;
	u16 gen, dwlen, dwoffset;
	u16 head, tail, cnt;

	head = ppd->sdma_descq_head;
	tail = ppd->sdma_descq_tail;
	cnt = qib_sdma_descq_freecnt(ppd);
	descq = ppd->sdma_descq;

	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_head: %u\n", head);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_tail: %u\n", tail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sdma_descq_freecnt: %u\n", cnt);

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

		descqp = &descq[head].qw[0];
		desc[0] = le64_to_cpu(descqp[0]);
		desc[1] = le64_to_cpu(descqp[1]);
		flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
		flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
		flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
		flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
		flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
		gen = (desc[0] >> 30) & 3ULL;
		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
		dwoffset = (desc[0] & 0x7ffULL) << 2;
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
			head, flags, addr, gen, dwlen, dwoffset);
		if (++head == ppd->sdma_descq_cnt)
			head = 0;
	}

	/* print descriptor indices from the TX requests */
	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
				 list)
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
			txp->start_idx, txp->next_descq_idx);
}
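
/*
 * Locked wrapper around __qib_sdma_process_event(); if the engine ends
 * up running, also tell the verbs layer how many descriptors are free.
 */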
void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
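
/*
 * The SDMA state machine proper: dispatch on the current state and the
 * incoming event.  Called with sdma_lock held.
 */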
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If the engine is down but running was requested
			 * (e.g. as a result of a link coming up), remember
			 * that and start the hardware as well.
			 */
			ss->go_s99_running = 1;
			/* fall through -- and start the dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* take a reference; dropped in sdma_sw_tear_down() */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}