1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/spinlock.h>
34#include <linux/netdevice.h>
35#include <linux/moduleparam.h>
36
37#include "qib.h"
38#include "qib_common.h"
39
40
/* Number of entries in the SDMA descriptor ring; 0 falls back to 256. */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Flag and field positions inside qword 0 of a hardware SDMA descriptor.
 * The dword offset occupies the low bits, the dword count starts at bit
 * SDMA_DESC_COUNT_LSB, the 2-bit generation at SDMA_DESC_GEN_LSB, and
 * the low 32 address bits live in [63:32] (see make_sdma_desc()).
 */
#define SDMA_DESC_LAST (1ULL << 11)
#define SDMA_DESC_FIRST (1ULL << 12)
#define SDMA_DESC_DMA_HEAD (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR (1ULL << 15)
#define SDMA_DESC_COUNT_LSB 16
#define SDMA_DESC_GEN_LSB 30
55
/* Human-readable names for enum qib_sdma_states, indexed by state value. */
char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down] = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle] = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[qib_sdma_state_s99_running] = "s99_Running",
};
65
/* Human-readable names for enum qib_sdma_events, indexed by event value. */
char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started] = "e20_HwStarted",
	[qib_sdma_event_e30_go_running] = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle] = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
};
79
80
81static int alloc_sdma(struct qib_pportdata *);
82static void sdma_complete(struct kref *);
83static void sdma_finalput(struct qib_sdma_state *);
84static void sdma_get(struct qib_sdma_state *);
85static void sdma_put(struct qib_sdma_state *);
86static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
87static void sdma_start_sw_clean_up(struct qib_pportdata *);
88static void sdma_sw_clean_up_task(unsigned long);
89static void unmap_desc(struct qib_pportdata *, unsigned);
90
/* Take a reference on the SDMA state machine (paired with sdma_put()). */
static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}
95
/* kref release callback: wakes anyone blocked in sdma_finalput(). */
static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}
103
/* Drop a reference; fires sdma_complete() when the last one goes away. */
static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}
108
/*
 * Drop the caller's reference and block until every outstanding
 * reference is gone (i.e. until sdma_complete() has run).
 */
static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
114
115
116
117
118
119
120
121
122
123
/*
 * Abort every txreq on the active list: unmap any descriptors the
 * request owns (F_FREEDESC) and run its callback with
 * QIB_SDMA_TXREQ_S_ABORTED.  Caller must hold sdma_lock.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			/* walk [start_idx, next_descq_idx) with ring wrap */
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}
144
/*
 * Tasklet doing the software half of an SDMA reset: reclaim whatever
 * the hardware already retired, abort the rest, reset the ring indices
 * to their power-on values, then tell the state machine the software
 * side is clean (e40).
 */
static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * Reclaim any descriptors the hardware has already retired
	 * (completes those txreqs with S_OK)...
	 */
	qib_sdma_make_progress(ppd);

	/* ...and abort everything still outstanding (S_ABORTED). */
	clear_sdma_activelist(ppd);

	/* everything queued is now accounted for as removed */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset the software notion of head/tail/generation; the chip
	 * side is reset separately when the state machine restarts the
	 * engine.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
186
187
188
189
190
191
/*
 * Disarm every send buffer in the SDMA range, then (re)start the
 * engine through the chip-specific hook.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}
202
static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}
210
/* Defer the software clean-up to tasklet (softirq) context. */
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}
215
216static void sdma_set_state(struct qib_pportdata *ppd,
217 enum qib_sdma_states next_state)
218{
219 struct qib_sdma_state *ss = &ppd->sdma_state;
220 struct sdma_set_state_action *action = ss->set_state_action;
221 unsigned op = 0;
222
223
224 ss->previous_state = ss->current_state;
225 ss->previous_op = ss->current_op;
226
227 ss->current_state = next_state;
228
229 if (action[next_state].op_enable)
230 op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
231
232 if (action[next_state].op_intenable)
233 op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
234
235 if (action[next_state].op_halt)
236 op |= QIB_SDMA_SENDCTRL_OP_HALT;
237
238 if (action[next_state].op_drain)
239 op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
240
241 if (action[next_state].go_s99_running_tofalse)
242 ss->go_s99_running = 0;
243
244 if (action[next_state].go_s99_running_totrue)
245 ss->go_s99_running = 1;
246
247 ss->current_op = op;
248
249 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
250}
251
252static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
253{
254 __le64 *descqp = &ppd->sdma_descq[head].qw[0];
255 u64 desc[2];
256 dma_addr_t addr;
257 size_t len;
258
259 desc[0] = le64_to_cpu(descqp[0]);
260 desc[1] = le64_to_cpu(descqp[1]);
261
262 addr = (desc[1] << 32) | (desc[0] >> 32);
263 len = (desc[0] >> 14) & (0x7ffULL << 2);
264 dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
265}
266
267static int alloc_sdma(struct qib_pportdata *ppd)
268{
269 ppd->sdma_descq_cnt = sdma_descq_cnt;
270 if (!ppd->sdma_descq_cnt)
271 ppd->sdma_descq_cnt = 256;
272
273
274 ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
275 ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
276 GFP_KERNEL);
277
278 if (!ppd->sdma_descq) {
279 qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
280 "FIFO memory\n");
281 goto bail;
282 }
283
284
285 ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
286 PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
287 if (!ppd->sdma_head_dma) {
288 qib_dev_err(ppd->dd, "failed to allocate SendDMA "
289 "head memory\n");
290 goto cleanup_descq;
291 }
292 ppd->sdma_head_dma[0] = 0;
293 return 0;
294
295cleanup_descq:
296 dma_free_coherent(&ppd->dd->pcidev->dev,
297 ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
298 ppd->sdma_descq_phys);
299 ppd->sdma_descq = NULL;
300 ppd->sdma_descq_phys = 0;
301bail:
302 ppd->sdma_descq_cnt = 0;
303 return -ENOMEM;
304}
305
306static void free_sdma(struct qib_pportdata *ppd)
307{
308 struct qib_devdata *dd = ppd->dd;
309
310 if (ppd->sdma_head_dma) {
311 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
312 (void *)ppd->sdma_head_dma,
313 ppd->sdma_head_phys);
314 ppd->sdma_head_dma = NULL;
315 ppd->sdma_head_phys = 0;
316 }
317
318 if (ppd->sdma_descq) {
319 dma_free_coherent(&dd->pcidev->dev,
320 ppd->sdma_descq_cnt * sizeof(u64[2]),
321 ppd->sdma_descq, ppd->sdma_descq_phys);
322 ppd->sdma_descq = NULL;
323 ppd->sdma_descq_phys = 0;
324 }
325}
326
/*
 * Build the two descriptor qwords for one SDMA buffer:
 *   sdmadesc[1] = upper 32 bits of the DMA address
 *   sdmadesc[0] = low 32 addr bits in [63:32] | 2-bit generation at
 *                 SDMA_DESC_GEN_LSB | dword count at SDMA_DESC_COUNT_LSB
 *                 | dword offset in the low bits
 * FIRST/LAST/INTR etc. flag bits are OR'd in by the caller.
 */
static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	/* SDMA buffers must be dword aligned */
	WARN_ON(addr & 3);
	/* high 32 address bits */
	sdmadesc[1] = addr >> 32;
	/* low 32 address bits, low two bits masked off */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* current 2-bit ring generation */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* length of this buffer in dwords */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* dword offset of this buffer within the packet */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
345
346
/*
 * Reclaim descriptors the hardware has retired: advance the software
 * head up to the chip's head, unmapping descriptors where requested and
 * completing txreqs with QIB_SDMA_TXREQ_S_OK as their last descriptor
 * is passed.  Caller must hold sdma_lock.  Returns 1 if any progress
 * was made (and notifies the verbs layer of the new free count), else 0.
 */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/*
	 * (txp, idx) track the oldest active request and its next
	 * descriptor to unmap while head catches up to hwhead; the
	 * hardware may have retired descriptors spanning several txreqs.
	 */
	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if this desc belongs to txp, unmap it if requested */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* count the descriptor as dequeued */
		ppd->sdma_descq_removed++;

		/* advance head, wrapping at ring end */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descriptors, complete it */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list before the callback */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* move on to the next active txreq, if any */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}
408
409
410
411
/*
 * SDMA interrupt entry point: take sdma_lock and let __qib_sdma_intr()
 * do the work.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
422
/* Interrupt handler body; caller must hold sdma_lock. */
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}
428
/*
 * Per-port SDMA setup: allocate the DMA memory, put the state machine
 * in hw_down, initialise refcounting/bookkeeping and the clean-up
 * tasklet, program the chip registers, then kick the state machine
 * with e10_go_hw_start.  Returns 0 or a negative errno.
 */
int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* establish a consistent initial chip/state-machine state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting for the state machine */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}
472
/*
 * Per-port SDMA teardown: drive the state machine to hw_down, wait for
 * all references to drain, then free the DMA memory.
 */
void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * sdma_finalput() waits for the state machine's last reference,
	 * so there is no need to explicitly kill sdma_sw_clean_up_task
	 * to make sure it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}
486
/* Locked query: is the SDMA state machine in the running state? */
int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
498
499
500
501
502
503
504
505
/*
 * Complete a request when SDMA is not running: place it on the active
 * list with an empty descriptor range and let clear_sdma_activelist()
 * invoke its callback with QIB_SDMA_TXREQ_S_ABORTED.
 * Caller must hold sdma_lock.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no descriptors were written, so nothing to unmap */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}
516
517
518
519
520
521
522
523
524
525
/*
 * qib_sdma_verbs_send - build and queue an SDMA descriptor list
 * @ppd: port to send on
 * @ss: SGE state to pull the payload from
 * @dwords: payload length in dwords
 * @tx: verbs txreq carrying the pre-mapped header and flags
 *
 * Writes one descriptor for the header plus one per mapped payload
 * chunk, marks the last descriptor, then hands the ring tail to the
 * chip.  Returns 0 on success (also 0 after a DMA-mapping failure,
 * where the QP is put in error instead), or -EBUSY when there is no
 * descriptor space and the QP has been queued on the dmawait list.
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
	struct qib_sge_state *ss, u32 dwords,
	struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		/* try reclaiming retired descriptors before giving up */
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	/* first descriptor covers the already-mapped packet header */
	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descriptor queue */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail, wrapping and bumping the generation */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		/* clamp to the current SGE's remaining bytes */
		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* the large-buffer flag must be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail, wrapping and bumping the generation */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* advance to the next MR segment */
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	/* back up to the last descriptor written and flag it */
	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	/* DMA mapping failed: unwind the descriptors written so far */
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* move the QP to the error state if still processing */
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* ret is still 0: the caller moves on to the next work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * No descriptor space: save the request state so it can
		 * be retried when descriptors free up, and queue the QP
		 * on the dmawait list to be woken then.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}
709
/*
 * Locked wrapper around __qib_sdma_process_event(); if the transition
 * landed in the running state, also tells the verbs layer how many
 * descriptors are free.
 */
void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
724
/*
 * The SDMA state machine: dispatch @event against the current state,
 * performing transitions via sdma_set_state() and the associated
 * start-up / clean-up / tear-down actions.  Caller must hold sdma_lock.
 * Unhandled (state, event) pairs are intentionally no-ops.
 */
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * Down but running requested: latch the request
			 * so s10 transitions to running once the
			 * hardware has started.
			 */
			ss->go_s99_running = 1;
			/* fall through -- and start the dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			/* go where go_s99_running says we were headed */
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			/* software is clean; restart the hardware */
			sdma_set_state(ppd,
				qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			/* hardware is clean; now clean up the software */
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			/* halt complete; start the hardware clean-up */
			sdma_set_state(ppd,
				qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			/* 7220-style error: clean up in software */
			sdma_set_state(ppd,
				qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			/* 7322-style error: wait for the hardware halt */
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	/* remember the last event for debugging/diagnostics */
	ss->last_event = event;
}
977