/*
 * QLogic qib driver: send DMA (SDMA) engine support.
 */
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

/* default number of send DMA descriptor queue entries per port */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits and field offsets defined in the 16-byte (two u64) send DMA
 * hardware descriptor; qw[0] holds the flags and counts, qw[1] the
 * upper address bits.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30

char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down] = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle] = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[qib_sdma_state_s99_running] = "s99_Running",
};

char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started] = "e20_HwStarted",
	[qib_sdma_event_e30_go_running] = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle] = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
};

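/*
 * Sketch of the state machine implemented by __qib_sdma_process_event()
 * below (derived from the switch statements; a reading aid only):
 *
 *   s00_HwDown --e10/e30--> s10_HwStartUpWait --e20--> s20_Idle or
 *   s99_Running, depending on go_s99_running.  From s99_Running,
 *   e60/e7220 errors drop to s30_SwCleanUpWait, while e70 and e7322
 *   errors go to s50_HwHaltWait.  Cleanup then proceeds
 *   s50 --e60--> s40_HwCleanUpWait --e50--> s30_SwCleanUpWait
 *   --e40--> s10_HwStartUpWait.  e00 forces s00_HwDown from any state.
 */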

static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

/*
 * Flush everything queued on the active list: unmap descriptors for
 * requests that asked for it, and complete each request with an
 * ABORTED status.  Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync the count of added and removed descriptors; it is
	 * important that sdma_descq_removed never decrements.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * The HW registers are reset via the state transition
	 * triggered by __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Called when changing to state qib_sdma_state_s10_hw_start_up_wait;
 * disarm all send buffers before (re)starting the hardware engine.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	/* undo the packing done by make_sdma_desc() */
	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	/* buffers must be dword aligned */
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
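
/*
 * Worked example (illustrative arithmetic only): for addr =
 * 0x1_2345_678c, dwlen = 16 dwords, dwoffset = 8, and generation 1,
 * make_sdma_desc() produces
 *
 *	sdmadesc[1] = 0x1;			// addr >> 32
 *	sdmadesc[0] = 0x2345678c00000000	// low addr bits in [63:32]
 *		    | (1ULL << 30)		// generation
 *		    | (16ULL << 16)		// dword count
 *		    | 8;			// dword offset
 *
 * unmap_desc() then recovers addr and len = 16 << 2 = 64 bytes.
 */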

/* must be called while holding sdma_lock */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/*
	 * The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment the dequeue count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}
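
/*
 * qib_sdma_descq_freecnt() (a helper in qib.h) reports free ring
 * slots from the producer/consumer counters maintained above; a
 * sketch of the computation, assuming the counters only ever grow:
 *
 *	free = descq_cnt - (descq_added - descq_removed) - 1;
 *
 * One slot is kept unused so a full ring stays distinguishable from
 * an empty one.
 */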

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}

int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit, so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; queue the request on
 * the active list anyway and then flush the whole list, so that this
 * request gets its ABORTED callback in the correct order.
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length).
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct qib_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	/* header descriptor: hdr_dwords at buffer offset 0 */
	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail, wrap and bump the generation if needed */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail, wrap and bump the generation if needed */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	/* back up to the last descriptor written and tag it */
	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	/* walk backwards, unmapping anything mapped for this request */
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
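
/*
 * Illustrative usage (the real event sources live in the link-state
 * and chip-specific error paths elsewhere in the driver): callers
 * drive the machine through this entry point, e.g.
 *
 *	qib_sdma_process_event(ppd, qib_sdma_event_e30_go_running);
 *
 * on link up, or the qib_sdma_event_e7220_err_halted /
 * qib_sdma_event_e7322_err_halted events from the error interrupt
 * handlers.
 */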

void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually a
			 * result of link up), then we need to start up.
			 * This can happen when hw down is requested
			 * while bringing the link up with traffic
			 * active on the 7220, e.g.
			 */
			ss->go_s99_running = 1;
			/* fall through -- and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}