1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#include "qemu/osdep.h"
40#include "hw/hw.h"
41#include "hw/isa/isa.h"
42#include "hw/ppc/mac_dbdma.h"
43#include "qemu/main-loop.h"
44#include "qemu/log.h"
45
46
47
48
/* Debug tracing: printf with a "DBDMA: " prefix when DEBUG_DBDMA is
 * defined at compile time, otherwise expands to nothing. */
#ifdef DEBUG_DBDMA
#define DBDMA_DPRINTF(fmt, ...) \
    do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DBDMA_DPRINTF(fmt, ...)
#endif
55
56
57
58
59static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
60{
61 return container_of(ch, DBDMAState, channels[ch->channel]);
62}
63
#ifdef DEBUG_DBDMA
/* Dump every field of one descriptor; all multi-byte fields are stored
 * guest little-endian, hence the le*_to_cpu() conversions. */
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
    printf("dbdma_cmd %p\n", cmd);
    printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
    printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
    printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
    printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
    printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
    printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
}
#else
/* No-op stub so call sites need no #ifdef of their own. */
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
}
#endif
/*
 * Fetch the descriptor at the current command pointer from guest memory
 * into ch->current.  Only the low 32 bits (DBDMA_CMDPTR_LO) are used as
 * the address.
 */
static void dbdma_cmdptr_load(DBDMA_channel *ch)
{
    DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
                  ch->regs[DBDMA_CMDPTR_LO]);
    cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO],
                             &ch->current, sizeof(dbdma_cmd));
}
87
/*
 * Write ch->current back to the descriptor in guest memory at the
 * current command pointer, so the guest sees the updated xfer_status
 * and res_count fields.
 */
static void dbdma_cmdptr_save(DBDMA_channel *ch)
{
    DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n",
                  ch->regs[DBDMA_CMDPTR_LO]);
    DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n",
                  le16_to_cpu(ch->current.xfer_status),
                  le16_to_cpu(ch->current.res_count));
    cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO],
                              &ch->current, sizeof(dbdma_cmd));
}
98
99static void kill_channel(DBDMA_channel *ch)
100{
101 DBDMA_DPRINTF("kill_channel\n");
102
103 ch->regs[DBDMA_STATUS] |= DEAD;
104 ch->regs[DBDMA_STATUS] &= ~ACTIVE;
105
106 qemu_irq_raise(ch->irq);
107}
108
/*
 * Raise the channel interrupt if the INTR_* condition encoded in the
 * current command is satisfied.  For INTR_IFSET/INTR_IFCLR the device
 * status bits are compared against the mask/value halves of the
 * DBDMA_INTR_SEL register.
 */
static void conditional_interrupt(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t intr;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTF("%s\n", __func__);

    intr = le16_to_cpu(current->command) & INTR_MASK;

    switch(intr) {
    case INTR_NEVER:  /* don't interrupt */
        return;
    case INTR_ALWAYS: /* always interrupt */
        qemu_irq_raise(ch->irq);
        DBDMA_DPRINTF("%s: raise\n", __func__);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    /* High half of INTR_SEL selects which device status bits to test,
     * low half gives the value they are compared against. */
    sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(intr) {
    case INTR_IFSET:  /* interrupt if the selected bits match */
        if (cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTF("%s: raise\n", __func__);
        }
        return;
    case INTR_IFCLR:  /* interrupt if the selected bits do not match */
        if (!cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTF("%s: raise\n", __func__);
        }
        return;
    }
}
152
153static int conditional_wait(DBDMA_channel *ch)
154{
155 dbdma_cmd *current = &ch->current;
156 uint16_t wait;
157 uint16_t sel_mask, sel_value;
158 uint32_t status;
159 int cond;
160
161 DBDMA_DPRINTF("conditional_wait\n");
162
163 wait = le16_to_cpu(current->command) & WAIT_MASK;
164
165 switch(wait) {
166 case WAIT_NEVER:
167 return 0;
168 case WAIT_ALWAYS:
169 return 1;
170 }
171
172 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
173
174 sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
175 sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
176
177 cond = (status & sel_mask) == (sel_value & sel_mask);
178
179 switch(wait) {
180 case WAIT_IFSET:
181 if (cond)
182 return 1;
183 return 0;
184 case WAIT_IFCLR:
185 if (!cond)
186 return 1;
187 return 0;
188 }
189 return 0;
190}
191
192static void next(DBDMA_channel *ch)
193{
194 uint32_t cp;
195
196 ch->regs[DBDMA_STATUS] &= ~BT;
197
198 cp = ch->regs[DBDMA_CMDPTR_LO];
199 ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
200 dbdma_cmdptr_load(ch);
201}
202
203static void branch(DBDMA_channel *ch)
204{
205 dbdma_cmd *current = &ch->current;
206
207 ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep;
208 ch->regs[DBDMA_STATUS] |= BT;
209 dbdma_cmdptr_load(ch);
210}
211
/*
 * Choose the next descriptor according to the BR_* condition in the
 * current command: either fall through to the following descriptor
 * (next()) or jump to the address held in cmd_dep (branch()).  For
 * BR_IFSET/BR_IFCLR the device status bits are compared against the
 * mask/value halves of DBDMA_BRANCH_SEL.
 */
static void conditional_branch(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t br;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTF("conditional_branch\n");

    br = le16_to_cpu(current->command) & BR_MASK;

    switch(br) {
    case BR_NEVER:  /* don't branch */
        next(ch);
        return;
    case BR_ALWAYS: /* always branch */
        branch(ch);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    /* High half selects the status bits to test, low half the value. */
    sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(br) {
    case BR_IFSET:  /* branch if the selected bits match */
        if (cond)
            branch(ch);
        else
            next(ch);
        return;
    case BR_IFCLR:  /* branch if the selected bits do not match */
        if (!cond)
            branch(ch);
        else
            next(ch);
        return;
    }
}
257
258static void channel_run(DBDMA_channel *ch);
259
/*
 * Completion callback handed to the device backend for INPUT/OUTPUT
 * transfers.  Writes descriptor status back to guest memory, handles
 * interrupt/branch conditions, then resumes command processing if the
 * channel is still runnable.  If the wait condition holds, status
 * write-back is skipped and the command stays current.
 */
static void dbdma_end(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTF("%s\n", __func__);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    /* io->len is what the backend left untransferred (residual). */
    current->res_count = cpu_to_le16(io->len);
    dbdma_cmdptr_save(ch);
    if (io->is_last)
        ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    conditional_branch(ch);

wait:
    /* This transfer is finished; allow the next command to run. */
    ch->io.processing = false;

    if ((ch->regs[DBDMA_STATUS] & RUN) &&
        (ch->regs[DBDMA_STATUS] & ACTIVE))
        channel_run(ch);
}
287
288static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
289 uint16_t req_count, int is_last)
290{
291 DBDMA_DPRINTF("start_output\n");
292
293
294
295
296
297 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
298 if (!addr || key > KEY_STREAM3) {
299 kill_channel(ch);
300 return;
301 }
302
303 ch->io.addr = addr;
304 ch->io.len = req_count;
305 ch->io.is_last = is_last;
306 ch->io.dma_end = dbdma_end;
307 ch->io.is_dma_out = 1;
308 ch->io.processing = true;
309 if (ch->rw) {
310 ch->rw(&ch->io);
311 }
312}
313
314static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
315 uint16_t req_count, int is_last)
316{
317 DBDMA_DPRINTF("start_input\n");
318
319
320
321
322
323 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
324 if (!addr || key > KEY_STREAM3) {
325 kill_channel(ch);
326 return;
327 }
328
329 ch->io.addr = addr;
330 ch->io.len = req_count;
331 ch->io.is_last = is_last;
332 ch->io.dma_end = dbdma_end;
333 ch->io.is_dma_out = 0;
334 ch->io.processing = true;
335 if (ch->rw) {
336 ch->rw(&ch->io);
337 }
338}
339
/*
 * LOAD_WORD command: read len (1, 2 or 4) bytes of system memory at
 * addr and merge them into the descriptor's cmd_dep field.  Only
 * KEY_SYSTEM is implemented; other keys kill the channel.
 */
static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
                      uint16_t len)
{
    dbdma_cmd *current = &ch->current;
    uint32_t val;

    DBDMA_DPRINTF("load_word\n");

    if (key != KEY_SYSTEM) {
        printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    /* NOTE(review): for len < 4 this reads into the low-address bytes
     * of a host-endian uint32_t and then merges with cmd_dep (which is
     * guest little-endian elsewhere in this file) without any
     * le32_to_cpu/cpu_to_le32 round trip — the result looks
     * host-endianness-dependent; confirm against the DBDMA spec. */
    cpu_physical_memory_read(addr, &val, len);

    if (len == 2)
        val = (val << 16) | (current->cmd_dep & 0x0000ffff);
    else if (len == 1)
        val = (val << 24) | (current->cmd_dep & 0x00ffffff);

    current->cmd_dep = val;

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    /* Reschedule the channel via the controller's bottom half. */
    DBDMA_kick(dbdma_from_ch(ch));
}
378
/*
 * STORE_WORD command: write len (1, 2 or 4) bytes of the descriptor's
 * cmd_dep field to system memory at addr.  Only KEY_SYSTEM is
 * implemented; other keys kill the channel.
 */
static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
                       uint16_t len)
{
    dbdma_cmd *current = &ch->current;
    uint32_t val;

    DBDMA_DPRINTF("store_word\n");

    if (key != KEY_SYSTEM) {
        printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    /* NOTE(review): cmd_dep is guest little-endian elsewhere in this
     * file but is used here without le32_to_cpu, and for len < 4 the
     * low-address bytes of a host-endian value are written out — this
     * looks host-endianness-dependent; confirm against the DBDMA
     * spec. */
    val = current->cmd_dep;
    if (len == 2)
        val >>= 16;
    else if (len == 1)
        val >>= 24;

    cpu_physical_memory_write(addr, &val, len);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    /* Reschedule the channel via the controller's bottom half. */
    DBDMA_kick(dbdma_from_ch(ch));
}
416
417static void nop(DBDMA_channel *ch)
418{
419 dbdma_cmd *current = &ch->current;
420
421 if (conditional_wait(ch))
422 goto wait;
423
424 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
425 dbdma_cmdptr_save(ch);
426
427 conditional_interrupt(ch);
428 conditional_branch(ch);
429
430wait:
431 DBDMA_kick(dbdma_from_ch(ch));
432}
433
434static void stop(DBDMA_channel *ch)
435{
436 ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
437
438
439}
440
/*
 * Execute the descriptor currently loaded in ch->current.  Transfer
 * commands (INPUT/OUTPUT) hand off to the device backend and complete
 * asynchronously in dbdma_end(); NOP/STOP/LOAD_WORD/STORE_WORD
 * complete synchronously.
 */
static void channel_run(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t cmd, key;
    uint16_t req_count;
    uint32_t phy_addr;

    DBDMA_DPRINTF("channel_run\n");
    dump_dbdma_cmd(current);

    /* The channel is running now; clear the WAKE request bit. */
    ch->regs[DBDMA_STATUS] &= ~WAKE;

    cmd = le16_to_cpu(current->command) & COMMAND_MASK;

    switch (cmd) {
    case DBDMA_NOP:
        nop(ch);
        return;

    case DBDMA_STOP:
        stop(ch);
        return;
    }

    key = le16_to_cpu(current->command) & 0x0700;
    req_count = le16_to_cpu(current->req_count);
    phy_addr = le32_to_cpu(current->phy_addr);

    if (key == KEY_STREAM4) {
        printf("command %x, invalid key 4\n", cmd);
        kill_channel(ch);
        return;
    }

    switch (cmd) {
    case OUTPUT_MORE:
        start_output(ch, key, phy_addr, req_count, 0);
        return;

    case OUTPUT_LAST:
        start_output(ch, key, phy_addr, req_count, 1);
        return;

    case INPUT_MORE:
        start_input(ch, key, phy_addr, req_count, 0);
        return;

    case INPUT_LAST:
        start_input(ch, key, phy_addr, req_count, 1);
        return;
    }

    if (key < KEY_REGS) {
        printf("command %x, invalid key %x\n", cmd, key);
        key = KEY_SYSTEM;
    }

    /* LOAD_WORD/STORE_WORD move 1, 2 or 4 bytes: the size is taken
     * from the low bits of req_count, and the address is aligned down
     * to the transfer size. */
    req_count = req_count & 0x0007;
    if (req_count & 0x4) {
        req_count = 4;
        phy_addr &= ~3;
    } else if (req_count & 0x2) {
        req_count = 2;
        phy_addr &= ~1;
    } else
        req_count = 1;

    switch (cmd) {
    case LOAD_WORD:
        load_word(ch, key, phy_addr, req_count);
        return;

    case STORE_WORD:
        store_word(ch, key, phy_addr, req_count);
        return;
    }
}
524
525static void DBDMA_run(DBDMAState *s)
526{
527 int channel;
528
529 for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
530 DBDMA_channel *ch = &s->channels[channel];
531 uint32_t status = ch->regs[DBDMA_STATUS];
532 if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
533 channel_run(ch);
534 }
535 }
536}
537
538static void DBDMA_run_bh(void *opaque)
539{
540 DBDMAState *s = opaque;
541
542 DBDMA_DPRINTF("DBDMA_run_bh\n");
543
544 DBDMA_run(s);
545}
546
/* Schedule the controller's bottom half, which runs all runnable
 * channels (see DBDMA_run_bh). */
void DBDMA_kick(DBDMAState *dbdma)
{
    qemu_bh_schedule(dbdma->bh);
}
551
552void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
553 DBDMA_rw rw, DBDMA_flush flush,
554 void *opaque)
555{
556 DBDMAState *s = dbdma;
557 DBDMA_channel *ch = &s->channels[nchan];
558
559 DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan);
560
561 assert(rw);
562 assert(flush);
563
564 ch->irq = irq;
565 ch->rw = rw;
566 ch->flush = flush;
567 ch->io.opaque = opaque;
568}
569
/*
 * Handle a guest write to the channel CONTROL register.  The DBDMA
 * protocol is mask/value: the high 16 bits select which STATUS bits
 * are modified, the low 16 bits give their new values.  Derived bits
 * (ACTIVE, DEAD) are then recomputed and a pending FLUSH is serviced.
 */
static void
dbdma_control_write(DBDMA_channel *ch)
{
    uint16_t mask, value;
    uint32_t status;

    mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
    value = ch->regs[DBDMA_CONTROL] & 0xffff;

    /* Only these status bits are guest-writable. */
    value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);

    status = ch->regs[DBDMA_STATUS];

    /* Merge: masked bits take the new value, the rest are kept. */
    status = (value & mask) | (status & ~mask);

    if (status & WAKE)
        status |= ACTIVE;
    if (status & RUN) {
        status |= ACTIVE;
        status &= ~DEAD;
    }
    if (status & PAUSE)
        status &= ~ACTIVE;
    if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
        /* RUN was just cleared by this write: stop the channel. */
        status &= ~(ACTIVE|DEAD);
    }

    if ((status & FLUSH) && ch->flush) {
        ch->flush(&ch->io);
        status &= ~FLUSH;
    }

    DBDMA_DPRINTF(" status 0x%08x\n", status);

    ch->regs[DBDMA_STATUS] = status;

    if (status & ACTIVE) {
        DBDMA_kick(dbdma_from_ch(ch));
    }
}
611
/*
 * MMIO write handler for the DBDMA register block.  Each channel
 * occupies 1 << DBDMA_CHANNEL_SHIFT bytes; registers are 32 bits wide
 * and indexed by (offset within channel) >> 2.
 */
static void dbdma_write(void *opaque, hwaddr addr,
                        uint64_t value, unsigned size)
{
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
                  addr, value);
    DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
                  (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    /* The command pointer must not be modified while the channel is
     * active; such writes are silently ignored. */
    if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
        return;
    }

    ch->regs[reg] = value;

    switch(reg) {
    case DBDMA_CONTROL:
        dbdma_control_write(ch);
        break;
    case DBDMA_CMDPTR_LO:
        /* Descriptors are 16-byte aligned; fetch the one pointed to. */
        ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
        dbdma_cmdptr_load(ch);
        break;
    case DBDMA_STATUS:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* stored above; no immediate side effect */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* unimplemented/reserved registers */
        break;
    }
}
662
/*
 * MMIO read handler for the DBDMA register block.  CONTROL and the
 * unimplemented registers read back as 0; the rest return the stored
 * register value.
 */
static uint64_t dbdma_read(void *opaque, hwaddr addr,
                           unsigned size)
{
    uint32_t value;
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    value = ch->regs[reg];

    DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
    DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
                  (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    switch(reg) {
    case DBDMA_CONTROL:
        /* CONTROL is write-only; reads return 0. */
        value = 0;
        break;
    case DBDMA_STATUS:
    case DBDMA_CMDPTR_LO:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* return the stored value */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
        /* unimplemented registers read as 0 */
        value = 0;
        break;
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* reserved registers */
        break;
    }

    return value;
}
708
/* MMIO ops for the register block: little-endian, 32-bit accesses only. */
static const MemoryRegionOps dbdma_ops = {
    .read = dbdma_read,
    .write = dbdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
718
/* Migration state for one in-flight DBDMA_io request. */
static const VMStateDescription vmstate_dbdma_io = {
    .name = "dbdma_io",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(addr, struct DBDMA_io),
        VMSTATE_INT32(len, struct DBDMA_io),
        VMSTATE_INT32(is_last, struct DBDMA_io),
        VMSTATE_INT32(is_dma_out, struct DBDMA_io),
        VMSTATE_BOOL(processing, struct DBDMA_io),
        VMSTATE_END_OF_LIST()
    }
};
732
/* Migration state for one descriptor (the channel's cached command). */
static const VMStateDescription vmstate_dbdma_cmd = {
    .name = "dbdma_cmd",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(req_count, dbdma_cmd),
        VMSTATE_UINT16(command, dbdma_cmd),
        VMSTATE_UINT32(phy_addr, dbdma_cmd),
        VMSTATE_UINT32(cmd_dep, dbdma_cmd),
        VMSTATE_UINT16(res_count, dbdma_cmd),
        VMSTATE_UINT16(xfer_status, dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};
747
/* Migration state for one channel: registers, pending io, current cmd. */
static const VMStateDescription vmstate_dbdma_channel = {
    .name = "dbdma_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
        VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
        VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
                       dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};
760
/* Top-level migration state: the array of all channels. */
static const VMStateDescription vmstate_dbdma = {
    .name = "dbdma",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
                             vmstate_dbdma_channel, DBDMA_channel),
        VMSTATE_END_OF_LIST()
    }
};
771
772static void dbdma_reset(void *opaque)
773{
774 DBDMAState *s = opaque;
775 int i;
776
777 for (i = 0; i < DBDMA_CHANNELS; i++)
778 memset(s->channels[i].regs, 0, DBDMA_SIZE);
779}
780
781static void dbdma_unassigned_rw(DBDMA_io *io)
782{
783 DBDMA_channel *ch = io->channel;
784 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
785 __func__, ch->channel);
786}
787
788static void dbdma_unassigned_flush(DBDMA_io *io)
789{
790 DBDMA_channel *ch = io->channel;
791 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
792 __func__, ch->channel);
793}
794
/*
 * Allocate and initialise the DBDMA controller.  Every channel starts
 * out with the "unassigned" callbacks (which log guest errors);
 * devices claim channels later via DBDMA_register_channel().  Exposes
 * the MMIO register block through *dbdma_mem, registers migration and
 * reset handlers, and returns the opaque DBDMAState pointer.
 */
void* DBDMA_init (MemoryRegion **dbdma_mem)
{
    DBDMAState *s;
    int i;

    s = g_malloc0(sizeof(DBDMAState));

    for (i = 0; i < DBDMA_CHANNELS; i++) {
        DBDMA_io *io = &s->channels[i].io;
        DBDMA_channel *ch = &s->channels[i];
        qemu_iovec_init(&io->iov, 1);

        ch->rw = dbdma_unassigned_rw;
        ch->flush = dbdma_unassigned_flush;
        ch->channel = i;
        ch->io.channel = ch;
    }

    memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
    *dbdma_mem = &s->mem;
    vmstate_register(NULL, -1, &vmstate_dbdma, s);
    qemu_register_reset(dbdma_reset, s);

    /* Bottom half through which all channel processing is scheduled. */
    s->bh = qemu_bh_new(DBDMA_run_bh, s);

    return s;
}
822