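/*
 * QEMU PowerMac MacIO IDE controller emulation, driven through the DBDMA
 * engine.
 */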
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/ppc/mac_dbdma.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include "hw/ide/internal.h"

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define MACIO_PAGE_SIZE 4096

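/*
 * Unaligned DMA helpers: DBDMA requests from the guest need not be
 * sector-aligned, so partial head and tail sectors are padded into the
 * head_remainder/tail_remainder scratch buffers (and, for writes,
 * pre-read from disk) before the block-layer request is issued.
 */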
static void pmac_dma_read(BlockBackend *blk,
                          int64_t offset, unsigned int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_FROM_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    if (offset & (align - 1)) {
        head_bytes = offset & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
                      "discarding %zu bytes\n", sector_num, head_bytes);

        /* The unwanted head of the first sector lands in the scratch buffer */
        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);

        bytes += head_bytes;
        offset = offset & ~(align - 1);
    }

    qemu_iovec_add(&io->iov, io->dma_mem, io->len);

    if ((offset + bytes) & (align - 1)) {
        tail_bytes = (offset + bytes) & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
                      "discarding %zu bytes\n", sector_num, tail_bytes);

        /* The unwanted tail of the last sector lands in the scratch buffer */
        qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
        bytes = ROUND_UP(bytes, align);
    }

    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
}

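/*
 * DMA write: when the request is not sector-aligned, the partial head and/or
 * tail sectors are first read back from disk so that the bytes outside the
 * guest's range are rewritten unchanged (read-modify-write).
 */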
static void pmac_dma_write(BlockBackend *blk,
                           int64_t offset, int bytes,
                           void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;
    bool unaligned_head = false, unaligned_tail = false;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_TO_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    if (offset & (align - 1)) {
        head_bytes = offset & (align - 1);
        sector_num = ((offset & ~(align - 1)) >> 9);

        MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
                      PRId64 "\n", sector_num);

        blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);

        /* Re-write the existing bytes in front of the guest data */
        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
        qemu_iovec_add(&io->iov, io->dma_mem, io->len);

        bytes += head_bytes;
        offset = offset & ~(align - 1);

        unaligned_head = true;
    }

    if ((offset + bytes) & (align - 1)) {
        tail_bytes = (offset + bytes) & (align - 1);
        sector_num = (((offset + bytes) & ~(align - 1)) >> 9);

        MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
                      PRId64 "\n", sector_num);

        blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);

        if (!unaligned_head) {
            qemu_iovec_add(&io->iov, io->dma_mem, io->len);
        }

        /* Re-write the existing bytes after the guest data */
        qemu_iovec_add(&io->iov, &io->tail_remainder[tail_bytes],
                       align - tail_bytes);

        bytes = ROUND_UP(bytes, align);

        unaligned_tail = true;
    }

    if (!unaligned_head && !unaligned_tail) {
        qemu_iovec_add(&io->iov, io->dma_mem, io->len);
    }

    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
}

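/*
 * DMA TRIM: the guest buffer holds the TRIM range descriptors; map it and
 * hand it to the common IDE TRIM implementation.
 */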
static void pmac_dma_trim(BlockBackend *blk,
                          int64_t offset, int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_TO_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    qemu_iovec_add(&io->iov, io->dma_mem, io->len);
    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;
    io->len = 0;

    s->bus->dma->aiocb = ide_issue_trim(offset, &io->iov, cb, io, blk);
}

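/*
 * Completion callback for ATAPI (CD-ROM) DMA: each pass issues the next
 * chunk of the transfer until either the IDE request or the DBDMA program
 * is exhausted.
 */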
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_atapi_io_error(s, ret);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* Data not ready yet; wait for the channel to be restarted */
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    if (s->lba == -1) {
        /* Non-block ATAPI transfer - just copy the buffer to guest RAM */
        s->io_buffer_size = MIN(s->io_buffer_size, io->len);
        dma_memory_write(&address_space_memory, io->addr, s->io_buffer,
                         s->io_buffer_size);
        io->len = 0;
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    /* Calculate current offset (2048-byte CD sectors) */
    offset = ((int64_t)s->lba << 11) + s->io_buffer_index;

    pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
    return;

done:
    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                     io->dir, io->dma_len);

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
    } else {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}

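/*
 * Completion callback for hard-disk DMA (read/write/TRIM): issues the next
 * chunk of the request until the IDE transfer or the DBDMA program
 * completes.
 */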
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_dma_error(s);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    /* Calculate current offset */
    offset = (ide_get_sector(s) << 9) + s->io_buffer_index;

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    default:
        abort();
    }

    return;

done:
    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                     io->dir, io->dma_len);

    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        if (ret < 0) {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        }
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}

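/*
 * DBDMA "transfer" hook: entry point called by the DBDMA engine for this
 * channel; starts block accounting and kicks off the first chunk.
 */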
static void pmac_ide_transfer(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    if (s->drive_kind == IDE_CD) {
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_WRITE);
        break;
    default:
        break;
    }

    pmac_ide_transfer_cb(io, 0);
}

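/*
 * DBDMA "flush" hook: drain any in-flight AIO so the channel can be stopped
 * or flushed safely.
 */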
static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    if (s->bus->dma->aiocb) {
        blk_drain(s->blk);
    }
}

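/*
 * PowerMac IDE memory-mapped register accessors: the registers sit on
 * 16-byte boundaries inside a 0x1000-byte window, hence the
 * (addr & 0xFFF) >> 4 decoding below.
 */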
static void pmac_ide_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}

static uint32_t pmac_ide_readb(void *opaque, hwaddr addr)
{
    uint8_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        retval = ide_ioport_read(&d->bus, addr);
        break;
    case 8:
    case 22:
        retval = ide_status_read(&d->bus, 0);
        break;
    default:
        retval = 0xFF;
        break;
    }
    return retval;
}

static void pmac_ide_writew(void *opaque, hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap16(val);
    if (addr == 0) {
        ide_data_writew(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readw(void *opaque, hwaddr addr)
{
    uint16_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readw(&d->bus, 0);
    } else {
        retval = 0xFFFF;
    }
    retval = bswap16(retval);
    return retval;
}

static void pmac_ide_writel(void *opaque, hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap32(val);
    if (addr == 0) {
        ide_data_writel(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readl(void *opaque, hwaddr addr)
{
    uint32_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readl(&d->bus, 0);
    } else {
        retval = 0xFFFFFFFF;
    }
    retval = bswap32(retval);
    return retval;
}

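/* Legacy per-access-size MMIO dispatch table for the register window */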
static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

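/* Migration state: the IDE bus, both drives and the DMA-active flag */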
static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 4,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_BOOL(dma_active, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};

static void macio_ide_reset(DeviceState *dev)
{
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}

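/*
 * IDEDMA start callback: record how much data the IDE core expects and kick
 * the DBDMA engine to begin fetching descriptors.
 */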
static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *cb)
{
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    s->io_buffer_index = 0;
    if (s->drive_kind == IDE_CD) {
        s->io_buffer_size = s->packet_transfer_size;
    } else {
        s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
    }

    MACIO_DPRINTF("\n\n------------ IDE transfer\n");
    MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n",
                  s->io_buffer_size, s->io_buffer_index);
    MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
    MACIO_DPRINTF("-------------------------\n");

    m->dma_active = true;
    DBDMA_kick(m->dbdma);
}

static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .prepare_buf = ide_nop_int32,
    .rw_buf = ide_nop_int,
};

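/*
 * QOM plumbing: realize wires the DBDMA-backed IDEDMA ops to the bus;
 * instance_init creates the IDE bus, the 0x1000-byte register window and
 * the IDE/DMA IRQ lines.
 */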
static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}

static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}

static void macio_ide_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};

static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}

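/* Attach up to two drives: hd_table[0] is the master, hd_table[1] the slave */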
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
{
    int i;

    for (i = 0; i < 2; i++) {
        if (hd_table[i]) {
            ide_create_drive(&s->bus, i, hd_table[i]);
        }
    }
}

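/* Bind this IDE channel to its DBDMA channel and register the transfer hooks */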
void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}

type_init(macio_ide_register_types)