1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "qemu/osdep.h"
26#include "hw/hw.h"
27#include "hw/ppc/mac.h"
28#include "hw/ppc/mac_dbdma.h"
29#include "sysemu/block-backend.h"
30#include "sysemu/dma.h"
31
32#include <hw/ide/internal.h>
33
34
35
36
/* Compile-time debug switch: define DEBUG_MACIO to enable trace output. */
#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

/*
 * Debug printf.  Using a runtime "if (debug_macio)" rather than #ifdef-ing
 * the body out means the format string and arguments are always
 * type-checked by the compiler, even in non-debug builds.
 */
#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)




/* Hint for sizing the request iovec: one entry per guest page. */
#define MACIO_PAGE_SIZE 4096
54
55
56
57
58
59
60
61
/*
 * DMA read from the block backend into guest memory.
 *
 * The DBDMA engine may request transfers that are not aligned to the
 * 512-byte block size.  The request is widened to whole sectors and the
 * surplus head/tail bytes are read into io->head_remainder /
 * io->tail_remainder, where they are simply discarded.
 *
 * @blk:    backend to read from
 * @offset: byte offset on the backend
 * @bytes:  number of bytes requested
 * @cb:     completion callback, invoked with @opaque
 * @opaque: the DBDMA_io describing the guest-side buffer
 */
static void pmac_dma_read(BlockBackend *blk,
                          int64_t offset, unsigned int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr, dma_len;
    void *mem;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;

    /* Rebuild the request iovec from scratch for this transfer. */
    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    /* Map the guest DMA buffer.  NOTE(review): mem and the possibly
     * shortened dma_len are not checked -- assumes the mapping always
     * succeeds for the full length; verify against DBDMA callers. */
    dma_addr = io->addr;
    dma_len = io->len;
    mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
                         DMA_DIRECTION_FROM_DEVICE);

    if (offset & (align - 1)) {
        head_bytes = offset & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
                      "discarding %zu bytes\n", sector_num, head_bytes);

        /* First head_bytes of the aligned read land in the scratch buffer */
        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);

        /* Widen the request so it starts on a sector boundary */
        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    /* The guest-visible payload */
    qemu_iovec_add(&io->iov, mem, io->len);

    if ((offset + bytes) & (align - 1)) {
        tail_bytes = (offset + bytes) & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
                      "discarding bytes %zu\n", sector_num, tail_bytes);

        /* Trailing part of the final sector goes to the scratch buffer */
        qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
        bytes = ROUND_UP(bytes, align);
    }

    /* Account the guest-visible bytes as consumed before the aio starts */
    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov,
                                       (bytes >> 9), cb, io);
}
126
127static void pmac_dma_write(BlockBackend *blk,
128 int64_t offset, int bytes,
129 void (*cb)(void *opaque, int ret), void *opaque)
130{
131 DBDMA_io *io = opaque;
132 MACIOIDEState *m = io->opaque;
133 IDEState *s = idebus_active_if(&m->bus);
134 dma_addr_t dma_addr, dma_len;
135 void *mem;
136 int64_t sector_num;
137 int nsector;
138 uint64_t align = BDRV_SECTOR_SIZE;
139 size_t head_bytes, tail_bytes;
140 bool unaligned_head = false, unaligned_tail = false;
141
142 qemu_iovec_destroy(&io->iov);
143 qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
144
145 sector_num = (offset >> 9);
146 nsector = (io->len >> 9);
147
148 MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
149 "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
150 sector_num, nsector);
151
152 dma_addr = io->addr;
153 dma_len = io->len;
154 mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
155 DMA_DIRECTION_TO_DEVICE);
156
157 if (offset & (align - 1)) {
158 head_bytes = offset & (align - 1);
159 sector_num = ((offset & ~(align - 1)) >> 9);
160
161 MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
162 PRId64 "\n", sector_num);
163
164 blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
165
166 qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
167 qemu_iovec_add(&io->iov, mem, io->len);
168
169 bytes += offset & (align - 1);
170 offset = offset & ~(align - 1);
171
172 unaligned_head = true;
173 }
174
175 if ((offset + bytes) & (align - 1)) {
176 tail_bytes = (offset + bytes) & (align - 1);
177 sector_num = (((offset + bytes) & ~(align - 1)) >> 9);
178
179 MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
180 PRId64 "\n", sector_num);
181
182 blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
183
184 if (!unaligned_head) {
185 qemu_iovec_add(&io->iov, mem, io->len);
186 }
187
188 qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
189 align - tail_bytes);
190
191 bytes = ROUND_UP(bytes, align);
192
193 unaligned_tail = true;
194 }
195
196 if (!unaligned_head && !unaligned_tail) {
197 qemu_iovec_add(&io->iov, mem, io->len);
198 }
199
200 s->io_buffer_size -= io->len;
201 s->io_buffer_index += io->len;
202
203 io->len = 0;
204
205 MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
206 "nsector: %x\n", (offset >> 9), (bytes >> 9));
207
208 s->bus->dma->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov,
209 (bytes >> 9), cb, io);
210}
211
212static void pmac_dma_trim(BlockBackend *blk,
213 int64_t offset, int bytes,
214 void (*cb)(void *opaque, int ret), void *opaque)
215{
216 DBDMA_io *io = opaque;
217 MACIOIDEState *m = io->opaque;
218 IDEState *s = idebus_active_if(&m->bus);
219 dma_addr_t dma_addr, dma_len;
220 void *mem;
221
222 qemu_iovec_destroy(&io->iov);
223 qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
224
225 dma_addr = io->addr;
226 dma_len = io->len;
227 mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
228 DMA_DIRECTION_TO_DEVICE);
229
230 qemu_iovec_add(&io->iov, mem, io->len);
231 s->io_buffer_size -= io->len;
232 s->io_buffer_index += io->len;
233 io->len = 0;
234
235 s->bus->dma->aiocb = ide_issue_trim(blk, (offset >> 9), &io->iov,
236 (bytes >> 9), cb, io);
237}
238
/*
 * Completion callback driving ATAPI (CD-ROM) DMA transfers.
 *
 * Called first with ret == 0 to kick off the transfer, then re-entered
 * after each pmac_dma_read() completes.  Terminates when the IDE-side
 * byte count or the DBDMA-side byte count is exhausted, or on error.
 */
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_atapi_io_error(s, ret);
        goto done;
    }

    /* DBDMA channel paused: remember that we stopped and wait for a kick */
    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);

        io->processing = false;
        return;
    }

    /* IDE side has no more bytes to move: command complete */
    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    /* DBDMA descriptor exhausted; more may follow via a new kick */
    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    if (s->lba == -1) {
        /* Non-block ATAPI transfer: data is already in s->io_buffer,
         * copy it straight into guest memory and finish the command. */
        s->io_buffer_size = MIN(s->io_buffer_size, io->len);
        cpu_physical_memory_write(io->addr, s->io_buffer, s->io_buffer_size);
        io->len = 0;
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    /* CD sectors are 2048 bytes, hence lba << 11 */
    offset = ((int64_t)s->lba << 11) + s->io_buffer_index;

    /* Re-arm ourselves as the completion callback for the next chunk */
    pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
    return;

done:
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
    } else {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}
300
/*
 * Completion callback driving hard-disk DMA transfers (read/write/trim).
 *
 * Called first with ret == 0 to kick off the transfer, then re-entered
 * after each pmac_dma_read/write/trim() completes.  Terminates when the
 * IDE-side byte count or the DBDMA-side byte count is exhausted, or on
 * error.
 */
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_dma_error(s);
        goto done;
    }

    /* DBDMA channel paused: remember that we stopped and wait for a kick */
    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);

        io->processing = false;
        return;
    }

    /* IDE side has no more bytes to move: command complete, raise IRQ */
    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
        goto done;
    }

    /* DBDMA descriptor exhausted; more may follow via a new kick */
    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    /* Disk sectors are 512 bytes, hence << 9 */
    offset = (ide_get_sector(s) << 9) + s->io_buffer_index;

    /* Re-arm ourselves as the completion callback for the next chunk */
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    default:
        abort();
    }

    return;

done:
    /* Only read/write were accounted in pmac_ide_transfer() */
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        if (ret < 0) {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        }
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}
368
/*
 * DBDMA "transfer" channel entry point: start block accounting for the
 * request, then hand control to the appropriate completion-callback
 * state machine (ATAPI for CD drives, otherwise disk read/write/trim).
 */
static void pmac_ide_transfer(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    if (s->drive_kind == IDE_CD) {
        /* ATAPI transfers only ever read from the medium */
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);

        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_WRITE);
        break;
    default:
        /* TRIM and others are not accounted */
        break;
    }

    /* Kick off the state machine with a successful "completion" */
    pmac_ide_transfer_cb(io, 0);
}
399
400static void pmac_ide_flush(DBDMA_io *io)
401{
402 MACIOIDEState *m = io->opaque;
403 IDEState *s = idebus_active_if(&m->bus);
404
405 if (s->bus->dma->aiocb) {
406 blk_drain_all();
407 }
408}
409
410
411static void pmac_ide_writeb (void *opaque,
412 hwaddr addr, uint32_t val)
413{
414 MACIOIDEState *d = opaque;
415
416 addr = (addr & 0xFFF) >> 4;
417 switch (addr) {
418 case 1 ... 7:
419 ide_ioport_write(&d->bus, addr, val);
420 break;
421 case 8:
422 case 22:
423 ide_cmd_write(&d->bus, 0, val);
424 break;
425 default:
426 break;
427 }
428}
429
430static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
431{
432 uint8_t retval;
433 MACIOIDEState *d = opaque;
434
435 addr = (addr & 0xFFF) >> 4;
436 switch (addr) {
437 case 1 ... 7:
438 retval = ide_ioport_read(&d->bus, addr);
439 break;
440 case 8:
441 case 22:
442 retval = ide_status_read(&d->bus, 0);
443 break;
444 default:
445 retval = 0xFF;
446 break;
447 }
448 return retval;
449}
450
451static void pmac_ide_writew (void *opaque,
452 hwaddr addr, uint32_t val)
453{
454 MACIOIDEState *d = opaque;
455
456 addr = (addr & 0xFFF) >> 4;
457 val = bswap16(val);
458 if (addr == 0) {
459 ide_data_writew(&d->bus, 0, val);
460 }
461}
462
463static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
464{
465 uint16_t retval;
466 MACIOIDEState *d = opaque;
467
468 addr = (addr & 0xFFF) >> 4;
469 if (addr == 0) {
470 retval = ide_data_readw(&d->bus, 0);
471 } else {
472 retval = 0xFFFF;
473 }
474 retval = bswap16(retval);
475 return retval;
476}
477
478static void pmac_ide_writel (void *opaque,
479 hwaddr addr, uint32_t val)
480{
481 MACIOIDEState *d = opaque;
482
483 addr = (addr & 0xFFF) >> 4;
484 val = bswap32(val);
485 if (addr == 0) {
486 ide_data_writel(&d->bus, 0, val);
487 }
488}
489
490static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
491{
492 uint32_t retval;
493 MACIOIDEState *d = opaque;
494
495 addr = (addr & 0xFFF) >> 4;
496 if (addr == 0) {
497 retval = ide_data_readl(&d->bus, 0);
498 } else {
499 retval = 0xFFFFFFFF;
500 }
501 retval = bswap32(retval);
502 return retval;
503}
504
/* MMIO dispatch table for the 0x1000-byte "pmac-ide" region; uses the
 * legacy per-width old_mmio callbacks. */
static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
520
/* Migration state: the IDE bus, both drives, and the DMA-active flag. */
static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 4,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_BOOL(dma_active, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};
532
533static void macio_ide_reset(DeviceState *dev)
534{
535 MACIOIDEState *d = MACIO_IDE(dev);
536
537 ide_bus_reset(&d->bus);
538}
539
/* No-op IDEDMA callback (rw_buf): nothing to do, always succeeds. */
static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}
544
/* No-op IDEDMA callback (prepare_buf): nothing prepared, returns 0. */
static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}
549
550static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
551 BlockCompletionFunc *cb)
552{
553 MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
554
555 s->io_buffer_index = 0;
556 if (s->drive_kind == IDE_CD) {
557 s->io_buffer_size = s->packet_transfer_size;
558 } else {
559 s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
560 }
561
562 MACIO_DPRINTF("\n\n------------ IDE transfer\n");
563 MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n",
564 s->io_buffer_size, s->io_buffer_index);
565 MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
566 MACIO_DPRINTF("-------------------------\n");
567
568 m->dma_active = true;
569 DBDMA_kick(m->dbdma);
570}
571
/* DMA ops for the IDE core: only start_dma does real work here, the
 * buffer callbacks are no-ops because DBDMA moves the data itself. */
static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .prepare_buf = ide_nop_int32,
    .rw_buf = ide_nop_int,
};
577
/* Realize: finish IDE bus setup and attach the DBDMA-backed DMA ops. */
static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Route the IDE core's DMA requests through our DBDMA glue */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}
588
589static void macio_ide_initfn(Object *obj)
590{
591 SysBusDevice *d = SYS_BUS_DEVICE(obj);
592 MACIOIDEState *s = MACIO_IDE(obj);
593
594 ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
595 memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
596 sysbus_init_mmio(d, &s->mem);
597 sysbus_init_irq(d, &s->irq);
598 sysbus_init_irq(d, &s->dma_irq);
599}
600
601static void macio_ide_class_init(ObjectClass *oc, void *data)
602{
603 DeviceClass *dc = DEVICE_CLASS(oc);
604
605 dc->realize = macio_ide_realizefn;
606 dc->reset = macio_ide_reset;
607 dc->vmsd = &vmstate_pmac;
608 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
609}
610
/* QOM type registration record for the MacIO IDE sysbus device. */
static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};
618
/* Register the MacIO IDE type with QOM (invoked via type_init below). */
static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}
623
624
625void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
626{
627 int i;
628
629 for (i = 0; i < 2; i++) {
630 if (hd_table[i]) {
631 ide_create_drive(&s->bus, i, hd_table[i]);
632 }
633 }
634}
635
/* Bind this IDE device to a DBDMA channel: remember the controller and
 * register our transfer/flush callbacks plus the DMA interrupt line. */
void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}
642
/* Hook type registration into QEMU's module init machinery. */
type_init(macio_ide_register_types)
644