author | Alexander Graf <agraf@suse.de> | 2014-05-26 10:27:58 +0200 |
---|---|---|
committer | Alexander Graf <agraf@suse.de> | 2014-06-16 13:24:38 +0200 |
commit | 3e300fa6ad4ee19b16339c25773dec8df0bfb982 (patch) | |
tree | 45fb2105bd9e7112fa0fc51428d44da6a37564a7 /hw | |
parent | 6ab39b1bd3474aab57e10cc90377b9a3b94a72d4 (diff) | |
download | qemu-3e300fa6ad4ee19b16339c25773dec8df0bfb982.zip | |
macio ide: Do remainder access asynchronously
The macio IDE controller has some pretty nasty magic in its implementation to
allow for unaligned sector accesses. We used to handle these accesses
synchronously inside the IO callback handler.
However, the block infrastructure changed beneath our feet, and it is now impossible
to call a synchronous block read/write from the AIO callback handler of a
previous block access.
Work around that limitation by making the unaligned handling bits also go
through our asynchronous handler.
This fixes booting Mac OS X for me.
Reported-by: John Arbuckle <programmingkidx@gmail.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
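
The core of the fix is a small amount of request bookkeeping: every asynchronous block access bumps io->requests, the completion callback only proceeds once the counter drops back to zero, and the unaligned remainder is handled by re-arming the same callback with another bdrv_aio_readv()/bdrv_aio_writev() instead of a blocking bdrv_read()/bdrv_write(). The self-contained sketch below illustrates that scheme in isolation; the toy aio_read() and Transfer type are stand-ins for QEMU's bdrv_aio_readv() and DBDMA_io, not actual QEMU code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for bdrv_aio_readv(): it only queues work, and the completion
 * is delivered later from the "main loop" below, never synchronously. */
typedef void CompletionFunc(void *opaque, int ret);

static struct {
    CompletionFunc *cb;
    void *opaque;
    bool pending;
} queued;

static void aio_read(CompletionFunc *cb, void *opaque)
{
    queued.cb = cb;
    queued.opaque = opaque;
    queued.pending = true;
}

/* Per-transfer state, loosely modelled on DBDMA_io: a counter of in-flight
 * requests plus a flag saying an unaligned remainder still has to be read. */
typedef struct {
    int requests;
    bool remainder_pending;
} Transfer;

static void transfer_cb(void *opaque, int ret)
{
    Transfer *t = opaque;

    if (--t->requests) {
        /* Another request for this transfer is still in flight. */
        return;
    }

    if (t->remainder_pending) {
        /* Instead of a synchronous read here (no longer allowed from a
         * completion callback), re-arm: queue one more async request and
         * finish the transfer when its completion arrives. */
        t->remainder_pending = false;
        t->requests++;
        aio_read(transfer_cb, t);
        return;
    }

    printf("transfer complete (ret=%d)\n", ret);
}

int main(void)
{
    Transfer t = { .requests = 0, .remainder_pending = true };

    /* Kick off the transfer the way pmac_ide_transfer() does after this
     * patch: bump the counter, then drive the callback once by hand. */
    t.requests++;
    transfer_cb(&t, 0);

    /* Minimal event loop: deliver any queued completion. */
    while (queued.pending) {
        queued.pending = false;
        queued.cb(queued.opaque, 0);
    }
    return 0;
}
```

The counter is what keeps the callback from tearing the transfer down while a remainder request is still outstanding; it is also why pmac_ide_transfer() in the patch increments io->requests before driving the callback by hand.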
Diffstat (limited to 'hw')
-rw-r--r-- | hw/ide/macio.c | 50 |
-rw-r--r-- | hw/misc/macio/mac_dbdma.c | 6 |
2 files changed, 46 insertions, 10 deletions
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index af57168db3..c14a1ddddb 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -193,6 +193,11 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
         goto done;
     }
 
+    if (--io->requests) {
+        /* More requests still in flight */
+        return;
+    }
+
     if (!m->dma_active) {
         MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                       s->nsector, io->len, s->status);
@@ -212,6 +217,13 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
         s->nsector -= n;
     }
 
+    if (io->finish_remain_read) {
+        /* Finish a stale read from the last iteration */
+        io->finish_remain_read = false;
+        cpu_physical_memory_write(io->finish_addr, io->remainder,
+                                  io->finish_len);
+    }
+
     MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                   "sector_num: %" PRId64 "\n",
                   io->remainder_len, io->len, s->nsector, sector_num);
@@ -229,7 +241,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
             break;
         case IDE_DMA_WRITE:
             cpu_physical_memory_read(io->addr, p, remainder_len);
-            bdrv_write(s->bs, sector_num - 1, io->remainder, 1);
             break;
         case IDE_DMA_TRIM:
             break;
@@ -237,6 +248,15 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
         io->addr += remainder_len;
         io->len -= remainder_len;
         io->remainder_len -= remainder_len;
+
+        if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {
+            io->requests++;
+            qemu_iovec_reset(&io->iov);
+            qemu_iovec_add(&io->iov, io->remainder, 0x200);
+
+            m->aiocb = bdrv_aio_writev(s->bs, sector_num - 1, &io->iov, 1,
+                                       pmac_ide_transfer_cb, io);
+        }
     }
 
     if (s->nsector == 0 && !io->remainder_len) {
@@ -267,20 +287,25 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
 
         switch (s->dma_cmd) {
         case IDE_DMA_READ:
-            bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
-            cpu_physical_memory_write(io->addr + io->len - unaligned,
-                                      io->remainder, unaligned);
+            io->requests++;
+            io->finish_addr = io->addr + io->len - unaligned;
+            io->finish_len = unaligned;
+            io->finish_remain_read = true;
+            qemu_iovec_reset(&io->iov);
+            qemu_iovec_add(&io->iov, io->remainder, 0x200);
+
+            m->aiocb = bdrv_aio_readv(s->bs, sector_num + nsector, &io->iov, 1,
+                                      pmac_ide_transfer_cb, io);
             break;
         case IDE_DMA_WRITE:
             /* cache the contents in our io struct */
             cpu_physical_memory_read(io->addr + io->len - unaligned,
-                                     io->remainder, unaligned);
+                                     io->remainder + io->remainder_len,
+                                     unaligned);
             break;
         case IDE_DMA_TRIM:
             break;
         }
-
-        io->len -= unaligned;
     }
 
     MACIO_DPRINTF("io->len = %#x\n", io->len);
@@ -292,10 +317,12 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
         io->remainder_len = (0x200 - unaligned) & 0x1ff;
         MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);
 
-        /* We would read no data from the block layer, thus not get a callback.
-           Just fake completion manually. */
+        /* Only subsector reads happening */
         if (!io->len) {
-            pmac_ide_transfer_cb(opaque, 0);
+            if (!io->requests) {
+                io->requests++;
+                pmac_ide_transfer_cb(opaque, ret);
+            }
             return;
         }
 
@@ -319,6 +346,8 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
                                   DMA_DIRECTION_TO_DEVICE);
         break;
     }
+
+    io->requests++;
     return;
 
 done:
@@ -374,6 +403,7 @@ static void pmac_ide_transfer(DBDMA_io *io)
         break;
     }
 
+    io->requests++;
     pmac_ide_transfer_cb(io, 0);
 }
 
diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
index 3335476c29..b25e8511b2 100644
--- a/hw/misc/macio/mac_dbdma.c
+++ b/hw/misc/macio/mac_dbdma.c
@@ -748,9 +748,15 @@ static void dbdma_reset(void *opaque)
 void* DBDMA_init (MemoryRegion **dbdma_mem)
 {
     DBDMAState *s;
+    int i;
 
     s = g_malloc0(sizeof(DBDMAState));
 
+    for (i = 0; i < DBDMA_CHANNELS; i++) {
+        DBDMA_io *io = &s->channels[i].io;
+        qemu_iovec_init(&io->iov, 1);
+    }
+
     memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
     *dbdma_mem = &s->mem;
     vmstate_register(NULL, -1, &vmstate_dbdma, s);
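
The callback also relies on several DBDMA_io fields that are declared outside the files shown in this hw-limited diffstat: the requests counter, the finish_remain_read/finish_addr/finish_len trio used to defer the copy-out of an unaligned read tail, and the iov that DBDMA_init() now initializes per channel. A rough, hypothetical sketch of that extra bookkeeping, with types inferred only from how the fields are used above:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch of the per-channel bookkeeping this patch depends on.
 * The real fields live in QEMU's DBDMA_io struct (not part of this diff);
 * the names come from their use in hw/ide/macio.c and the types are guesses
 * based on that usage. */
struct remainder_bookkeeping {
    int requests;            /* asynchronous block requests still in flight */
    bool finish_remain_read; /* a read tail still has to be copied to guest RAM */
    uint64_t finish_addr;    /* guest-physical destination of that deferred copy */
    int finish_len;          /* number of bytes in the unaligned tail */
};
```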