author     Paolo Bonzini <pbonzini@redhat.com>   2013-03-01 13:59:19 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>   2013-04-08 18:13:12 +0200
commit     49ab747f668f421138d5b40d83fa279c4c5e278d (patch)
tree       943225a04eac885aed038731adf058f2250a2f40 /hw/misc/macio
parent     ce3b494cb504f96992f2d37ebc8f56deed202b06 (diff)
hw: move target-independent files to subdirectories
This patch tackles all files that are compiled once, moving them to subdirectories of hw/.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'hw/misc/macio')
-rw-r--r--  hw/misc/macio/Makefile.objs |   3
-rw-r--r--  hw/misc/macio/cuda.c        | 740
-rw-r--r--  hw/misc/macio/mac_dbdma.c   | 859
-rw-r--r--  hw/misc/macio/macio.c       | 305
4 files changed, 1907 insertions, 0 deletions
diff --git a/hw/misc/macio/Makefile.objs b/hw/misc/macio/Makefile.objs
new file mode 100644
index 0000000000..ef7ac249ec
--- /dev/null
+++ b/hw/misc/macio/Makefile.objs
@@ -0,0 +1,3 @@
+common-obj-y += macio.o
+common-obj-$(CONFIG_CUDA) += cuda.o
+common-obj-$(CONFIG_MAC_DBDMA) += mac_dbdma.o
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
new file mode 100644
index 0000000000..f797796a36
--- /dev/null
+++ b/hw/misc/macio/cuda.c
@@ -0,0 +1,740 @@
+/*
+ * QEMU PowerMac CUDA device support
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/input/adb.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+
+/* XXX: implement all timer modes */
+
+/* debug CUDA */
+//#define DEBUG_CUDA
+
+/* debug CUDA packets */
+//#define DEBUG_CUDA_PACKET
+
+#ifdef DEBUG_CUDA
+#define CUDA_DPRINTF(fmt, ...) \
+ do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define CUDA_DPRINTF(fmt, ...)
+#endif
+
+/* Bits in B data register: all active low */
+#define TREQ 0x08 /* Transfer request (input) */
+#define TACK 0x10 /* Transfer acknowledge (output) */
+#define TIP 0x20 /* Transfer in progress (output) */
+
+/* Bits in ACR */
+#define SR_CTRL 0x1c /* Shift register control bits */
+#define SR_EXT 0x0c /* Shift on external clock */
+#define SR_OUT 0x10 /* Shift out if 1 */
+
+/* Bits in IFR and IER */
+#define IER_SET 0x80 /* set bits in IER */
+#define IER_CLR 0 /* clear bits in IER */
+#define SR_INT 0x04 /* Shift register full/empty */
+#define T1_INT 0x40 /* Timer 1 interrupt */
+#define T2_INT 0x20 /* Timer 2 interrupt */
+
+/* Bits in ACR */
+#define T1MODE 0xc0 /* Timer 1 mode */
+#define T1MODE_CONT 0x40 /* continuous interrupts */
+
+/* commands (1st byte) */
+#define ADB_PACKET 0
+#define CUDA_PACKET 1
+#define ERROR_PACKET 2
+#define TIMER_PACKET 3
+#define POWER_PACKET 4
+#define MACIIC_PACKET 5
+#define PMU_PACKET 6
+
+
+/* CUDA commands (2nd byte) */
+#define CUDA_WARM_START 0x0
+#define CUDA_AUTOPOLL 0x1
+#define CUDA_GET_6805_ADDR 0x2
+#define CUDA_GET_TIME 0x3
+#define CUDA_GET_PRAM 0x7
+#define CUDA_SET_6805_ADDR 0x8
+#define CUDA_SET_TIME 0x9
+#define CUDA_POWERDOWN 0xa
+#define CUDA_POWERUP_TIME 0xb
+#define CUDA_SET_PRAM 0xc
+#define CUDA_MS_RESET 0xd
+#define CUDA_SEND_DFAC 0xe
+#define CUDA_BATTERY_SWAP_SENSE 0x10
+#define CUDA_RESET_SYSTEM 0x11
+#define CUDA_SET_IPL 0x12
+#define CUDA_FILE_SERVER_FLAG 0x13
+#define CUDA_SET_AUTO_RATE 0x14
+#define CUDA_GET_AUTO_RATE 0x16
+#define CUDA_SET_DEVICE_LIST 0x19
+#define CUDA_GET_DEVICE_LIST 0x1a
+#define CUDA_SET_ONE_SECOND_MODE 0x1b
+#define CUDA_SET_POWER_MESSAGES 0x21
+#define CUDA_GET_SET_IIC 0x22
+#define CUDA_WAKEUP 0x23
+#define CUDA_TIMER_TICKLE 0x24
+#define CUDA_COMBINED_FORMAT_IIC 0x25
+
+#define CUDA_TIMER_FREQ (4700000 / 6)
+#define CUDA_ADB_POLL_FREQ 50
+
+/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
+#define RTC_OFFSET 2082844800
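+/* 2082844800 = (66 * 365 + 17 leap days) * 86400 seconds between the two epochs */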
+
+static void cuda_update(CUDAState *s);
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len);
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time);
+
+static void cuda_update_irq(CUDAState *s)
+{
+ if (s->ifr & s->ier & (SR_INT | T1_INT)) {
+ qemu_irq_raise(s->irq);
+ } else {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static unsigned int get_counter(CUDATimer *s)
+{
+ int64_t d;
+ unsigned int counter;
+
+ d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
+ CUDA_TIMER_FREQ, get_ticks_per_sec());
+ if (s->index == 0) {
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+ } else {
+ counter = (s->counter_value - d) & 0xffff;
+ }
+ return counter;
+}
+
+static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
+{
+ CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
+ ti->load_time = qemu_get_clock_ns(vm_clock);
+ ti->counter_value = val;
+ cuda_timer_update(s, ti, ti->load_time);
+}
+
+static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
+{
+ int64_t d, next_time;
+ unsigned int counter;
+
+ /* current counter value */
+ d = muldiv64(current_time - s->load_time,
+ CUDA_TIMER_FREQ, get_ticks_per_sec());
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+
+ /* Note: we consider the irq is raised on 0 */
+ if (counter == 0xffff) {
+ next_time = d + s->latch + 1;
+ } else if (counter == 0) {
+ next_time = d + s->latch + 2;
+ } else {
+ next_time = d + counter;
+ }
+ CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
+ s->latch, d, next_time - d);
+ next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
+ s->load_time;
+ if (next_time <= current_time)
+ next_time = current_time + 1;
+ return next_time;
+}
+
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time)
+{
+ if (!ti->timer)
+ return;
+ if ((s->acr & T1MODE) != T1MODE_CONT) {
+ qemu_del_timer(ti->timer);
+ } else {
+ ti->next_irq_time = get_next_irq_time(ti, current_time);
+ qemu_mod_timer(ti->timer, ti->next_irq_time);
+ }
+}
+
+static void cuda_timer1(void *opaque)
+{
+ CUDAState *s = opaque;
+ CUDATimer *ti = &s->timers[0];
+
+ cuda_timer_update(s, ti, ti->next_irq_time);
+ s->ifr |= T1_INT;
+ cuda_update_irq(s);
+}
+
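+/* VIA registers are spaced 0x200 bytes apart in the 0x2000-byte MMIO region,
+   hence the (addr >> 9) & 0xf decode used below. */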
+static uint32_t cuda_readb(void *opaque, hwaddr addr)
+{
+ CUDAState *s = opaque;
+ uint32_t val;
+
+ addr = (addr >> 9) & 0xf;
+ switch(addr) {
+ case 0:
+ val = s->b;
+ break;
+ case 1:
+ val = s->a;
+ break;
+ case 2:
+ val = s->dirb;
+ break;
+ case 3:
+ val = s->dira;
+ break;
+ case 4:
+ val = get_counter(&s->timers[0]) & 0xff;
+ s->ifr &= ~T1_INT;
+ cuda_update_irq(s);
+ break;
+ case 5:
+ val = get_counter(&s->timers[0]) >> 8;
+ cuda_update_irq(s);
+ break;
+ case 6:
+ val = s->timers[0].latch & 0xff;
+ break;
+ case 7:
+ /* XXX: check this */
+ val = (s->timers[0].latch >> 8) & 0xff;
+ break;
+ case 8:
+ val = get_counter(&s->timers[1]) & 0xff;
+ s->ifr &= ~T2_INT;
+ break;
+ case 9:
+ val = get_counter(&s->timers[1]) >> 8;
+ break;
+ case 10:
+ val = s->sr;
+ s->ifr &= ~SR_INT;
+ cuda_update_irq(s);
+ break;
+ case 11:
+ val = s->acr;
+ break;
+ case 12:
+ val = s->pcr;
+ break;
+ case 13:
+ val = s->ifr;
+ if (s->ifr & s->ier)
+ val |= 0x80;
+ break;
+ case 14:
+ val = s->ier | 0x80;
+ break;
+ default:
+ case 15:
+ val = s->anh;
+ break;
+ }
+ if (addr != 13 || val != 0) {
+ CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
+ }
+
+ return val;
+}
+
+static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+ CUDAState *s = opaque;
+
+ addr = (addr >> 9) & 0xf;
+ CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);
+
+ switch(addr) {
+ case 0:
+ s->b = val;
+ cuda_update(s);
+ break;
+ case 1:
+ s->a = val;
+ break;
+ case 2:
+ s->dirb = val;
+ break;
+ case 3:
+ s->dira = val;
+ break;
+ case 4:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 5:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ set_counter(s, &s->timers[0], s->timers[0].latch);
+ break;
+ case 6:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 7:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 8:
+ s->timers[1].latch = val;
+ set_counter(s, &s->timers[1], val);
+ break;
+ case 9:
+ set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
+ break;
+ case 10:
+ s->sr = val;
+ break;
+ case 11:
+ s->acr = val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_update(s);
+ break;
+ case 12:
+ s->pcr = val;
+ break;
+ case 13:
+ /* reset bits */
+ s->ifr &= ~val;
+ cuda_update_irq(s);
+ break;
+ case 14:
+ if (val & IER_SET) {
+ /* set bits */
+ s->ier |= val & 0x7f;
+ } else {
+ /* reset bits */
+ s->ier &= ~val;
+ }
+ cuda_update_irq(s);
+ break;
+ default:
+ case 15:
+ s->anh = val;
+ break;
+ }
+}
+
+/* NOTE: TIP and TREQ are negated */
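+/* Each change of TACK (or TIP) while the host holds TIP low shifts one byte
+ * through the shift register and raises SR_INT; TREQ is pulled low whenever
+ * response data is pending for the host to read. */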
+static void cuda_update(CUDAState *s)
+{
+ int packet_received, len;
+
+ packet_received = 0;
+ if (!(s->b & TIP)) {
+ /* transfer requested from host */
+
+ if (s->acr & SR_OUT) {
+ /* data output */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ if (s->data_out_index < sizeof(s->data_out)) {
+ CUDA_DPRINTF("send: %02x\n", s->sr);
+ s->data_out[s->data_out_index++] = s->sr;
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ } else {
+ if (s->data_in_index < s->data_in_size) {
+ /* data input */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ s->sr = s->data_in[s->data_in_index++];
+ CUDA_DPRINTF("recv: %02x\n", s->sr);
+ /* indicate end of transfer */
+ if (s->data_in_index >= s->data_in_size) {
+ s->b = (s->b | TREQ);
+ }
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ }
+ } else {
+ /* no transfer requested: handle sync case */
+ if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
+ /* update TREQ state each time TACK change state */
+ if (s->b & TACK)
+ s->b = (s->b | TREQ);
+ else
+ s->b = (s->b & ~TREQ);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ } else {
+ if (!(s->last_b & TIP)) {
+ /* handle end of host to cuda transfer */
+ packet_received = (s->data_out_index > 0);
+ /* always an IRQ at the end of transfer */
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ /* signal if there is data to read */
+ if (s->data_in_index < s->data_in_size) {
+ s->b = (s->b & ~TREQ);
+ }
+ }
+ }
+
+ s->last_acr = s->acr;
+ s->last_b = s->b;
+
+ /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
+ recursively */
+ if (packet_received) {
+ len = s->data_out_index;
+ s->data_out_index = 0;
+ cuda_receive_packet_from_host(s, s->data_out, len);
+ }
+}
+
+static void cuda_send_packet_to_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_send_packet_to_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ memcpy(s->data_in, data, len);
+ s->data_in_size = len;
+ s->data_in_index = 0;
+ cuda_update(s);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+}
+
+static void cuda_adb_poll(void *opaque)
+{
+ CUDAState *s = opaque;
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+
+ olen = adb_poll(&s->adb_bus, obuf + 2);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x40; /* polled data */
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ qemu_mod_timer(s->adb_poll_timer,
+ qemu_get_clock_ns(vm_clock) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+}
+
+static void cuda_receive_packet(CUDAState *s,
+ const uint8_t *data, int len)
+{
+ uint8_t obuf[16];
+ int autopoll;
+ uint32_t ti;
+
+ switch(data[0]) {
+ case CUDA_AUTOPOLL:
+ autopoll = (data[1] != 0);
+ if (autopoll != s->autopoll) {
+ s->autopoll = autopoll;
+ if (autopoll) {
+ qemu_mod_timer(s->adb_poll_timer,
+ qemu_get_clock_ns(vm_clock) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+ } else {
+ qemu_del_timer(s->adb_poll_timer);
+ }
+ }
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = data[1];
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
+ case CUDA_SET_TIME:
+ ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
+ s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ cuda_send_packet_to_host(s, obuf, 3);
+ break;
+ case CUDA_GET_TIME:
+ ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ obuf[3] = ti >> 24;
+ obuf[4] = ti >> 16;
+ obuf[5] = ti >> 8;
+ obuf[6] = ti;
+ cuda_send_packet_to_host(s, obuf, 7);
+ break;
+ case CUDA_FILE_SERVER_FLAG:
+ case CUDA_SET_DEVICE_LIST:
+ case CUDA_SET_AUTO_RATE:
+ case CUDA_SET_POWER_MESSAGES:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
+ case CUDA_POWERDOWN:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_shutdown_request();
+ break;
+ case CUDA_RESET_SYSTEM:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_reset_request();
+ break;
+ default:
+ break;
+ }
+}
+
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_receive_packet_from_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ switch(data[0]) {
+ case ADB_PACKET:
+ {
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+ olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x00;
+ } else {
+ /* error */
+ obuf[0] = ADB_PACKET;
+ obuf[1] = -olen;
+ olen = 0;
+ }
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ break;
+ case CUDA_PACKET:
+ cuda_receive_packet(s, data + 1, len - 1);
+ break;
+ }
+}
+
+static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static uint32_t cuda_readw (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static uint32_t cuda_readl (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static const MemoryRegionOps cuda_ops = {
+ .old_mmio = {
+ .write = {
+ cuda_writeb,
+ cuda_writew,
+ cuda_writel,
+ },
+ .read = {
+ cuda_readb,
+ cuda_readw,
+ cuda_readl,
+ },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static bool cuda_timer_exist(void *opaque, int version_id)
+{
+ CUDATimer *s = opaque;
+
+ return s->timer != NULL;
+}
+
+static const VMStateDescription vmstate_cuda_timer = {
+ .name = "cuda_timer",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(latch, CUDATimer),
+ VMSTATE_UINT16(counter_value, CUDATimer),
+ VMSTATE_INT64(load_time, CUDATimer),
+ VMSTATE_INT64(next_irq_time, CUDATimer),
+ VMSTATE_TIMER_TEST(timer, CUDATimer, cuda_timer_exist),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cuda = {
+ .name = "cuda",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(a, CUDAState),
+ VMSTATE_UINT8(b, CUDAState),
+ VMSTATE_UINT8(dira, CUDAState),
+ VMSTATE_UINT8(dirb, CUDAState),
+ VMSTATE_UINT8(sr, CUDAState),
+ VMSTATE_UINT8(acr, CUDAState),
+ VMSTATE_UINT8(pcr, CUDAState),
+ VMSTATE_UINT8(ifr, CUDAState),
+ VMSTATE_UINT8(ier, CUDAState),
+ VMSTATE_UINT8(anh, CUDAState),
+ VMSTATE_INT32(data_in_size, CUDAState),
+ VMSTATE_INT32(data_in_index, CUDAState),
+ VMSTATE_INT32(data_out_index, CUDAState),
+ VMSTATE_UINT8(autopoll, CUDAState),
+ VMSTATE_BUFFER(data_in, CUDAState),
+ VMSTATE_BUFFER(data_out, CUDAState),
+ VMSTATE_UINT32(tick_offset, CUDAState),
+ VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
+ vmstate_cuda_timer, CUDATimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void cuda_reset(DeviceState *dev)
+{
+ CUDAState *s = CUDA(dev);
+
+ s->b = 0;
+ s->a = 0;
+ s->dirb = 0;
+ s->dira = 0;
+ s->sr = 0;
+ s->acr = 0;
+ s->pcr = 0;
+ s->ifr = 0;
+ s->ier = 0;
+ // s->ier = T1_INT | SR_INT;
+ s->anh = 0;
+ s->data_in_size = 0;
+ s->data_in_index = 0;
+ s->data_out_index = 0;
+ s->autopoll = 0;
+
+ s->timers[0].latch = 0xffff;
+ set_counter(s, &s->timers[0], 0xffff);
+
+ s->timers[1].latch = 0;
+ set_counter(s, &s->timers[1], 0xffff);
+}
+
+static void cuda_realizefn(DeviceState *dev, Error **errp)
+{
+ CUDAState *s = CUDA(dev);
+ struct tm tm;
+
+ s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
+
+ qemu_get_timedate(&tm, 0);
+ s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
+
+ s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
+}
+
+static void cuda_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ CUDAState *s = CUDA(obj);
+ int i;
+
+ memory_region_init_io(&s->mem, &cuda_ops, s, "cuda", 0x2000);
+ sysbus_init_mmio(d, &s->mem);
+ sysbus_init_irq(d, &s->irq);
+
+ for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
+ s->timers[i].index = i;
+ }
+
+ qbus_create_inplace((BusState *)&s->adb_bus, TYPE_ADB_BUS, DEVICE(obj),
+ "adb.0");
+}
+
+static void cuda_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = cuda_realizefn;
+ dc->reset = cuda_reset;
+ dc->vmsd = &vmstate_cuda;
+}
+
+static const TypeInfo cuda_type_info = {
+ .name = TYPE_CUDA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(CUDAState),
+ .instance_init = cuda_initfn,
+ .class_init = cuda_class_init,
+};
+
+static void cuda_register_types(void)
+{
+ type_register_static(&cuda_type_info);
+}
+
+type_init(cuda_register_types)
diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
new file mode 100644
index 0000000000..a2363bbdf2
--- /dev/null
+++ b/hw/misc/macio/mac_dbdma.c
@@ -0,0 +1,859 @@
+/*
+ * PowerMac descriptor-based DMA emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2009 Laurent Vivier
+ *
+ * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
+ *
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * some parts from mol 0.9.71
+ *
+ * Descriptor based DMA emulation
+ *
+ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "qemu/main-loop.h"
+
+/* debug DBDMA */
+//#define DEBUG_DBDMA
+
+#ifdef DEBUG_DBDMA
+#define DBDMA_DPRINTF(fmt, ...) \
+ do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DBDMA_DPRINTF(fmt, ...)
+#endif
+
+/*
+ */
+
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+
+#define DBDMA_CONTROL 0x00
+#define DBDMA_STATUS 0x01
+#define DBDMA_CMDPTR_HI 0x02
+#define DBDMA_CMDPTR_LO 0x03
+#define DBDMA_INTR_SEL 0x04
+#define DBDMA_BRANCH_SEL 0x05
+#define DBDMA_WAIT_SEL 0x06
+#define DBDMA_XFER_MODE 0x07
+#define DBDMA_DATA2PTR_HI 0x08
+#define DBDMA_DATA2PTR_LO 0x09
+#define DBDMA_RES1 0x0A
+#define DBDMA_ADDRESS_HI 0x0B
+#define DBDMA_BRANCH_ADDR_HI 0x0C
+#define DBDMA_RES2 0x0D
+#define DBDMA_RES3 0x0E
+#define DBDMA_RES4 0x0F
+
+#define DBDMA_REGS 16
+#define DBDMA_SIZE (DBDMA_REGS * sizeof(uint32_t))
+
+#define DBDMA_CHANNEL_SHIFT 7
+#define DBDMA_CHANNEL_SIZE (1 << DBDMA_CHANNEL_SHIFT)
+
+#define DBDMA_CHANNELS (0x1000 >> DBDMA_CHANNEL_SHIFT)
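+/* 0x1000 bytes of register space / 0x80 bytes per channel = 32 channels */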
+
+/* Bits in control and status registers */
+
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+
+typedef struct dbdma_cmd {
+ uint16_t req_count; /* requested byte transfer count */
+ uint16_t command; /* command word (has bit-fields) */
+ uint32_t phy_addr; /* physical data address */
+ uint32_t cmd_dep; /* command-dependent field */
+ uint16_t res_count; /* residual count after completion */
+ uint16_t xfer_status; /* transfer status */
+} dbdma_cmd;
+
+/* DBDMA command values in command field */
+
+#define COMMAND_MASK 0xf000
+#define OUTPUT_MORE 0x0000 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+
+#define KEY_MASK 0x0700
+#define KEY_STREAM0 0x0000 /* usual data stream */
+#define KEY_STREAM1 0x0100 /* control/status stream */
+#define KEY_STREAM2 0x0200 /* device-dependent stream */
+#define KEY_STREAM3 0x0300 /* device-dependent stream */
+#define KEY_STREAM4 0x0400 /* reserved */
+#define KEY_REGS 0x0500 /* device register space */
+#define KEY_SYSTEM 0x0600 /* system memory-mapped space */
+#define KEY_DEVICE 0x0700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+
+#define INTR_MASK 0x0030
+#define INTR_NEVER 0x0000 /* don't interrupt */
+#define INTR_IFSET 0x0010 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x0020 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x0030 /* always interrupt */
+
+/* Branch control values in command field */
+
+#define BR_MASK 0x000c
+#define BR_NEVER 0x0000 /* don't branch */
+#define BR_IFSET 0x0004 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x0008 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0x000c /* always branch */
+
+/* Wait control values in command field */
+
+#define WAIT_MASK 0x0003
+#define WAIT_NEVER 0x0000 /* don't wait */
+#define WAIT_IFSET 0x0001 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 0x0002 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 0x0003 /* always wait */
+
+typedef struct DBDMA_channel {
+ int channel;
+ uint32_t regs[DBDMA_REGS];
+ qemu_irq irq;
+ DBDMA_io io;
+ DBDMA_rw rw;
+ DBDMA_flush flush;
+ dbdma_cmd current;
+ int processing;
+} DBDMA_channel;
+
+typedef struct {
+ MemoryRegion mem;
+ DBDMA_channel channels[DBDMA_CHANNELS];
+} DBDMAState;
+
+#ifdef DEBUG_DBDMA
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+ printf("dbdma_cmd %p\n", cmd);
+ printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
+ printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
+ printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
+ printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
+ printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
+ printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
+}
+#else
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+}
+#endif
+static void dbdma_cmdptr_load(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO],
+ (uint8_t*)&ch->current, sizeof(dbdma_cmd));
+}
+
+static void dbdma_cmdptr_save(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n",
+ le16_to_cpu(ch->current.xfer_status),
+ le16_to_cpu(ch->current.res_count));
+ cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO],
+ (uint8_t*)&ch->current, sizeof(dbdma_cmd));
+}
+
+static void kill_channel(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("kill_channel\n");
+
+ ch->regs[DBDMA_STATUS] |= DEAD;
+ ch->regs[DBDMA_STATUS] &= ~ACTIVE;
+
+ qemu_irq_raise(ch->irq);
+}
+
+static void conditional_interrupt(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t intr;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_interrupt\n");
+
+ intr = le16_to_cpu(current->command) & INTR_MASK;
+
+ switch(intr) {
+ case INTR_NEVER: /* don't interrupt */
+ return;
+ case INTR_ALWAYS: /* always interrupt */
+ qemu_irq_raise(ch->irq);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(intr) {
+ case INTR_IFSET: /* intr if condition bit is 1 */
+ if (cond)
+ qemu_irq_raise(ch->irq);
+ return;
+ case INTR_IFCLR: /* intr if condition bit is 0 */
+ if (!cond)
+ qemu_irq_raise(ch->irq);
+ return;
+ }
+}
+
+static int conditional_wait(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t wait;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_wait\n");
+
+ wait = le16_to_cpu(current->command) & WAIT_MASK;
+
+ switch(wait) {
+ case WAIT_NEVER: /* don't wait */
+ return 0;
+ case WAIT_ALWAYS: /* always wait */
+ return 1;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(wait) {
+ case WAIT_IFSET: /* wait if condition bit is 1 */
+ if (cond)
+ return 1;
+ return 0;
+ case WAIT_IFCLR: /* wait if condition bit is 0 */
+ if (!cond)
+ return 1;
+ return 0;
+ }
+ return 0;
+}
+
+static void next(DBDMA_channel *ch)
+{
+ uint32_t cp;
+
+ ch->regs[DBDMA_STATUS] &= ~BT;
+
+ cp = ch->regs[DBDMA_CMDPTR_LO];
+ ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
+ dbdma_cmdptr_load(ch);
+}
+
+static void branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep;
+ ch->regs[DBDMA_STATUS] |= BT;
+ dbdma_cmdptr_load(ch);
+}
+
+static void conditional_branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t br;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_branch\n");
+
+ /* check if we must branch */
+
+ br = le16_to_cpu(current->command) & BR_MASK;
+
+ switch(br) {
+ case BR_NEVER: /* don't branch */
+ next(ch);
+ return;
+ case BR_ALWAYS: /* always branch */
+ branch(ch);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(br) {
+ case BR_IFSET: /* branch if condition bit is 1 */
+ if (cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ case BR_IFCLR: /* branch if condition bit is 0 */
+ if (!cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ }
+}
+
+static QEMUBH *dbdma_bh;
+static void channel_run(DBDMA_channel *ch);
+
+static void dbdma_end(DBDMA_io *io)
+{
+ DBDMA_channel *ch = io->channel;
+ dbdma_cmd *current = &ch->current;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ current->res_count = cpu_to_le16(io->len);
+ dbdma_cmdptr_save(ch);
+ if (io->is_last)
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ ch->processing = 0;
+ if ((ch->regs[DBDMA_STATUS] & RUN) &&
+ (ch->regs[DBDMA_STATUS] & ACTIVE))
+ channel_run(ch);
+}
+
+static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_output\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 1;
+ ch->processing = 1;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_input\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 0;
+ ch->processing = 1;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("load_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ cpu_physical_memory_read(addr, (uint8_t*)&val, len);
+
+ if (len == 2)
+ val = (val << 16) | (current->cmd_dep & 0x0000ffff);
+ else if (len == 1)
+ val = (val << 24) | (current->cmd_dep & 0x00ffffff);
+
+ current->cmd_dep = val;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("store_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ val = current->cmd_dep;
+ if (len == 2)
+ val >>= 16;
+ else if (len == 1)
+ val >>= 24;
+
+ cpu_physical_memory_write(addr, (uint8_t*)&val, len);
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void nop(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void stop(DBDMA_channel *ch)
+{
+ ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
+
+ /* the stop command does not increment command pointer */
+}
+
+static void channel_run(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t cmd, key;
+ uint16_t req_count;
+ uint32_t phy_addr;
+
+ DBDMA_DPRINTF("channel_run\n");
+ dump_dbdma_cmd(current);
+
+ /* clear WAKE flag at command fetch */
+
+ ch->regs[DBDMA_STATUS] &= ~WAKE;
+
+ cmd = le16_to_cpu(current->command) & COMMAND_MASK;
+
+ switch (cmd) {
+ case DBDMA_NOP:
+ nop(ch);
+ return;
+
+ case DBDMA_STOP:
+ stop(ch);
+ return;
+ }
+
+ key = le16_to_cpu(current->command) & 0x0700;
+ req_count = le16_to_cpu(current->req_count);
+ phy_addr = le32_to_cpu(current->phy_addr);
+
+ if (key == KEY_STREAM4) {
+ printf("command %x, invalid key 4\n", cmd);
+ kill_channel(ch);
+ return;
+ }
+
+ switch (cmd) {
+ case OUTPUT_MORE:
+ start_output(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case OUTPUT_LAST:
+ start_output(ch, key, phy_addr, req_count, 1);
+ return;
+
+ case INPUT_MORE:
+ start_input(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case INPUT_LAST:
+ start_input(ch, key, phy_addr, req_count, 1);
+ return;
+ }
+
+ if (key < KEY_REGS) {
+ printf("command %x, invalid key %x\n", cmd, key);
+ key = KEY_SYSTEM;
+ }
+
+ /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits
+ * and BRANCH is invalid
+ */
+
+ req_count = req_count & 0x0007;
+ if (req_count & 0x4) {
+ req_count = 4;
+ phy_addr &= ~3;
+ } else if (req_count & 0x2) {
+ req_count = 2;
+ phy_addr &= ~1;
+ } else
+ req_count = 1;
+
+ switch (cmd) {
+ case LOAD_WORD:
+ load_word(ch, key, phy_addr, req_count);
+ return;
+
+ case STORE_WORD:
+ store_word(ch, key, phy_addr, req_count);
+ return;
+ }
+}
+
+static void DBDMA_run(DBDMAState *s)
+{
+ int channel;
+
+ for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
+ DBDMA_channel *ch = &s->channels[channel];
+ uint32_t status = ch->regs[DBDMA_STATUS];
+ if (!ch->processing && (status & RUN) && (status & ACTIVE)) {
+ channel_run(ch);
+ }
+ }
+}
+
+static void DBDMA_run_bh(void *opaque)
+{
+ DBDMAState *s = opaque;
+
+ DBDMA_DPRINTF("DBDMA_run_bh\n");
+
+ DBDMA_run(s);
+}
+
+void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
+ DBDMA_rw rw, DBDMA_flush flush,
+ void *opaque)
+{
+ DBDMAState *s = dbdma;
+ DBDMA_channel *ch = &s->channels[nchan];
+
+ DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan);
+
+ ch->irq = irq;
+ ch->channel = nchan;
+ ch->rw = rw;
+ ch->flush = flush;
+ ch->io.opaque = opaque;
+ ch->io.channel = ch;
+}
+
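+/* A CONTROL write carries a bit mask in its high 16 bits and the new bit
+ * values in its low 16 bits; only RUN, PAUSE, FLUSH, WAKE and the device
+ * status bits can be changed this way. */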
+static void
+dbdma_control_write(DBDMA_channel *ch)
+{
+ uint16_t mask, value;
+ uint32_t status;
+
+ mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
+ value = ch->regs[DBDMA_CONTROL] & 0xffff;
+
+ value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
+
+ status = ch->regs[DBDMA_STATUS];
+
+ status = (value & mask) | (status & ~mask);
+
+ if (status & WAKE)
+ status |= ACTIVE;
+ if (status & RUN) {
+ status |= ACTIVE;
+ status &= ~DEAD;
+ }
+ if (status & PAUSE)
+ status &= ~ACTIVE;
+ if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
+ /* RUN is cleared */
+ status &= ~(ACTIVE|DEAD);
+ if ((status & FLUSH) && ch->flush) {
+ ch->flush(&ch->io);
+ status &= ~FLUSH;
+ }
+ }
+
+ DBDMA_DPRINTF(" status 0x%08x\n", status);
+
+ ch->regs[DBDMA_STATUS] = status;
+
+ if (status & ACTIVE)
+ qemu_bh_schedule(dbdma_bh);
+ if ((status & FLUSH) && ch->flush)
+ ch->flush(&ch->io);
+}
+
+static void dbdma_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08x\n", addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ /* cmdptr cannot be modified if channel is RUN or ACTIVE */
+
+ if (reg == DBDMA_CMDPTR_LO &&
+ (ch->regs[DBDMA_STATUS] & (RUN | ACTIVE)))
+ return;
+
+ ch->regs[reg] = value;
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ dbdma_control_write(ch);
+ break;
+ case DBDMA_CMDPTR_LO:
+ /* 16-byte aligned */
+ ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
+ dbdma_cmdptr_load(ch);
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* unused */
+ break;
+ }
+}
+
+static uint64_t dbdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint32_t value;
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ value = ch->regs[reg];
+
+ DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ value = 0;
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_CMDPTR_LO:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ /* unused */
+ value = 0;
+ break;
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* reserved */
+ break;
+ }
+
+ return value;
+}
+
+static const MemoryRegionOps dbdma_ops = {
+ .read = dbdma_read,
+ .write = dbdma_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const VMStateDescription vmstate_dbdma_channel = {
+ .name = "dbdma_channel",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_dbdma = {
+ .name = "dbdma",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .minimum_version_id_old = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
+ vmstate_dbdma_channel, DBDMA_channel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void dbdma_reset(void *opaque)
+{
+ DBDMAState *s = opaque;
+ int i;
+
+ for (i = 0; i < DBDMA_CHANNELS; i++)
+ memset(s->channels[i].regs, 0, DBDMA_SIZE);
+}
+
+void* DBDMA_init (MemoryRegion **dbdma_mem)
+{
+ DBDMAState *s;
+
+ s = g_malloc0(sizeof(DBDMAState));
+
+ memory_region_init_io(&s->mem, &dbdma_ops, s, "dbdma", 0x1000);
+ *dbdma_mem = &s->mem;
+ vmstate_register(NULL, -1, &vmstate_dbdma, s);
+ qemu_register_reset(dbdma_reset, s);
+
+ dbdma_bh = qemu_bh_new(DBDMA_run_bh, s);
+
+ return s;
+}
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
new file mode 100644
index 0000000000..2f389dd7cc
--- /dev/null
+++ b/hw/misc/macio/macio.c
@@ -0,0 +1,305 @@
+/*
+ * PowerMac MacIO device emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "hw/char/escc.h"
+
+#define TYPE_MACIO "macio"
+#define MACIO(obj) OBJECT_CHECK(MacIOState, (obj), TYPE_MACIO)
+
+typedef struct MacIOState
+{
+ /*< private >*/
+ PCIDevice parent;
+ /*< public >*/
+
+ MemoryRegion bar;
+ CUDAState cuda;
+ void *dbdma;
+ MemoryRegion *pic_mem;
+ MemoryRegion *escc_mem;
+} MacIOState;
+
+#define OLDWORLD_MACIO(obj) \
+ OBJECT_CHECK(OldWorldMacIOState, (obj), TYPE_OLDWORLD_MACIO)
+
+typedef struct OldWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+
+ qemu_irq irqs[3];
+
+ MacIONVRAMState nvram;
+ MACIOIDEState ide;
+} OldWorldMacIOState;
+
+#define NEWWORLD_MACIO(obj) \
+ OBJECT_CHECK(NewWorldMacIOState, (obj), TYPE_NEWWORLD_MACIO)
+
+typedef struct NewWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+ qemu_irq irqs[5];
+ MACIOIDEState ide[2];
+} NewWorldMacIOState;
+
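+/* Offsets used below inside the 512 KiB MacIO BAR: 0x08000 DBDMA,
+ * 0x13000 ESCC, 0x16000 CUDA, 0x20000+ IDE, 0x60000 NVRAM (Old World);
+ * the interrupt controller sits at 0x00000 (Heathrow) or 0x40000 (OpenPIC). */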
+static void macio_bar_setup(MacIOState *macio_state)
+{
+ MemoryRegion *bar = &macio_state->bar;
+
+ if (macio_state->escc_mem) {
+ memory_region_add_subregion(bar, 0x13000, macio_state->escc_mem);
+ }
+}
+
+static int macio_common_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret;
+
+ d->config[0x3d] = 0x01; // interrupt on pin 1
+
+ ret = qdev_init(DEVICE(&s->cuda));
+ if (ret < 0) {
+ return ret;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ memory_region_add_subregion(&s->bar, 0x16000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+
+ macio_bar_setup(s);
+ pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
+
+ return 0;
+}
+
+static int macio_oldworld_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret = macio_common_initfn(d);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, os->irqs[0]);
+
+ ret = qdev_init(DEVICE(&os->nvram));
+ if (ret < 0) {
+ return ret;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&os->nvram);
+ memory_region_add_subregion(&s->bar, 0x60000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+ pmac_format_nvram_partition(&os->nvram, os->nvram.size);
+
+ if (s->pic_mem) {
+ /* Heathrow PIC */
+ memory_region_add_subregion(&s->bar, 0x00000, s->pic_mem);
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&os->ide);
+ sysbus_connect_irq(sysbus_dev, 0, os->irqs[1]);
+ sysbus_connect_irq(sysbus_dev, 1, os->irqs[2]);
+ macio_ide_register_dma(&os->ide, s->dbdma, 0x16);
+ ret = qdev_init(DEVICE(&os->ide));
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static void macio_oldworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(obj);
+ DeviceState *dev;
+
+ qdev_init_gpio_out(DEVICE(obj), os->irqs, ARRAY_SIZE(os->irqs));
+
+ object_initialize(&os->nvram, TYPE_MACIO_NVRAM);
+ dev = DEVICE(&os->nvram);
+ qdev_prop_set_uint32(dev, "size", 0x2000);
+ qdev_prop_set_uint32(dev, "it_shift", 4);
+
+ object_initialize(&os->ide, TYPE_MACIO_IDE);
+ qdev_set_parent_bus(DEVICE(&os->ide), sysbus_get_default());
+ memory_region_add_subregion(&s->bar, 0x1f000 + (1 * 0x1000), &os->ide.mem);
+ object_property_add_child(obj, "ide", OBJECT(&os->ide), NULL);
+}
+
+static int macio_newworld_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret = macio_common_initfn(d);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[0]);
+
+ if (s->pic_mem) {
+ /* OpenPIC */
+ memory_region_add_subregion(&s->bar, 0x40000, s->pic_mem);
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&ns->ide[0]);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[1]);
+ sysbus_connect_irq(sysbus_dev, 1, ns->irqs[2]);
+ macio_ide_register_dma(&ns->ide[0], s->dbdma, 0x16);
+ ret = qdev_init(DEVICE(&ns->ide[0]));
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&ns->ide[1]);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[3]);
+ sysbus_connect_irq(sysbus_dev, 1, ns->irqs[4]);
+ macio_ide_register_dma(&ns->ide[1], s->dbdma, 0x1a);
+ ret = qdev_init(DEVICE(&ns->ide[1]));
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static void macio_newworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(obj);
+ int i;
+ gchar *name;
+
+ qdev_init_gpio_out(DEVICE(obj), ns->irqs, ARRAY_SIZE(ns->irqs));
+
+ for (i = 0; i < 2; i++) {
+ object_initialize(&ns->ide[i], TYPE_MACIO_IDE);
+ qdev_set_parent_bus(DEVICE(&ns->ide[i]), sysbus_get_default());
+ memory_region_add_subregion(&s->bar, 0x1f000 + ((i + 1) * 0x1000),
+ &ns->ide[i].mem);
+ name = g_strdup_printf("ide[%i]", i);
+ object_property_add_child(obj, name, OBJECT(&ns->ide[i]), NULL);
+ g_free(name);
+ }
+}
+
+static void macio_instance_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ MemoryRegion *dbdma_mem;
+
+ memory_region_init(&s->bar, "macio", 0x80000);
+
+ object_initialize(&s->cuda, TYPE_CUDA);
+ qdev_set_parent_bus(DEVICE(&s->cuda), sysbus_get_default());
+ object_property_add_child(obj, "cuda", OBJECT(&s->cuda), NULL);
+
+ s->dbdma = DBDMA_init(&dbdma_mem);
+ memory_region_add_subregion(&s->bar, 0x08000, dbdma_mem);
+}
+
+static void macio_oldworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+
+ pdc->init = macio_oldworld_initfn;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_343S1201;
+}
+
+static void macio_newworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+
+ pdc->init = macio_newworld_initfn;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL;
+}
+
+static void macio_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->vendor_id = PCI_VENDOR_ID_APPLE;
+ k->class_id = PCI_CLASS_OTHERS << 8;
+}
+
+static const TypeInfo macio_oldworld_type_info = {
+ .name = TYPE_OLDWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(OldWorldMacIOState),
+ .instance_init = macio_oldworld_init,
+ .class_init = macio_oldworld_class_init,
+};
+
+static const TypeInfo macio_newworld_type_info = {
+ .name = TYPE_NEWWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(NewWorldMacIOState),
+ .instance_init = macio_newworld_init,
+ .class_init = macio_newworld_class_init,
+};
+
+static const TypeInfo macio_type_info = {
+ .name = TYPE_MACIO,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MacIOState),
+ .instance_init = macio_instance_init,
+ .abstract = true,
+ .class_init = macio_class_init,
+};
+
+static void macio_register_types(void)
+{
+ type_register_static(&macio_type_info);
+ type_register_static(&macio_oldworld_type_info);
+ type_register_static(&macio_newworld_type_info);
+}
+
+type_init(macio_register_types)
+
+void macio_init(PCIDevice *d,
+ MemoryRegion *pic_mem,
+ MemoryRegion *escc_mem)
+{
+ MacIOState *macio_state = MACIO(d);
+
+ macio_state->pic_mem = pic_mem;
+ macio_state->escc_mem = escc_mem;
+    /* Note: this code is strongly inspired by the corresponding code
+       in PearPC */
+
+ qdev_init_nofail(DEVICE(d));
+}