Diffstat (limited to 'target')
-rw-r--r--  target/avr/Makefile.objs      34
-rw-r--r--  target/avr/cpu-param.h        36
-rw-r--r--  target/avr/cpu-qom.h          53
-rw-r--r--  target/avr/cpu.c             366
-rw-r--r--  target/avr/cpu.h             256
-rw-r--r--  target/avr/disas.c           245
-rw-r--r--  target/avr/gdbstub.c          84
-rw-r--r--  target/avr/helper.c          348
-rw-r--r--  target/avr/helper.h           29
-rw-r--r--  target/avr/insn.decode       187
-rw-r--r--  target/avr/machine.c         119
-rw-r--r--  target/avr/translate.c      3061
12 files changed, 4818 insertions, 0 deletions
diff --git a/target/avr/Makefile.objs b/target/avr/Makefile.objs
new file mode 100644
index 0000000000..6e35ba2c5c
--- /dev/null
+++ b/target/avr/Makefile.objs
@@ -0,0 +1,34 @@
+#
+# QEMU AVR
+#
+# Copyright (c) 2016-2020 Michael Rolnik
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see
+# <http://www.gnu.org/licenses/lgpl-2.1.html>
+#
+
+DECODETREE = $(SRC_PATH)/scripts/decodetree.py
+decode-y = $(SRC_PATH)/target/avr/insn.decode
+
+target/avr/decode_insn.inc.c: $(decode-y) $(DECODETREE)
+ $(call quiet-command, \
+ $(PYTHON) $(DECODETREE) -o $@ --decode decode_insn --insnwidth 16 $<, \
+ "GEN", $(TARGET_DIR)$@)
+
+target/avr/translate.o: target/avr/decode_insn.inc.c
+
+obj-y += translate.o cpu.o helper.o
+obj-y += gdbstub.o
+obj-y += disas.o
+obj-$(CONFIG_SOFTMMU) += machine.o
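
For reference, the generation rule above expands to roughly the following command (assuming $(PYTHON) is python3 and paths are relative to the QEMU source tree); the generated decode_insn.inc.c is included by both translate.c and disas.c below:

# python3 scripts/decodetree.py -o target/avr/decode_insn.inc.c \
#         --decode decode_insn --insnwidth 16 target/avr/insn.decode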
diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h
new file mode 100644
index 0000000000..7ef4e7c679
--- /dev/null
+++ b/target/avr/cpu-param.h
@@ -0,0 +1,36 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef AVR_CPU_PARAM_H
+#define AVR_CPU_PARAM_H
+
+#define TARGET_LONG_BITS 32
+/*
+ * TARGET_PAGE_BITS cannot be more than 8 bits because
+ * 1. all IO registers occupy [0x0000 .. 0x00ff] address range, and they
+ * should be implemented as a device and not memory
+ * 2. SRAM starts at the address 0x0100
+ */
+#define TARGET_PAGE_BITS 8
+#define TARGET_PHYS_ADDR_SPACE_BITS 24
+#define TARGET_VIRT_ADDR_SPACE_BITS 24
+#define NB_MMU_MODES 2
+
+#endif
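
To make the page-size reasoning above concrete, here is a minimal standalone sketch (not part of the patch; the macro name is made up): with 8-bit pages the I/O range [0x0000 .. 0x00ff] fills exactly one page, which can be backed by a device, while SRAM at 0x0100 begins on the next page boundary.

/* Sketch only. */
#define AVR_SKETCH_PAGE_SIZE (1u << 8)              /* = 1 << TARGET_PAGE_BITS */
_Static_assert(0x0100 % AVR_SKETCH_PAGE_SIZE == 0,
               "SRAM must begin on a page boundary");
_Static_assert(0x00ff < AVR_SKETCH_PAGE_SIZE,
               "all I/O registers must fit in the first page");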
diff --git a/target/avr/cpu-qom.h b/target/avr/cpu-qom.h
new file mode 100644
index 0000000000..d23ad43a99
--- /dev/null
+++ b/target/avr/cpu-qom.h
@@ -0,0 +1,53 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef QEMU_AVR_QOM_H
+#define QEMU_AVR_QOM_H
+
+#include "hw/core/cpu.h"
+
+#define TYPE_AVR_CPU "avr-cpu"
+
+#define AVR_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(AVRCPUClass, (klass), TYPE_AVR_CPU)
+#define AVR_CPU(obj) \
+ OBJECT_CHECK(AVRCPU, (obj), TYPE_AVR_CPU)
+#define AVR_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(AVRCPUClass, (obj), TYPE_AVR_CPU)
+
+/**
+ * AVRCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An AVR CPU model.
+ */
+typedef struct AVRCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+} AVRCPUClass;
+
+typedef struct AVRCPU AVRCPU;
+
+#endif /* !defined (QEMU_AVR_QOM_H) */
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
new file mode 100644
index 0000000000..5d9c4ad5bf
--- /dev/null
+++ b/target/avr/cpu.c
@@ -0,0 +1,366 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2019-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include "cpu.h"
+#include "disas/dis-asm.h"
+
+static void avr_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+
+ cpu->env.pc_w = value / 2; /* internally PC points to words */
+}
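
The word/byte distinction in the comment above recurs throughout the port (synchronize_from_tb, the gdbstub and the state dump all multiply or divide by two); a tiny sketch with hypothetical helper names:

/* Illustration only: the guest-visible PC is a byte address into flash,
 * while env->pc_w indexes 16-bit words. */
static inline uint32_t pc_w_from_byte_addr(vaddr addr) { return addr / 2; }
static inline vaddr byte_addr_from_pc_w(uint32_t pc_w) { return pc_w * 2; }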
+
+static bool avr_cpu_has_work(CPUState *cs)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ return (cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET))
+ && cpu_interrupts_enabled(env);
+}
+
+static void avr_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ env->pc_w = tb->pc / 2; /* internally PC points to words */
+}
+
+static void avr_cpu_reset(DeviceState *ds)
+{
+ CPUState *cs = CPU(ds);
+ AVRCPU *cpu = AVR_CPU(cs);
+ AVRCPUClass *mcc = AVR_CPU_GET_CLASS(cpu);
+ CPUAVRState *env = &cpu->env;
+
+ mcc->parent_reset(ds);
+
+ env->pc_w = 0;
+ env->sregI = 1;
+ env->sregC = 0;
+ env->sregZ = 0;
+ env->sregN = 0;
+ env->sregV = 0;
+ env->sregS = 0;
+ env->sregH = 0;
+ env->sregT = 0;
+
+ env->rampD = 0;
+ env->rampX = 0;
+ env->rampY = 0;
+ env->rampZ = 0;
+ env->eind = 0;
+ env->sp = 0;
+
+ env->skip = 0;
+
+ memset(env->r, 0, sizeof(env->r));
+}
+
+static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_arch_avr;
+ info->print_insn = avr_print_insn;
+}
+
+static void avr_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ AVRCPUClass *mcc = AVR_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void avr_cpu_set_int(void *opaque, int irq, int level)
+{
+ AVRCPU *cpu = opaque;
+ CPUAVRState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint64_t mask = (1ull << irq);
+
+ if (level) {
+ env->intsrc |= mask;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ env->intsrc &= ~mask;
+ if (env->intsrc == 0) {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+}
+
+static void avr_cpu_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+
+ /* Set the number of interrupts supported by the CPU. */
+ qdev_init_gpio_in(DEVICE(cpu), avr_cpu_set_int,
+ sizeof(cpu->env.intsrc) * 8);
+}
+
+static ObjectClass *avr_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+
+ oc = object_class_by_name(cpu_model);
+ if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL ||
+ object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "\n");
+ qemu_fprintf(f, "PC: %06x\n", env->pc_w * 2); /* PC points to words */
+ qemu_fprintf(f, "SP: %04x\n", env->sp);
+ qemu_fprintf(f, "rampD: %02x\n", env->rampD >> 16);
+ qemu_fprintf(f, "rampX: %02x\n", env->rampX >> 16);
+ qemu_fprintf(f, "rampY: %02x\n", env->rampY >> 16);
+ qemu_fprintf(f, "rampZ: %02x\n", env->rampZ >> 16);
+ qemu_fprintf(f, "EIND: %02x\n", env->eind >> 16);
+ qemu_fprintf(f, "X: %02x%02x\n", env->r[27], env->r[26]);
+ qemu_fprintf(f, "Y: %02x%02x\n", env->r[29], env->r[28]);
+ qemu_fprintf(f, "Z: %02x%02x\n", env->r[31], env->r[30]);
+ qemu_fprintf(f, "SREG: [ %c %c %c %c %c %c %c %c ]\n",
+ env->sregI ? 'I' : '-',
+ env->sregT ? 'T' : '-',
+ env->sregH ? 'H' : '-',
+ env->sregS ? 'S' : '-',
+ env->sregV ? 'V' : '-',
+ env->sregN ? 'N' : '-',
+ env->sregZ ? 'Z' : '-',
+ env->sregC ? 'C' : '-');
+ qemu_fprintf(f, "SKIP: %02x\n", env->skip);
+
+ qemu_fprintf(f, "\n");
+ for (i = 0; i < ARRAY_SIZE(env->r); i++) {
+ qemu_fprintf(f, "R[%02d]: %02x ", i, env->r[i]);
+
+ if ((i % 8) == 7) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "\n");
+}
+
+static void avr_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ AVRCPUClass *mcc = AVR_CPU_CLASS(oc);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = avr_cpu_realizefn;
+
+ device_class_set_parent_reset(dc, avr_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = avr_cpu_class_by_name;
+
+ cc->has_work = avr_cpu_has_work;
+ cc->do_interrupt = avr_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = avr_cpu_exec_interrupt;
+ cc->dump_state = avr_cpu_dump_state;
+ cc->set_pc = avr_cpu_set_pc;
+ cc->memory_rw_debug = avr_cpu_memory_rw_debug;
+ cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
+ cc->tlb_fill = avr_cpu_tlb_fill;
+ cc->vmsd = &vms_avr_cpu;
+ cc->disas_set_info = avr_cpu_disas_set_info;
+ cc->tcg_initialize = avr_cpu_tcg_init;
+ cc->synchronize_from_tb = avr_cpu_synchronize_from_tb;
+ cc->gdb_read_register = avr_cpu_gdb_read_register;
+ cc->gdb_write_register = avr_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 35;
+ cc->gdb_core_xml_file = "avr-cpu.xml";
+}
+
+/*
+ * Setting features of AVR core type avr5
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * ata5702m322, ata5782, ata5790, ata5790n, ata5791, ata5795, ata5831, ata6613c,
+ * ata6614q, ata8210, ata8510, atmega16, atmega16a, atmega161, atmega162,
+ * atmega163, atmega164a, atmega164p, atmega164pa, atmega165, atmega165a,
+ * atmega165p, atmega165pa, atmega168, atmega168a, atmega168p, atmega168pa,
+ * atmega168pb, atmega169, atmega169a, atmega169p, atmega169pa, atmega16hvb,
+ * atmega16hvbrevb, atmega16m1, atmega16u4, atmega32a, atmega32, atmega323,
+ * atmega324a, atmega324p, atmega324pa, atmega325, atmega325a, atmega325p,
+ * atmega325pa, atmega3250, atmega3250a, atmega3250p, atmega3250pa, atmega328,
+ * atmega328p, atmega328pb, atmega329, atmega329a, atmega329p, atmega329pa,
+ * atmega3290, atmega3290a, atmega3290p, atmega3290pa, atmega32c1, atmega32m1,
+ * atmega32u4, atmega32u6, atmega406, atmega64, atmega64a, atmega640, atmega644,
+ * atmega644a, atmega644p, atmega644pa, atmega645, atmega645a, atmega645p,
+ * atmega6450, atmega6450a, atmega6450p, atmega649, atmega649a, atmega649p,
+ * atmega6490, atmega16hva, atmega16hva2, atmega32hvb, atmega6490a, atmega6490p,
+ * atmega64c1, atmega64m1, atmega64hve, atmega64hve2, atmega64rfr2,
+ * atmega644rfr2, atmega32hvbrevb, at90can32, at90can64, at90pwm161, at90pwm216,
+ * at90pwm316, at90scr100, at90usb646, at90usb647, at94k, m3000
+ */
+static void avr_avr5_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+/*
+ * Setting features of AVR core type avr51
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * atmega128, atmega128a, atmega1280, atmega1281, atmega1284, atmega1284p,
+ * atmega128rfa1, atmega128rfr2, atmega1284rfr2, at90can128, at90usb1286,
+ * at90usb1287
+ */
+static void avr_avr51_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_RAMPZ);
+ set_avr_feature(env, AVR_FEATURE_ELPMX);
+ set_avr_feature(env, AVR_FEATURE_ELPM);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+/*
+ * Setting features of AVR core type avr6
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * atmega2560, atmega2561, atmega256rfr2, atmega2564rfr2
+ */
+static void avr_avr6_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_3_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_RAMPZ);
+ set_avr_feature(env, AVR_FEATURE_EIJMP_EICALL);
+ set_avr_feature(env, AVR_FEATURE_ELPMX);
+ set_avr_feature(env, AVR_FEATURE_ELPM);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+typedef struct AVRCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} AVRCPUInfo;
+
+
+static void avr_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ const char *typename = object_class_get_name(OBJECT_CLASS(data));
+
+ qemu_printf("%s\n", typename);
+}
+
+void avr_cpu_list(void)
+{
+ GSList *list;
+ list = object_class_get_list_sorted(TYPE_AVR_CPU, false);
+ g_slist_foreach(list, avr_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+#define DEFINE_AVR_CPU_TYPE(model, initfn) \
+ { \
+ .parent = TYPE_AVR_CPU, \
+ .instance_init = initfn, \
+ .name = AVR_CPU_TYPE_NAME(model), \
+ }
+
+static const TypeInfo avr_cpu_type_info[] = {
+ {
+ .name = TYPE_AVR_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(AVRCPU),
+ .instance_init = avr_cpu_initfn,
+ .class_size = sizeof(AVRCPUClass),
+ .class_init = avr_cpu_class_init,
+ .abstract = true,
+ },
+ DEFINE_AVR_CPU_TYPE("avr5", avr_avr5_initfn),
+ DEFINE_AVR_CPU_TYPE("avr51", avr_avr51_initfn),
+ DEFINE_AVR_CPU_TYPE("avr6", avr_avr6_initfn),
+};
+
+DEFINE_TYPES(avr_cpu_type_info)
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
new file mode 100644
index 0000000000..d148e8c75a
--- /dev/null
+++ b/target/avr/cpu.h
@@ -0,0 +1,256 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef QEMU_AVR_CPU_H
+#define QEMU_AVR_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+#ifdef CONFIG_USER_ONLY
+#error "AVR 8-bit does not support user mode"
+#endif
+
+#define AVR_CPU_TYPE_SUFFIX "-" TYPE_AVR_CPU
+#define AVR_CPU_TYPE_NAME(name) (name AVR_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_AVR_CPU
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+/*
+ * AVR has two memory spaces, data & code.
+ * e.g. both have 0 address
+ * ST/LD instructions access data space
+ * LPM/SPM and instruction fetching access code memory space
+ */
+#define MMU_CODE_IDX 0
+#define MMU_DATA_IDX 1
+
+#define EXCP_RESET 1
+#define EXCP_INT(n) (EXCP_RESET + (n) + 1)
+
+/* Number of CPU registers */
+#define NUMBER_OF_CPU_REGISTERS 32
+/* Number of IO registers accessible by ld/st/in/out */
+#define NUMBER_OF_IO_REGISTERS 64
+
+/*
+ * Offsets of AVR memory regions in host memory space.
+ *
+ * This is needed because the AVR has separate code and data address
+ * spaces that both start from zero but have to go somewhere in
+ * host memory.
+ *
+ * It's also useful to know where some things are, like the IO registers.
+ */
+/* Flash program memory */
+#define OFFSET_CODE 0x00000000
+/* CPU registers, IO registers, and SRAM */
+#define OFFSET_DATA 0x00800000
+/* CPU registers specifically, these are mapped at the start of data */
+#define OFFSET_CPU_REGISTERS OFFSET_DATA
+/*
+ * IO registers, including status register, stack pointer, and memory
+ * mapped peripherals, mapped just after CPU registers
+ */
+#define OFFSET_IO_REGISTERS (OFFSET_DATA + NUMBER_OF_CPU_REGISTERS)
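
As an illustration of the layout above (hypothetical helper, compare avr_cpu_tlb_fill() in helper.c): a data-space address is simply offset into the 0x00800000 region, so r0 lands at 0x00800000, the first I/O register at 0x00800020 and the first SRAM byte at 0x00800100.

/* Sketch: data-space address to QEMU physical address. */
static inline uint32_t avr_data_to_phys(uint32_t data_addr)
{
    return OFFSET_DATA + data_addr;   /* e.g. SRAM byte 0x0100 -> 0x00800100 */
}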
+
+typedef enum AVRFeature {
+ AVR_FEATURE_SRAM,
+
+ AVR_FEATURE_1_BYTE_PC,
+ AVR_FEATURE_2_BYTE_PC,
+ AVR_FEATURE_3_BYTE_PC,
+
+ AVR_FEATURE_1_BYTE_SP,
+ AVR_FEATURE_2_BYTE_SP,
+
+ AVR_FEATURE_BREAK,
+ AVR_FEATURE_DES,
+ AVR_FEATURE_RMW, /* Read Modify Write - XCH LAC LAS LAT */
+
+ AVR_FEATURE_EIJMP_EICALL,
+ AVR_FEATURE_IJMP_ICALL,
+ AVR_FEATURE_JMP_CALL,
+
+ AVR_FEATURE_ADIW_SBIW,
+
+ AVR_FEATURE_SPM,
+ AVR_FEATURE_SPMX,
+
+ AVR_FEATURE_ELPMX,
+ AVR_FEATURE_ELPM,
+ AVR_FEATURE_LPMX,
+ AVR_FEATURE_LPM,
+
+ AVR_FEATURE_MOVW,
+ AVR_FEATURE_MUL,
+ AVR_FEATURE_RAMPD,
+ AVR_FEATURE_RAMPX,
+ AVR_FEATURE_RAMPY,
+ AVR_FEATURE_RAMPZ,
+} AVRFeature;
+
+typedef struct CPUAVRState CPUAVRState;
+
+struct CPUAVRState {
+ uint32_t pc_w; /* 0x003fffff up to 22 bits */
+
+ uint32_t sregC; /* 0x00000001 1 bit */
+ uint32_t sregZ; /* 0x00000001 1 bit */
+ uint32_t sregN; /* 0x00000001 1 bit */
+ uint32_t sregV; /* 0x00000001 1 bit */
+ uint32_t sregS; /* 0x00000001 1 bit */
+ uint32_t sregH; /* 0x00000001 1 bit */
+ uint32_t sregT; /* 0x00000001 1 bit */
+ uint32_t sregI; /* 0x00000001 1 bit */
+
+ uint32_t rampD; /* 0x00ff0000 8 bits */
+ uint32_t rampX; /* 0x00ff0000 8 bits */
+ uint32_t rampY; /* 0x00ff0000 8 bits */
+ uint32_t rampZ; /* 0x00ff0000 8 bits */
+ uint32_t eind; /* 0x00ff0000 8 bits */
+
+ uint32_t r[NUMBER_OF_CPU_REGISTERS]; /* 8 bits each */
+ uint32_t sp; /* 16 bits */
+
+ uint32_t skip; /* if set skip instruction */
+
+ uint64_t intsrc; /* interrupt sources */
+ bool fullacc; /* true: full access (CPU/IO registers), false: plain memory access */
+
+ uint64_t features;
+};
+
+/**
+ * AVRCPU:
+ * @env: #CPUAVRState
+ *
+ * An AVR CPU.
+ */
+typedef struct AVRCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUAVRState env;
+} AVRCPU;
+
+extern const struct VMStateDescription vms_avr_cpu;
+
+void avr_cpu_do_interrupt(CPUState *cpu);
+bool avr_cpu_exec_interrupt(CPUState *cpu, int int_req);
+hwaddr avr_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int avr_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int avr_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int avr_print_insn(bfd_vma addr, disassemble_info *info);
+
+static inline int avr_feature(CPUAVRState *env, AVRFeature feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+static inline void set_avr_feature(CPUAVRState *env, int feature)
+{
+ env->features |= (1ULL << feature);
+}
+
+#define cpu_list avr_cpu_list
+#define cpu_signal_handler cpu_avr_signal_handler
+#define cpu_mmu_index avr_cpu_mmu_index
+
+static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch)
+{
+ return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
+}
+
+void avr_cpu_tcg_init(void);
+
+void avr_cpu_list(void);
+int cpu_avr_exec(CPUState *cpu);
+int cpu_avr_signal_handler(int host_signum, void *pinfo, void *puc);
+int avr_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf,
+ int len, bool is_write);
+
+enum {
+ TB_FLAGS_FULL_ACCESS = 1,
+ TB_FLAGS_SKIP = 2,
+};
+
+static inline void cpu_get_tb_cpu_state(CPUAVRState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ uint32_t flags = 0;
+
+ *pc = env->pc_w * 2;
+ *cs_base = 0;
+
+ if (env->fullacc) {
+ flags |= TB_FLAGS_FULL_ACCESS;
+ }
+ if (env->skip) {
+ flags |= TB_FLAGS_SKIP;
+ }
+
+ *pflags = flags;
+}
+
+static inline int cpu_interrupts_enabled(CPUAVRState *env)
+{
+ return env->sregI != 0;
+}
+
+static inline uint8_t cpu_get_sreg(CPUAVRState *env)
+{
+ uint8_t sreg;
+ sreg = (env->sregC) << 0
+ | (env->sregZ) << 1
+ | (env->sregN) << 2
+ | (env->sregV) << 3
+ | (env->sregS) << 4
+ | (env->sregH) << 5
+ | (env->sregT) << 6
+ | (env->sregI) << 7;
+ return sreg;
+}
+
+static inline void cpu_set_sreg(CPUAVRState *env, uint8_t sreg)
+{
+ env->sregC = (sreg >> 0) & 0x01;
+ env->sregZ = (sreg >> 1) & 0x01;
+ env->sregN = (sreg >> 2) & 0x01;
+ env->sregV = (sreg >> 3) & 0x01;
+ env->sregS = (sreg >> 4) & 0x01;
+ env->sregH = (sreg >> 5) & 0x01;
+ env->sregT = (sreg >> 6) & 0x01;
+ env->sregI = (sreg >> 7) & 0x01;
+}
+
+bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+typedef CPUAVRState CPUArchState;
+typedef AVRCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+#endif /* !defined (QEMU_AVR_CPU_H) */
diff --git a/target/avr/disas.c b/target/avr/disas.c
new file mode 100644
index 0000000000..8e1bac4d76
--- /dev/null
+++ b/target/avr/disas.c
@@ -0,0 +1,245 @@
+/*
+ * AVR disassembler
+ *
+ * Copyright (c) 2019-2020 Richard Henderson <rth@twiddle.net>
+ * Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+typedef struct {
+ disassemble_info *info;
+ uint16_t next_word;
+ bool next_word_used;
+} DisasContext;
+
+static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 16);
+}
+
+static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 8);
+}
+
+static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
+{
+ return 24 + (indx % 4) * 2;
+}
+
+static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
+{
+ return (indx % 16) * 2;
+}
+
+static uint16_t next_word(DisasContext *ctx)
+{
+ ctx->next_word_used = true;
+ return ctx->next_word;
+}
+
+static int append_16(DisasContext *ctx, int x)
+{
+ return x << 16 | next_word(ctx);
+}
+
+/* Include the auto-generated decoder. */
+static bool decode_insn(DisasContext *ctx, uint16_t insn);
+#include "decode_insn.inc.c"
+
+#define output(mnemonic, format, ...) \
+ (pctx->info->fprintf_func(pctx->info->stream, "%-9s " format, \
+ mnemonic, ##__VA_ARGS__))
+
+int avr_print_insn(bfd_vma addr, disassemble_info *info)
+{
+ DisasContext ctx;
+ DisasContext *pctx = &ctx;
+ bfd_byte buffer[4];
+ uint16_t insn;
+ int status;
+
+ ctx.info = info;
+
+ status = info->read_memory_func(addr, buffer, 4, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr, info);
+ return -1;
+ }
+ insn = bfd_getl16(buffer);
+ ctx.next_word = bfd_getl16(buffer + 2);
+ ctx.next_word_used = false;
+
+ if (!decode_insn(&ctx, insn)) {
+ output(".db", "0x%02x, 0x%02x", buffer[0], buffer[1]);
+ }
+
+ return ctx.next_word_used ? 4 : 2;
+}
+
+
+#define INSN(opcode, format, ...) \
+static bool trans_##opcode(DisasContext *pctx, arg_##opcode * a) \
+{ \
+ output(#opcode, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+#define INSN_MNEMONIC(opcode, mnemonic, format, ...) \
+static bool trans_##opcode(DisasContext *pctx, arg_##opcode * a) \
+{ \
+ output(mnemonic, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+/*
+ * C Z N V S H T I
+ * 0 1 2 3 4 5 6 7
+ */
+static const char brbc[][5] = {
+ "BRCC", "BRNE", "BRPL", "BRVC", "BRGE", "BRHC", "BRTC", "BRID"
+};
+
+static const char brbs[][5] = {
+ "BRCS", "BREQ", "BRMI", "BRVS", "BRLT", "BRHS", "BRTS", "BRIE"
+};
+
+static const char bset[][4] = {
+ "SEC", "SEZ", "SEN", "SEZ", "SES", "SEH", "SET", "SEI"
+};
+
+static const char bclr[][4] = {
+ "CLC", "CLZ", "CLN", "CLZ", "CLS", "CLH", "CLT", "CLI"
+};
+
+/*
+ * Arithmetic Instructions
+ */
+INSN(ADD, "r%d, r%d", a->rd, a->rr)
+INSN(ADC, "r%d, r%d", a->rd, a->rr)
+INSN(ADIW, "r%d:r%d, %d", a->rd + 1, a->rd, a->imm)
+INSN(SUB, "r%d, r%d", a->rd, a->rr)
+INSN(SUBI, "r%d, %d", a->rd, a->imm)
+INSN(SBC, "r%d, r%d", a->rd, a->rr)
+INSN(SBCI, "r%d, %d", a->rd, a->imm)
+INSN(SBIW, "r%d:r%d, %d", a->rd + 1, a->rd, a->imm)
+INSN(AND, "r%d, r%d", a->rd, a->rr)
+INSN(ANDI, "r%d, %d", a->rd, a->imm)
+INSN(OR, "r%d, r%d", a->rd, a->rr)
+INSN(ORI, "r%d, %d", a->rd, a->imm)
+INSN(EOR, "r%d, r%d", a->rd, a->rr)
+INSN(COM, "r%d", a->rd)
+INSN(NEG, "r%d", a->rd)
+INSN(INC, "r%d", a->rd)
+INSN(DEC, "r%d", a->rd)
+INSN(MUL, "r%d, r%d", a->rd, a->rr)
+INSN(MULS, "r%d, r%d", a->rd, a->rr)
+INSN(MULSU, "r%d, r%d", a->rd, a->rr)
+INSN(FMUL, "r%d, r%d", a->rd, a->rr)
+INSN(FMULS, "r%d, r%d", a->rd, a->rr)
+INSN(FMULSU, "r%d, r%d", a->rd, a->rr)
+INSN(DES, "%d", a->imm)
+
+/*
+ * Branch Instructions
+ */
+INSN(RJMP, ".%+d", a->imm * 2)
+INSN(IJMP, "")
+INSN(EIJMP, "")
+INSN(JMP, "0x%x", a->imm * 2)
+INSN(RCALL, ".%+d", a->imm * 2)
+INSN(ICALL, "")
+INSN(EICALL, "")
+INSN(CALL, "0x%x", a->imm * 2)
+INSN(RET, "")
+INSN(RETI, "")
+INSN(CPSE, "r%d, r%d", a->rd, a->rr)
+INSN(CP, "r%d, r%d", a->rd, a->rr)
+INSN(CPC, "r%d, r%d", a->rd, a->rr)
+INSN(CPI, "r%d, %d", a->rd, a->imm)
+INSN(SBRC, "r%d, %d", a->rr, a->bit)
+INSN(SBRS, "r%d, %d", a->rr, a->bit)
+INSN(SBIC, "$%d, %d", a->reg, a->bit)
+INSN(SBIS, "$%d, %d", a->reg, a->bit)
+INSN_MNEMONIC(BRBS, brbs[a->bit], ".%+d", a->imm * 2)
+INSN_MNEMONIC(BRBC, brbc[a->bit], ".%+d", a->imm * 2)
+
+/*
+ * Data Transfer Instructions
+ */
+INSN(MOV, "r%d, r%d", a->rd, a->rr)
+INSN(MOVW, "r%d:r%d, r%d:r%d", a->rd + 1, a->rd, a->rr + 1, a->rr)
+INSN(LDI, "r%d, %d", a->rd, a->imm)
+INSN(LDS, "r%d, %d", a->rd, a->imm)
+INSN(LDX1, "r%d, X", a->rd)
+INSN(LDX2, "r%d, X+", a->rd)
+INSN(LDX3, "r%d, -X", a->rd)
+INSN(LDY2, "r%d, Y+", a->rd)
+INSN(LDY3, "r%d, -Y", a->rd)
+INSN(LDZ2, "r%d, Z+", a->rd)
+INSN(LDZ3, "r%d, -Z", a->rd)
+INSN(LDDY, "r%d, Y+%d", a->rd, a->imm)
+INSN(LDDZ, "r%d, Z+%d", a->rd, a->imm)
+INSN(STS, "%d, r%d", a->imm, a->rd)
+INSN(STX1, "X, r%d", a->rr)
+INSN(STX2, "X+, r%d", a->rr)
+INSN(STX3, "-X, r%d", a->rr)
+INSN(STY2, "Y+, r%d", a->rd)
+INSN(STY3, "-Y, r%d", a->rd)
+INSN(STZ2, "Z+, r%d", a->rd)
+INSN(STZ3, "-Z, r%d", a->rd)
+INSN(STDY, "Y+%d, r%d", a->imm, a->rd)
+INSN(STDZ, "Z+%d, r%d", a->imm, a->rd)
+INSN(LPM1, "")
+INSN(LPM2, "r%d, Z", a->rd)
+INSN(LPMX, "r%d, Z+", a->rd)
+INSN(ELPM1, "")
+INSN(ELPM2, "r%d, Z", a->rd)
+INSN(ELPMX, "r%d, Z+", a->rd)
+INSN(SPM, "")
+INSN(SPMX, "Z+")
+INSN(IN, "r%d, $%d", a->rd, a->imm)
+INSN(OUT, "$%d, r%d", a->imm, a->rd)
+INSN(PUSH, "r%d", a->rd)
+INSN(POP, "r%d", a->rd)
+INSN(XCH, "Z, r%d", a->rd)
+INSN(LAC, "Z, r%d", a->rd)
+INSN(LAS, "Z, r%d", a->rd)
+INSN(LAT, "Z, r%d", a->rd)
+
+/*
+ * Bit and Bit-test Instructions
+ */
+INSN(LSR, "r%d", a->rd)
+INSN(ROR, "r%d", a->rd)
+INSN(ASR, "r%d", a->rd)
+INSN(SWAP, "r%d", a->rd)
+INSN(SBI, "$%d, %d", a->reg, a->bit)
+INSN(CBI, "$%d, %d", a->reg, a->bit)
+INSN(BST, "r%d, %d", a->rd, a->bit)
+INSN(BLD, "r%d, %d", a->rd, a->bit)
+INSN_MNEMONIC(BSET, bset[a->bit], "")
+INSN_MNEMONIC(BCLR, bclr[a->bit], "")
+
+/*
+ * MCU Control Instructions
+ */
+INSN(BREAK, "")
+INSN(NOP, "")
+INSN(SLEEP, "")
+INSN(WDR, "")
diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c
new file mode 100644
index 0000000000..c28ed67efe
--- /dev/null
+++ b/target/avr/gdbstub.c
@@ -0,0 +1,84 @@
+/*
+ * QEMU AVR gdbstub
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "exec/gdbstub.h"
+
+int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ /* R */
+ if (n < 32) {
+ return gdb_get_reg8(mem_buf, env->r[n]);
+ }
+
+ /* SREG */
+ if (n == 32) {
+ uint8_t sreg = cpu_get_sreg(env);
+
+ return gdb_get_reg8(mem_buf, sreg);
+ }
+
+ /* SP */
+ if (n == 33) {
+ return gdb_get_reg16(mem_buf, env->sp & 0x0000ffff);
+ }
+
+ /* PC */
+ if (n == 34) {
+ return gdb_get_reg32(mem_buf, env->pc_w * 2);
+ }
+
+ return 0;
+}
+
+int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ /* R */
+ if (n < 32) {
+ env->r[n] = *mem_buf;
+ return 1;
+ }
+
+ /* SREG */
+ if (n == 32) {
+ cpu_set_sreg(env, *mem_buf);
+ return 1;
+ }
+
+ /* SP */
+ if (n == 33) {
+ env->sp = lduw_p(mem_buf);
+ return 2;
+ }
+
+ /* PC */
+ if (n == 34) {
+ env->pc_w = ldl_p(mem_buf) / 2;
+ return 4;
+ }
+
+ return 0;
+}
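
For reference, the register numbering the two functions above rely on (matching gdb_num_core_regs = 35 and avr-cpu.xml set in cpu.c), as a comment-style sketch:

/*
 * GDB core register map:
 *   0..31 : r0..r31, 8 bit each
 *   32    : SREG, 8 bit
 *   33    : SP, 16 bit
 *   34    : PC, 32 bit byte address (hence the *2 and /2 against env->pc_w)
 */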
diff --git a/target/avr/helper.c b/target/avr/helper.c
new file mode 100644
index 0000000000..d96d14372b
--- /dev/null
+++ b/target/avr/helper.c
@@ -0,0 +1,348 @@
+/*
+ * QEMU AVR CPU helpers
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/address-spaces.h"
+#include "exec/helper-proto.h"
+
+bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ bool ret = false;
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
+ if (cpu_interrupts_enabled(env)) {
+ cs->exception_index = EXCP_RESET;
+ cc->do_interrupt(cs);
+
+ cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
+
+ ret = true;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
+ int index = ctz32(env->intsrc);
+ cs->exception_index = EXCP_INT(index);
+ cc->do_interrupt(cs);
+
+ env->intsrc &= env->intsrc - 1; /* clear the interrupt */
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+void avr_cpu_do_interrupt(CPUState *cs)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ uint32_t ret = env->pc_w;
+ int vector = 0;
+ int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
+ int base = 0;
+
+ if (cs->exception_index == EXCP_RESET) {
+ vector = 0;
+ } else if (env->intsrc != 0) {
+ vector = ctz32(env->intsrc) + 1;
+ }
+
+ if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
+ cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
+ } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
+ } else {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ }
+
+ env->pc_w = base + vector * size;
+ env->sregI = 0; /* clear Global Interrupt Flag */
+
+ cs->exception_index = -1;
+}
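
A worked example of the vector computation above (illustrative; assumes an avr5-class core, where AVR_FEATURE_JMP_CALL makes each vector slot two words wide):

/*
 * Interrupt source 0 is delivered as EXCP_INT(0), so vector becomes
 * ctz32(intsrc) + 1 = 1 and the new PC is base + vector * size = 0 + 1 * 2,
 * i.e. word 2 (byte address 4), the slot right after the reset vector at 0.
 */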
+
+int avr_cpu_memory_rw_debug(CPUState *cs, vaddr addr, uint8_t *buf,
+ int len, bool is_write)
+{
+ return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
+}
+
+hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ return addr; /* I assume 1:1 address correspondence */
+}
+
+bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ int prot = 0;
+ MemTxAttrs attrs = {};
+ uint32_t paddr;
+
+ address &= TARGET_PAGE_MASK;
+
+ if (mmu_idx == MMU_CODE_IDX) {
+ /* access to code in flash */
+ paddr = OFFSET_CODE + address;
+ prot = PAGE_READ | PAGE_EXEC;
+ if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) {
+ error_report("execution left flash memory");
+ abort();
+ }
+ } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /*
+ * access to CPU or IO registers; exit and rebuild this TB to use full
+ * access in case it touches specially handled registers like SREG or SP
+ */
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+ env->fullacc = 1;
+ cpu_loop_exit_restore(cs, retaddr);
+ } else {
+ /* access to memory. nothing special */
+ paddr = OFFSET_DATA + address;
+ prot = PAGE_READ | PAGE_WRITE;
+ }
+
+ tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+
+ return true;
+}
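
To make the three branches above concrete, a hedged sketch of what a load from a low data address goes through:

/*
 * Example flow (illustration): a data load from guest address 0x005f (SREG,
 * I/O port 0x3f plus the 32 CPU registers) faults here with
 * mmu_idx == MMU_DATA_IDX; the address is below NUMBER_OF_CPU_REGISTERS +
 * NUMBER_OF_IO_REGISTERS, so env->fullacc is set and the TB is retranslated
 * with TB_FLAGS_FULL_ACCESS.  The reissued load then goes through
 * helper_fullrd(), which forwards port 0x3f to helper_inb() and returns
 * cpu_get_sreg(env).
 */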
+
+/*
+ * helpers
+ */
+
+void helper_sleep(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void helper_unsupported(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /*
+ * I could not find what happens on the real platform, so
+ * use EXCP_DEBUG for the time being
+ */
+ cs->exception_index = EXCP_DEBUG;
+ if (qemu_loglevel_mask(LOG_UNIMP)) {
+ qemu_log("UNSUPPORTED\n");
+ cpu_dump_state(cs, stderr, 0);
+ }
+ cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+void helper_break(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+void helper_wdr(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* WD is not implemented yet, placeholder */
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+/*
+ * This function implements the IN instruction.
+ *
+ * It does the following:
+ * a. if the IO register belongs to the CPU, its value is read and returned
+ * b. otherwise the IO address is translated to a memory address and physical
+ * memory is read
+ *
+ */
+target_ulong helper_inb(CPUAVRState *env, uint32_t port)
+{
+ target_ulong data = 0;
+
+ switch (port) {
+ case 0x38: /* RAMPD */
+ data = 0xff & (env->rampD >> 16);
+ break;
+ case 0x39: /* RAMPX */
+ data = 0xff & (env->rampX >> 16);
+ break;
+ case 0x3a: /* RAMPY */
+ data = 0xff & (env->rampY >> 16);
+ break;
+ case 0x3b: /* RAMPZ */
+ data = 0xff & (env->rampZ >> 16);
+ break;
+ case 0x3c: /* EIND */
+ data = 0xff & (env->eind >> 16);
+ break;
+ case 0x3d: /* SPL */
+ data = env->sp & 0x00ff;
+ break;
+ case 0x3e: /* SPH */
+ data = env->sp >> 8;
+ break;
+ case 0x3f: /* SREG */
+ data = cpu_get_sreg(env);
+ break;
+ default:
+ /* not a special register, pass to normal memory access */
+ data = address_space_ldub(&address_space_memory,
+ OFFSET_IO_REGISTERS + port,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+
+ return data;
+}
+
+/*
+ * This function implements the OUT instruction.
+ *
+ * It does the following:
+ * a. if the IO register belongs to the CPU, the value is written into the register
+ * b. otherwise the IO address is translated to a memory address and physical
+ * memory is written
+ *
+ */
+void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
+{
+ data &= 0x000000ff;
+
+ switch (port) {
+ case 0x38: /* RAMPD */
+ if (avr_feature(env, AVR_FEATURE_RAMPD)) {
+ env->rampD = (data & 0xff) << 16;
+ }
+ break;
+ case 0x39: /* RAMPX */
+ if (avr_feature(env, AVR_FEATURE_RAMPX)) {
+ env->rampX = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3a: /* RAMPY */
+ if (avr_feature(env, AVR_FEATURE_RAMPY)) {
+ env->rampY = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3b: /* RAMPZ */
+ if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
+ env->rampZ = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3c: /* EIND */
+ env->eind = (data & 0xff) << 16;
+ break;
+ case 0x3d: /* SPL */
+ env->sp = (env->sp & 0xff00) | (data);
+ break;
+ case 0x3e: /* SPH */
+ if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
+ env->sp = (env->sp & 0x00ff) | (data << 8);
+ }
+ break;
+ case 0x3f: /* SREG */
+ cpu_set_sreg(env, data);
+ break;
+ default:
+ /* not a special register, pass to normal memory access */
+ address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
+ data, MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
+
+/*
+ * This function implements the LD instruction when there is a possibility to
+ * read from a CPU or IO register
+ */
+target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
+{
+ uint8_t data;
+
+ env->fullacc = false;
+
+ if (addr < NUMBER_OF_CPU_REGISTERS) {
+ /* CPU registers */
+ data = env->r[addr];
+ } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /* IO registers */
+ data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
+ } else {
+ /* memory */
+ data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+ return data;
+}
+
+/*
+ * This function implements the ST instruction when there is a possibility to
+ * write into a CPU or IO register
+ */
+void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
+{
+ env->fullacc = false;
+
+ /* Following logic assumes this: */
+ assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
+ assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
+ NUMBER_OF_CPU_REGISTERS);
+
+ if (addr < NUMBER_OF_CPU_REGISTERS) {
+ /* CPU registers */
+ env->r[addr] = data;
+ } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /* IO registers */
+ helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
+ } else {
+ /* memory */
+ address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
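
In summary, both full-access helpers above decode the data-space address into three ranges (a sketch of the logic, using the constants from cpu.h):

/*
 * [ 0 .. 31]  CPU registers  -> env->r[addr]
 * [32 .. 95]  I/O registers  -> helper_inb()/helper_outb(addr - 32)
 * [96 ..   ]  data memory    -> address_space_{ldub,stb}(OFFSET_DATA + addr)
 */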
diff --git a/target/avr/helper.h b/target/avr/helper.h
new file mode 100644
index 0000000000..8e1ae7fda0
--- /dev/null
+++ b/target/avr/helper.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU AVR CPU helpers
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+DEF_HELPER_1(wdr, void, env)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(break, void, env)
+DEF_HELPER_1(sleep, void, env)
+DEF_HELPER_1(unsupported, void, env)
+DEF_HELPER_3(outb, void, env, i32, i32)
+DEF_HELPER_2(inb, tl, env, i32)
+DEF_HELPER_3(fullwr, void, env, i32, i32)
+DEF_HELPER_2(fullrd, tl, env, i32)
diff --git a/target/avr/insn.decode b/target/avr/insn.decode
new file mode 100644
index 0000000000..482c23ad0c
--- /dev/null
+++ b/target/avr/insn.decode
@@ -0,0 +1,187 @@
+#
+# AVR instruction decode definitions.
+#
+# Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# regs_16_31_by_one = [16 .. 31]
+# regs_16_23_by_one = [16 .. 23]
+# regs_24_30_by_two = [24, 26, 28, 30]
+# regs_00_30_by_two = [0, 2, 4, 6, 8, .. 30]
+
+%rd 4:5
+%rr 9:1 0:4
+
+%rd_a 4:4 !function=to_regs_16_31_by_one
+%rd_b 4:3 !function=to_regs_16_23_by_one
+%rd_c 4:2 !function=to_regs_24_30_by_two
+%rr_a 0:4 !function=to_regs_16_31_by_one
+%rr_b 0:3 !function=to_regs_16_23_by_one
+
+%imm6 6:2 0:4
+%imm8 8:4 0:4
+
+%io_imm 9:2 0:4
+%ldst_d_imm 13:1 10:2 0:3
+
+
+&rd_rr rd rr
+&rd_imm rd imm
+
+@op_rd_rr .... .. . ..... .... &rd_rr rd=%rd rr=%rr
+@op_rd_imm6 .... .... .. .. .... &rd_imm rd=%rd_c imm=%imm6
+@op_rd_imm8 .... .... .... .... &rd_imm rd=%rd_a imm=%imm8
+@fmul .... .... . ... . ... &rd_rr rd=%rd_b rr=%rr_b
+
+#
+# Arithmetic Instructions
+#
+ADD 0000 11 . ..... .... @op_rd_rr
+ADC 0001 11 . ..... .... @op_rd_rr
+ADIW 1001 0110 .. .. .... @op_rd_imm6
+SUB 0001 10 . ..... .... @op_rd_rr
+SUBI 0101 .... .... .... @op_rd_imm8
+SBC 0000 10 . ..... .... @op_rd_rr
+SBCI 0100 .... .... .... @op_rd_imm8
+SBIW 1001 0111 .. .. .... @op_rd_imm6
+AND 0010 00 . ..... .... @op_rd_rr
+ANDI 0111 .... .... .... @op_rd_imm8
+OR 0010 10 . ..... .... @op_rd_rr
+ORI 0110 .... .... .... @op_rd_imm8
+EOR 0010 01 . ..... .... @op_rd_rr
+COM 1001 010 rd:5 0000
+NEG 1001 010 rd:5 0001
+INC 1001 010 rd:5 0011
+DEC 1001 010 rd:5 1010
+MUL 1001 11 . ..... .... @op_rd_rr
+MULS 0000 0010 .... .... &rd_rr rd=%rd_a rr=%rr_a
+MULSU 0000 0011 0 ... 0 ... @fmul
+FMUL 0000 0011 0 ... 1 ... @fmul
+FMULS 0000 0011 1 ... 0 ... @fmul
+FMULSU 0000 0011 1 ... 1 ... @fmul
+DES 1001 0100 imm:4 1011
+
+#
+# Branch Instructions
+#
+
+# The 22-bit immediate is partially in the opcode word,
+# and partially in the next. Use append_16 to build the
+# complete 22-bit value.
+%imm_call 4:5 0:1 !function=append_16
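
A worked example of how the two pieces combine (illustrative):

# Example: a CALL to word address 0x12345 keeps the upper 6 bits (0x01) in
# the opcode word via %imm_call and the lower 16 bits (0x2345) in the next
# word; append_16() returns (0x01 << 16) | 0x2345 == 0x12345.  The
# disassembler prints the byte address, i.e. a->imm * 2.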
+
+@op_bit .... .... . bit:3 ....
+@op_bit_imm .... .. imm:s7 bit:3
+
+RJMP 1100 imm:s12
+IJMP 1001 0100 0000 1001
+EIJMP 1001 0100 0001 1001
+JMP 1001 010 ..... 110 . imm=%imm_call
+RCALL 1101 imm:s12
+ICALL 1001 0101 0000 1001
+EICALL 1001 0101 0001 1001
+CALL 1001 010 ..... 111 . imm=%imm_call
+RET 1001 0101 0000 1000
+RETI 1001 0101 0001 1000
+CPSE 0001 00 . ..... .... @op_rd_rr
+CP 0001 01 . ..... .... @op_rd_rr
+CPC 0000 01 . ..... .... @op_rd_rr
+CPI 0011 .... .... .... @op_rd_imm8
+SBRC 1111 110 rr:5 0 bit:3
+SBRS 1111 111 rr:5 0 bit:3
+SBIC 1001 1001 reg:5 bit:3
+SBIS 1001 1011 reg:5 bit:3
+BRBS 1111 00 ....... ... @op_bit_imm
+BRBC 1111 01 ....... ... @op_bit_imm
+
+#
+# Data Transfer Instructions
+#
+
+%rd_d 4:4 !function=to_regs_00_30_by_two
+%rr_d 0:4 !function=to_regs_00_30_by_two
+
+@io_rd_imm .... . .. ..... .... &rd_imm rd=%rd imm=%io_imm
+@ldst_d .. . . .. . rd:5 . ... &rd_imm imm=%ldst_d_imm
+
+# The 16-bit immediate is completely in the next word.
+# Fields cannot be defined with no bits, so we cannot play
+# the same trick and append to a zero-bit value.
+# Defer reading the immediate until trans_{LDS,STS}.
+@ldst_s .... ... rd:5 .... imm=0
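
A short encoding example for the deferred case (illustrative):

# Example: "LDS r16, 0x0123" encodes as 1001 000 10000 0000 (0x9100)
# followed by the literal word 0x0123; trans_LDS/trans_STS read that second
# word themselves instead of going through a decodetree field.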
+
+MOV 0010 11 . ..... .... @op_rd_rr
+MOVW 0000 0001 .... .... &rd_rr rd=%rd_d rr=%rr_d
+LDI 1110 .... .... .... @op_rd_imm8
+LDS 1001 000 ..... 0000 @ldst_s
+LDX1 1001 000 rd:5 1100
+LDX2 1001 000 rd:5 1101
+LDX3 1001 000 rd:5 1110
+LDY2 1001 000 rd:5 1001
+LDY3 1001 000 rd:5 1010
+LDZ2 1001 000 rd:5 0001
+LDZ3 1001 000 rd:5 0010
+LDDY 10 . 0 .. 0 ..... 1 ... @ldst_d
+LDDZ 10 . 0 .. 0 ..... 0 ... @ldst_d
+STS 1001 001 ..... 0000 @ldst_s
+STX1 1001 001 rr:5 1100
+STX2 1001 001 rr:5 1101
+STX3 1001 001 rr:5 1110
+STY2 1001 001 rd:5 1001
+STY3 1001 001 rd:5 1010
+STZ2 1001 001 rd:5 0001
+STZ3 1001 001 rd:5 0010
+STDY 10 . 0 .. 1 ..... 1 ... @ldst_d
+STDZ 10 . 0 .. 1 ..... 0 ... @ldst_d
+LPM1 1001 0101 1100 1000
+LPM2 1001 000 rd:5 0100
+LPMX 1001 000 rd:5 0101
+ELPM1 1001 0101 1101 1000
+ELPM2 1001 000 rd:5 0110
+ELPMX 1001 000 rd:5 0111
+SPM 1001 0101 1110 1000
+SPMX 1001 0101 1111 1000
+IN 1011 0 .. ..... .... @io_rd_imm
+OUT 1011 1 .. ..... .... @io_rd_imm
+PUSH 1001 001 rd:5 1111
+POP 1001 000 rd:5 1111
+XCH 1001 001 rd:5 0100
+LAC 1001 001 rd:5 0110
+LAS 1001 001 rd:5 0101
+LAT 1001 001 rd:5 0111
+
+#
+# Bit and Bit-test Instructions
+#
+LSR 1001 010 rd:5 0110
+ROR 1001 010 rd:5 0111
+ASR 1001 010 rd:5 0101
+SWAP 1001 010 rd:5 0010
+SBI 1001 1010 reg:5 bit:3
+CBI 1001 1000 reg:5 bit:3
+BST 1111 101 rd:5 0 bit:3
+BLD 1111 100 rd:5 0 bit:3
+BSET 1001 0100 0 bit:3 1000
+BCLR 1001 0100 1 bit:3 1000
+
+#
+# MCU Control Instructions
+#
+BREAK 1001 0101 1001 1000
+NOP 0000 0000 0000 0000
+SLEEP 1001 0101 1000 1000
+WDR 1001 0101 1010 1000
diff --git a/target/avr/machine.c b/target/avr/machine.c
new file mode 100644
index 0000000000..e315442787
--- /dev/null
+++ b/target/avr/machine.c
@@ -0,0 +1,119 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+static int get_sreg(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUAVRState *env = opaque;
+ uint8_t sreg;
+
+ sreg = qemu_get_byte(f);
+ cpu_set_sreg(env, sreg);
+ return 0;
+}
+
+static int put_sreg(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, QJSON *vmdesc)
+{
+ CPUAVRState *env = opaque;
+ uint8_t sreg = cpu_get_sreg(env);
+
+ qemu_put_byte(f, sreg);
+ return 0;
+}
+
+static const VMStateInfo vms_sreg = {
+ .name = "sreg",
+ .get = get_sreg,
+ .put = put_sreg,
+};
+
+static int get_segment(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ uint32_t *ramp = opaque;
+ uint8_t temp;
+
+ temp = qemu_get_byte(f);
+ *ramp = ((uint32_t)temp) << 16;
+ return 0;
+}
+
+static int put_segment(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, QJSON *vmdesc)
+{
+ uint32_t *ramp = opaque;
+ uint8_t temp = *ramp >> 16;
+
+ qemu_put_byte(f, temp);
+ return 0;
+}
+
+static const VMStateInfo vms_rampD = {
+ .name = "rampD",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampX = {
+ .name = "rampX",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampY = {
+ .name = "rampY",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampZ = {
+ .name = "rampZ",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_eind = {
+ .name = "eind",
+ .get = get_segment,
+ .put = put_segment,
+};
+
+const VMStateDescription vms_avr_cpu = {
+ .name = "cpu",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.pc_w, AVRCPU),
+ VMSTATE_UINT32(env.sp, AVRCPU),
+ VMSTATE_UINT32(env.skip, AVRCPU),
+
+ VMSTATE_UINT32_ARRAY(env.r, AVRCPU, NUMBER_OF_CPU_REGISTERS),
+
+ VMSTATE_SINGLE(env, AVRCPU, 0, vms_sreg, CPUAVRState),
+ VMSTATE_SINGLE(env.rampD, AVRCPU, 0, vms_rampD, uint32_t),
+ VMSTATE_SINGLE(env.rampX, AVRCPU, 0, vms_rampX, uint32_t),
+ VMSTATE_SINGLE(env.rampY, AVRCPU, 0, vms_rampY, uint32_t),
+ VMSTATE_SINGLE(env.rampZ, AVRCPU, 0, vms_rampZ, uint32_t),
+ VMSTATE_SINGLE(env.eind, AVRCPU, 0, vms_eind, uint32_t),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/avr/translate.c b/target/avr/translate.c
new file mode 100644
index 0000000000..648dcd5c3e
--- /dev/null
+++ b/target/avr/translate.c
@@ -0,0 +1,3061 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2019-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "tcg/tcg.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+#include "exec/translator.h"
+#include "exec/gen-icount.h"
+
+/*
+ * Define BREAKPOINT_ON_BREAK to translate the BREAK instruction to a
+ * breakpoint; an active debugging connection is assumed.
+ * This is used by the
+ * https://github.com/seharris/qemu-avr-tests/tree/master/instruction-tests
+ * tests.
+ */
+#undef BREAKPOINT_ON_BREAK
+
+static TCGv cpu_pc;
+
+static TCGv cpu_Cf;
+static TCGv cpu_Zf;
+static TCGv cpu_Nf;
+static TCGv cpu_Vf;
+static TCGv cpu_Sf;
+static TCGv cpu_Hf;
+static TCGv cpu_Tf;
+static TCGv cpu_If;
+
+static TCGv cpu_rampD;
+static TCGv cpu_rampX;
+static TCGv cpu_rampY;
+static TCGv cpu_rampZ;
+
+static TCGv cpu_r[NUMBER_OF_CPU_REGISTERS];
+static TCGv cpu_eind;
+static TCGv cpu_sp;
+
+static TCGv cpu_skip;
+
+static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+};
+#define REG(x) (cpu_r[x])
+
+enum {
+ DISAS_EXIT = DISAS_TARGET_0, /* We want return to the cpu main loop. */
+ DISAS_LOOKUP = DISAS_TARGET_1, /* We have a variable condition exit. */
+ DISAS_CHAIN = DISAS_TARGET_2, /* We have a single condition exit. */
+};
+
+typedef struct DisasContext DisasContext;
+
+/* This is the state at translation time. */
+struct DisasContext {
+ TranslationBlock *tb;
+
+ CPUAVRState *env;
+ CPUState *cs;
+
+ target_long npc;
+ uint32_t opcode;
+
+ /* Routine used to access memory */
+ int memidx;
+ int bstate;
+ int singlestep;
+
+ /*
+ * some AVR instructions can cause the following instruction to be skipped
+ * Let's name those instructions
+ * A - instruction that can skip the next one
+ * B - instruction that can be skipped; this depends on execution of A
+ * there are two scenarios
+ * 1. A and B belong to the same translation block
+ * 2. A is the last instruction in the translation block and B is the first
+ * instruction of the next one
+ *
+ * following variables are used to simplify the skipping logic, they are
+ * used in the following manner (sketch)
+ *
+ * TCGLabel *skip_label = NULL;
+ * if (ctx.skip_cond != TCG_COND_NEVER) {
+ * skip_label = gen_new_label();
+ * tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
+ * }
+ *
+ * if (free_skip_var0) {
+ * tcg_temp_free(skip_var0);
+ * free_skip_var0 = false;
+ * }
+ *
+ * translate(&ctx);
+ *
+ * if (skip_label) {
+ * gen_set_label(skip_label);
+ * }
+ */
+ TCGv skip_var0;
+ TCGv skip_var1;
+ TCGCond skip_cond;
+ bool free_skip_var0;
+};
+
+void avr_cpu_tcg_init(void)
+{
+ int i;
+
+#define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
+ cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
+ cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
+ cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
+ cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
+ cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
+ cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
+ cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
+ cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
+ cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
+ cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
+ cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
+ cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
+ cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
+ cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
+ cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
+ cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");
+
+ for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
+ cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
+ reg_names[i]);
+ }
+#undef AVR_REG_OFFS
+}
+
+static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 16);
+}
+
+static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 8);
+}
+
+static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
+{
+ return 24 + (indx % 4) * 2;
+}
+
+static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
+{
+ return (indx % 16) * 2;
+}
+
+static uint16_t next_word(DisasContext *ctx)
+{
+ return cpu_lduw_code(ctx->env, ctx->npc++ * 2);
+}
+
+static int append_16(DisasContext *ctx, int x)
+{
+ return x << 16 | next_word(ctx);
+}
+
+static bool avr_have_feature(DisasContext *ctx, int feature)
+{
+ if (!avr_feature(ctx->env, feature)) {
+ gen_helper_unsupported(cpu_env);
+ ctx->bstate = DISAS_NORETURN;
+ return false;
+ }
+ return true;
+}
+
+static bool decode_insn(DisasContext *ctx, uint16_t insn);
+#include "decode_insn.inc.c"
+
+/*
+ * Arithmetic Instructions
+ */
+
+/*
+ * Utility functions for updating status registers:
+ *
+ * - gen_add_CHf()
+ * - gen_add_Vf()
+ * - gen_sub_CHf()
+ * - gen_sub_Vf()
+ * - gen_NSf()
+ * - gen_ZNSf()
+ *
+ */
+
+static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ TCGv t3 = tcg_temp_new_i32();
+
+ tcg_gen_and_tl(t1, Rd, Rr); /* t1 = Rd & Rr */
+ tcg_gen_andc_tl(t2, Rd, R); /* t2 = Rd & ~R */
+ tcg_gen_andc_tl(t3, Rr, R); /* t3 = Rr & ~R */
+ tcg_gen_or_tl(t1, t1, t2); /* t1 = t1 | t2 | t3 */
+ tcg_gen_or_tl(t1, t1, t3);
+
+ tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */
+ tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */
+ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
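+
+/*
+ * Example: 0x0f + 0x01 = 0x10 sets Hf (carry out of bit 3) but not Cf,
+ * while 0xff + 0x01 = 0x00 sets both Cf and Hf.
+ */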
+
+static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ /* t1 = Rd & Rr & ~R | ~Rd & ~Rr & R */
+ /* = (Rd ^ R) & ~(Rd ^ Rr) */
+ tcg_gen_xor_tl(t1, Rd, R);
+ tcg_gen_xor_tl(t2, Rd, Rr);
+ tcg_gen_andc_tl(t1, t1, t2);
+
+ tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
+
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
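+
+/*
+ * Example: 0x7f + 0x01 = 0x80 sets Vf (two non-negative operands produce a
+ * negative result); 0x80 + 0x80 = 0x00 sets Vf as well (two negative
+ * operands produce a non-negative result).
+ */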
+
+static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ TCGv t3 = tcg_temp_new_i32();
+
+ tcg_gen_not_tl(t1, Rd); /* t1 = ~Rd */
+ tcg_gen_and_tl(t2, t1, Rr); /* t2 = ~Rd & Rr */
+ tcg_gen_or_tl(t3, t1, Rr); /* t3 = (~Rd | Rr) & R */
+ tcg_gen_and_tl(t3, t3, R);
+ tcg_gen_or_tl(t2, t2, t3); /* t2 = ~Rd & Rr | ~Rd & R | R & Rr */
+
+ tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */
+ tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */
+ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
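+
+/*
+ * Example: 0x00 - 0x01 = 0xff sets Cf (borrow from bit 7) and Hf (borrow
+ * from bit 3); 0x10 - 0x01 = 0x0f sets Hf only.
+ */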
+
+static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ /* t1 = Rd & ~Rr & ~R | ~Rd & Rr & R */
+ /* = (Rd ^ R) & (Rd ^ Rr) */
+ tcg_gen_xor_tl(t1, Rd, R);
+ tcg_gen_xor_tl(t2, Rd, Rr);
+ tcg_gen_and_tl(t1, t1, t2);
+
+ tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
+
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
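+
+/*
+ * Example: 0x80 - 0x01 = 0x7f sets Vf (a negative value minus a positive
+ * one yields a non-negative result).
+ */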
+
+static void gen_NSf(TCGv R)
+{
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+static void gen_ZNSf(TCGv R)
+{
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+/*
+ * Adds two registers without the C Flag and places the result in the
+ * destination register Rd.
+ */
+static bool trans_ADD(DisasContext *ctx, arg_ADD *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_add_CHf(R, Rd, Rr);
+ gen_add_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds two registers and the contents of the C Flag and places the result in
+ * the destination register Rd.
+ */
+static bool trans_ADC(DisasContext *ctx, arg_ADC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr + Cf */
+ tcg_gen_add_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_add_CHf(R, Rd, Rr);
+ gen_add_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds an immediate value (0 - 63) to a register pair and places the result
+ * in the register pair. This instruction operates on the upper four register
+ * pairs, and is well suited for operations on the pointer registers. This
+ * instruction is not available in all devices. Refer to the device specific
+ * instruction set summary.
+ */
+static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ int Imm = (a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv Rd = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
+ tcg_gen_addi_tl(R, Rd, Imm); /* R = Rd + Imm */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
+ tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15);
+ tcg_gen_andc_tl(cpu_Vf, R, Rd); /* Vf = R & ~Rd */
+ tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf);/* Sf = Nf ^ Vf */
+
+ /* update output registers */
+ tcg_gen_andi_tl(RdL, R, 0xff);
+ tcg_gen_shri_tl(RdH, R, 8);
+
+ tcg_temp_free_i32(Rd);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
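+
+/*
+ * Example: ADIW r25:r24, 1 with r25:r24 = 0xffff wraps to 0x0000 and sets
+ * Cf and Zf; Vf is only set when the sign bit flips from 0 to 1, as in
+ * 0x7fff + 1 = 0x8000.
+ */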
+
+/*
+ * Subtracts two registers and places the result in the destination
+ * register Rd.
+ */
+static bool trans_SUB(DisasContext *ctx, arg_SUB *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Subtracts a register and a constant and places the result in the
+ * destination register Rd. This instruction works on registers R16 to R31
+ * and is very well suited for operations on the X, Y, and Z-pointers.
+ */
+static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = tcg_const_i32(a->imm);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * Subtracts two registers and subtracts with the C Flag and places the
+ * result in the destination register Rd.
+ */
+static bool trans_SBC(DisasContext *ctx, arg_SBC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * Previous value remains unchanged when the result is zero;
+ * cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * SBCI -- Subtract Immediate with Carry
+ */
+static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = tcg_const_i32(a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * Previous value remains unchanged when the result is zero;
+ * cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * Subtracts an immediate value (0-63) from a register pair and places the
+ * result in the register pair. This instruction operates on the upper four
+ * register pairs, and is well suited for operations on the Pointer Registers.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ int Imm = (a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv Rd = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
+ tcg_gen_subi_tl(R, Rd, Imm); /* R = Rd - Imm */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, R, Rd);
+ tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15); /* Cf = R & ~Rd */
+ tcg_gen_andc_tl(cpu_Vf, Rd, R);
+ tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15); /* Vf = Rd & ~R */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+
+ /* update output registers */
+ tcg_gen_andi_tl(RdL, R, 0xff);
+ tcg_gen_shri_tl(RdH, R, 8);
+
+ tcg_temp_free_i32(Rd);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical AND between the contents of register Rd and register
+ * Rr and places the result in the destination register Rd.
+ */
+static bool trans_AND(DisasContext *ctx, arg_AND *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_and_tl(R, Rd, Rr); /* R = Rd & Rr */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical AND between the contents of register Rd and a constant
+ * and places the result in the destination register Rd.
+ */
+static bool trans_ANDI(DisasContext *ctx, arg_ANDI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = (a->imm);
+
+ tcg_gen_andi_tl(Rd, Rd, Imm); /* Rd = Rd & Imm */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Performs the logical OR between the contents of register Rd and register
+ * Rr and places the result in the destination register Rd.
+ */
+static bool trans_OR(DisasContext *ctx, arg_OR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_or_tl(R, Rd, Rr);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical OR between the contents of register Rd and a
+ * constant and places the result in the destination register Rd.
+ */
+static bool trans_ORI(DisasContext *ctx, arg_ORI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = (a->imm);
+
+ tcg_gen_ori_tl(Rd, Rd, Imm); /* Rd = Rd | Imm */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Performs the logical EOR between the contents of register Rd and
+ * register Rr and places the result in the destination register Rd.
+ */
+static bool trans_EOR(DisasContext *ctx, arg_EOR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+
+ tcg_gen_xor_tl(Rd, Rd, Rr);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0);
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * This instruction performs a One's Complement of register Rd.
+ */
+static bool trans_COM(DisasContext *ctx, arg_COM *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_xori_tl(Rd, Rd, 0xff);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */
+ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Replaces the contents of register Rd with its two's complement; the
+ * value $80 is left unchanged.
+ */
+static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_const_i32(0);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, t0, Rd);
+ gen_sub_Vf(R, t0, Rd);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds one -1- to the contents of register Rd and places the result in the
+ * destination register Rd. The C Flag in SREG is not affected by the
+ * operation, thus allowing the INC instruction to be used on a loop counter in
+ * multiple-precision computations. When operating on unsigned numbers, only
+ * BREQ and BRNE branches can be expected to perform consistently. When
+ * operating on two's complement values, all signed branches are available.
+ */
+static bool trans_INC(DisasContext *ctx, arg_INC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_addi_tl(Rd, Rd, 1);
+ tcg_gen_andi_tl(Rd, Rd, 0xff);
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x80); /* Vf = Rd == 0x80 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Subtracts one -1- from the contents of register Rd and places the result
+ * in the destination register Rd. The C Flag in SREG is not affected by the
+ * operation, thus allowing the DEC instruction to be used on a loop counter in
+ * multiple-precision computations. When operating on unsigned values, only
+ * BREQ and BRNE branches can be expected to perform consistently. When
+ * operating on two's complement values, all signed branches are available.
+ */
+static bool trans_DEC(DisasContext *ctx, arg_DEC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_subi_tl(Rd, Rd, 1); /* Rd = Rd - 1 */
+ tcg_gen_andi_tl(Rd, Rd, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x7f); /* Vf = Rd == 0x7f */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit unsigned multiplication.
+ */
+static bool trans_MUL(DisasContext *ctx, arg_MUL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
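+
+/*
+ * Example: MUL with Rd = 200 and Rr = 100 gives 20000 = 0x4e20, so
+ * R1:R0 = 0x4e:0x20 and Cf = 0 (bit 15 of the product).
+ */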
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication.
+ */
+static bool trans_MULS(DisasContext *ctx, arg_MULS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
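+
+/*
+ * Example: MULS with Rd = 0xfe (-2) and Rr = 0x03 gives -6 = 0xfffa, so
+ * R1:R0 = 0xff:0xfa and Cf = 1 (bit 15 of the product).
+ */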
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
+ * signed and an unsigned number.
+ */
+static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make R 16 bits */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit unsigned
+ * multiplication and shifts the result one bit left.
+ */
+static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
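+
+/*
+ * Example (1.7 fixed point): FMUL with Rd = Rr = 0x40 (0.5) gives a raw
+ * product of 0x1000, so Cf = 0; after the left shift R1:R0 = 0x20:0x00,
+ * i.e. 0.25 in 1.15 format.
+ */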
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication
+ * and shifts the result one bit left.
+ */
+static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
+ * signed and an unsigned number and shifts the result one bit left.
+ */
+static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * The module is an instruction set extension to the AVR CPU, performing
+ * DES iterations. The 64-bit data block (plaintext or ciphertext) is placed in
+ * the CPU register file, registers R0-R7, where LSB of data is placed in LSB
+ * of R0 and MSB of data is placed in MSB of R7. The full 64-bit key (including
+ * parity bits) is placed in registers R8- R15, organized in the register file
+ * with LSB of key in LSB of R8 and MSB of key in MSB of R15. Executing one DES
+ * instruction performs one round in the DES algorithm. Sixteen rounds must be
+ * executed in increasing order to form the correct DES ciphertext or
+ * plaintext. Intermediate results are stored in the register file (R0-R15)
+ * after each DES instruction. The instruction's operand (K) determines which
+ * round is executed, and the half carry flag (H) determines whether encryption
+ * or decryption is performed. The DES algorithm is described in
+ * "Specifications for the Data Encryption Standard" (Federal Information
+ * Processing Standards Publication 46). Intermediate results in this
+ * implementation differ from the standard because the initial permutation and
+ * the inverse initial permutation are performed each iteration. This does not
+ * affect the result in the final ciphertext or plaintext, but reduces
+ * execution time.
+ */
+static bool trans_DES(DisasContext *ctx, arg_DES *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_DES)) {
+ return true;
+ }
+
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+
+ return true;
+}
+
+/*
+ * Branch Instructions
+ */
+static void gen_jmp_ez(DisasContext *ctx)
+{
+ tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
+ tcg_gen_or_tl(cpu_pc, cpu_pc, cpu_eind);
+ ctx->bstate = DISAS_LOOKUP;
+}
+
+static void gen_jmp_z(DisasContext *ctx)
+{
+ tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
+ ctx->bstate = DISAS_LOOKUP;
+}
+
+static void gen_push_ret(DisasContext *ctx, int ret)
+{
+ if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
+
+ TCGv t0 = tcg_const_i32((ret & 0x0000ff));
+
+ tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(t0);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
+
+ TCGv t0 = tcg_const_i32((ret & 0x00ffff));
+
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(t0);
+
+ } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
+
+ TCGv lo = tcg_const_i32((ret & 0x0000ff));
+ TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8);
+
+ tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
+ tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+}
+
+static void gen_pop_ret(DisasContext *ctx, TCGv ret)
+{
+ if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_UB);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
+ TCGv lo = tcg_temp_new_i32();
+ TCGv hi = tcg_temp_new_i32();
+
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 2);
+ tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
+
+ tcg_gen_deposit_tl(ret, lo, hi, 8, 16);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+}
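+
+/*
+ * For 3-byte PCs the push stores the low byte of the return address at SP
+ * and the upper 16 bits (big-endian) at SP-2..SP-1; gen_pop_ret() walks the
+ * same layout in reverse and reassembles the value with a deposit.
+ */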
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ TranslationBlock *tb = ctx->tb;
+
+ if (ctx->singlestep == 0) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb(tb, n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ gen_helper_debug(cpu_env);
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ ctx->bstate = DISAS_NORETURN;
+}
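+
+/*
+ * Direct jumps chain translation blocks via goto_tb; when single-stepping,
+ * the block instead sets PC, calls the debug helper and exits to the main
+ * loop so that every instruction is observed.
+ */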
+
+/*
+ * Relative jump to an address within PC - 2K + 1 and PC + 2K (words). For
+ * AVR microcontrollers with Program memory not exceeding 4K words (8KB) this
+ * instruction can address the entire memory from every address location. See
+ * also JMP.
+ */
+static bool trans_RJMP(DisasContext *ctx, arg_RJMP *a)
+{
+ int dst = ctx->npc + a->imm;
+
+ gen_goto_tb(ctx, 0, dst);
+
+ return true;
+}
+
+/*
+ * Indirect jump to the address pointed to by the Z (16 bits) Pointer
+ * Register in the Register File. The Z-pointer Register is 16 bits wide and
+ * allows jump within the lowest 64K words (128KB) section of Program memory.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_IJMP(DisasContext *ctx, arg_IJMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
+ return true;
+ }
+
+ gen_jmp_z(ctx);
+
+ return true;
+}
+
+/*
+ * Indirect jump to the address pointed to by the Z (16 bits) Pointer
+ * Register in the Register File and the EIND Register in the I/O space. This
+ * instruction allows for indirect jumps to the entire 4M (words) Program
+ * memory space. See also IJMP. This instruction is not available in all
+ * devices. Refer to the device specific instruction set summary.
+ */
+static bool trans_EIJMP(DisasContext *ctx, arg_EIJMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
+ return true;
+ }
+
+ gen_jmp_ez(ctx);
+ return true;
+}
+
+/*
+ * Jump to an address within the entire 4M (words) Program memory. See also
+ * RJMP. This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
+ return true;
+ }
+
+ gen_goto_tb(ctx, 0, a->imm);
+
+ return true;
+}
+
+/*
+ * Relative call to an address within PC - 2K + 1 and PC + 2K (words). The
+ * return address (the instruction after the RCALL) is stored onto the Stack.
+ * See also CALL. For AVR microcontrollers with Program memory not exceeding 4K
+ * words (8KB) this instruction can address the entire memory from every
+ * address location. The Stack Pointer uses a post-decrement scheme during
+ * RCALL.
+ */
+static bool trans_RCALL(DisasContext *ctx, arg_RCALL *a)
+{
+ int ret = ctx->npc;
+ int dst = ctx->npc + a->imm;
+
+ gen_push_ret(ctx, ret);
+ gen_goto_tb(ctx, 0, dst);
+
+ return true;
+}
+
+/*
+ * Indirect call of a subroutine pointed to by the Z (16 bits) Pointer Register
+ * in the Register File. The Z-pointer Register is 16 bits wide and allows calls
+ * within the lowest 64K words (128KB) section of Program memory. The Stack
+ * Pointer uses a post-decrement scheme during ICALL. This instruction is not
+ * available in all devices. Refer to the device specific instruction set
+ * summary.
+ */
+static bool trans_ICALL(DisasContext *ctx, arg_ICALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
+ return true;
+ }
+
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_jmp_z(ctx);
+
+ return true;
+}
+
+/*
+ * Indirect call of a subroutine pointed to by the Z (16 bits) Pointer
+ * Register in the Register File and the EIND Register in the I/O space. This
+ * instruction allows for indirect calls to the entire 4M (words) Program
+ * memory space. See also ICALL. The Stack Pointer uses a post-decrement scheme
+ * during EICALL. This instruction is not available in all devices. Refer to
+ * the device specific instruction set summary.
+ */
+static bool trans_EICALL(DisasContext *ctx, arg_EICALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
+ return true;
+ }
+
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_jmp_ez(ctx);
+ return true;
+}
+
+/*
+ * Calls to a subroutine within the entire Program memory. The return
+ * address (to the instruction after the CALL) will be stored onto the Stack.
+ * (See also RCALL). The Stack Pointer uses a post-decrement scheme during
+ * CALL. This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_CALL(DisasContext *ctx, arg_CALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
+ return true;
+ }
+
+ int Imm = a->imm;
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_goto_tb(ctx, 0, Imm);
+
+ return true;
+}
+
+/*
+ * Returns from subroutine. The return address is loaded from the STACK.
+ * The Stack Pointer uses a preincrement scheme during RET.
+ */
+static bool trans_RET(DisasContext *ctx, arg_RET *a)
+{
+ gen_pop_ret(ctx, cpu_pc);
+
+ ctx->bstate = DISAS_LOOKUP;
+ return true;
+}
+
+/*
+ * Returns from interrupt. The return address is loaded from the STACK and
+ * the Global Interrupt Flag is set. Note that the Status Register is not
+ * automatically stored when entering an interrupt routine, and it is not
+ * restored when returning from an interrupt routine. This must be handled by
+ * the application program. The Stack Pointer uses a pre-increment scheme
+ * during RETI.
+ */
+static bool trans_RETI(DisasContext *ctx, arg_RETI *a)
+{
+ gen_pop_ret(ctx, cpu_pc);
+ tcg_gen_movi_tl(cpu_If, 1);
+
+ /* Need to return to main loop to re-evaluate interrupts. */
+ ctx->bstate = DISAS_EXIT;
+ return true;
+}
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr, and
+ * skips the next instruction if Rd = Rr.
+ */
+static bool trans_CPSE(DisasContext *ctx, arg_CPSE *a)
+{
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = cpu_r[a->rd];
+ ctx->skip_var1 = cpu_r[a->rr];
+ return true;
+}
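+
+/*
+ * CPSE (like SBRC/SBRS/SBIC/SBIS below) only records the skip condition
+ * here; the conditional branch around the next instruction is emitted by
+ * the translation loop, as sketched in the DisasContext comment above.
+ */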
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr.
+ * None of the registers are changed. All conditional branches can be used
+ * after this instruction.
+ */
+static bool trans_CP(DisasContext *ctx, arg_CP *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr and
+ * also takes into account the previous carry. None of the registers are
+ * changed. All conditional branches can be used after this instruction.
+ */
+static bool trans_CPC(DisasContext *ctx, arg_CPC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * Previous value remains unchanged when the result is zero;
+ * cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
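+
+/*
+ * Example: a 16-bit compare is CP on the low bytes followed by CPC on the
+ * high bytes; since CPC can only clear Zf (never set it), a subsequent BREQ
+ * tests equality of the full 16-bit values.
+ */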
+
+/*
+ * This instruction performs a compare between register Rd and a constant.
+ * The register is not changed. All conditional branches can be used after this
+ * instruction.
+ */
+static bool trans_CPI(DisasContext *ctx, arg_CPI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = a->imm;
+ TCGv Rr = tcg_const_i32(Imm);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in a register and skips the next
+ * instruction if the bit is cleared.
+ */
+static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a)
+{
+ TCGv Rr = cpu_r[a->rr];
+
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = tcg_temp_new();
+ ctx->free_skip_var0 = true;
+
+ tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in a register and skips the next
+ * instruction if the bit is set.
+ */
+static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a)
+{
+ TCGv Rr = cpu_r[a->rr];
+
+ ctx->skip_cond = TCG_COND_NE;
+ ctx->skip_var0 = tcg_temp_new();
+ ctx->free_skip_var0 = true;
+
+ tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in an I/O Register and skips the
+ * next instruction if the bit is cleared. This instruction operates on the
+ * lower 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
+{
+ TCGv temp = tcg_const_i32(a->reg);
+
+ gen_helper_inb(temp, cpu_env, temp);
+ tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = temp;
+ ctx->free_skip_var0 = true;
+
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in an I/O Register and skips the
+ * next instruction if the bit is set. This instruction operates on the lower
+ * 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
+{
+ TCGv temp = tcg_const_i32(a->reg);
+
+ gen_helper_inb(temp, cpu_env, temp);
+ tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ ctx->skip_cond = TCG_COND_NE;
+ ctx->skip_var0 = temp;
+ ctx->free_skip_var0 = true;
+
+ return true;
+}
+
+/*
+ * Conditional relative branch. Tests a single bit in SREG and branches
+ * relatively to PC if the bit is cleared. This instruction branches relatively
+ * to PC in either direction (PC - 63 < = destination <= PC + 64). The
+ * parameter k is the offset from PC and is represented in two's complement
+ * form.
+ */
+static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a)
+{
+ TCGLabel *not_taken = gen_new_label();
+
+ TCGv var;
+
+ switch (a->bit) {
+ case 0x00:
+ var = cpu_Cf;
+ break;
+ case 0x01:
+ var = cpu_Zf;
+ break;
+ case 0x02:
+ var = cpu_Nf;
+ break;
+ case 0x03:
+ var = cpu_Vf;
+ break;
+ case 0x04:
+ var = cpu_Sf;
+ break;
+ case 0x05:
+ var = cpu_Hf;
+ break;
+ case 0x06:
+ var = cpu_Tf;
+ break;
+ case 0x07:
+ var = cpu_If;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, var, 0, not_taken);
+ gen_goto_tb(ctx, 0, ctx->npc + a->imm);
+ gen_set_label(not_taken);
+
+ ctx->bstate = DISAS_CHAIN;
+ return true;
+}
+
+/*
+ * Conditional relative branch. Tests a single bit in SREG and branches
+ * relatively to PC if the bit is set. This instruction branches relatively to
+ * PC in either direction (PC - 63 <= destination <= PC + 64). The parameter k
+ * is the offset from PC and is represented in two's complement form.
+ */
+static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
+{
+ TCGLabel *not_taken = gen_new_label();
+
+ TCGv var;
+
+ switch (a->bit) {
+ case 0x00:
+ var = cpu_Cf;
+ break;
+ case 0x01:
+ var = cpu_Zf;
+ break;
+ case 0x02:
+ var = cpu_Nf;
+ break;
+ case 0x03:
+ var = cpu_Vf;
+ break;
+ case 0x04:
+ var = cpu_Sf;
+ break;
+ case 0x05:
+ var = cpu_Hf;
+ break;
+ case 0x06:
+ var = cpu_Tf;
+ break;
+ case 0x07:
+ var = cpu_If;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_brcondi_i32(TCG_COND_EQ, var, 0, not_taken);
+ gen_goto_tb(ctx, 0, ctx->npc + a->imm);
+ gen_set_label(not_taken);
+
+ ctx->bstate = DISAS_CHAIN;
+ return true;
+}
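+
+/*
+ * The usual branch mnemonics are encodings of these two patterns, e.g.
+ * BREQ is BRBS with bit 1 (Zf), BRNE is BRBC with bit 1, and BRCS/BRLO
+ * are BRBS with bit 0 (Cf).
+ */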
+
+/*
+ * Data Transfer Instructions
+ */
+
+/*
+ * In the gen_set_addr and gen_get_addr functions:
+ * H is assumed to be in 0x00ff0000 format
+ * M is assumed to be in 0x000000ff format
+ * L is assumed to be in 0x000000ff format
+ */
+static void gen_set_addr(TCGv addr, TCGv H, TCGv M, TCGv L)
+{
+ tcg_gen_andi_tl(L, addr, 0x000000ff);
+
+ tcg_gen_andi_tl(M, addr, 0x0000ff00);
+ tcg_gen_shri_tl(M, M, 8);
+
+ tcg_gen_andi_tl(H, addr, 0x00ff0000);
+}
+
+static void gen_set_xaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampX, cpu_r[27], cpu_r[26]);
+}
+
+static void gen_set_yaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampY, cpu_r[29], cpu_r[28]);
+}
+
+static void gen_set_zaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampZ, cpu_r[31], cpu_r[30]);
+}
+
+static TCGv gen_get_addr(TCGv H, TCGv M, TCGv L)
+{
+ TCGv addr = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(addr, M, H, 8, 8);
+ tcg_gen_deposit_tl(addr, L, addr, 8, 16);
+
+ return addr;
+}
+
+static TCGv gen_get_xaddr(void)
+{
+ return gen_get_addr(cpu_rampX, cpu_r[27], cpu_r[26]);
+}
+
+static TCGv gen_get_yaddr(void)
+{
+ return gen_get_addr(cpu_rampY, cpu_r[29], cpu_r[28]);
+}
+
+static TCGv gen_get_zaddr(void)
+{
+ return gen_get_addr(cpu_rampZ, cpu_r[31], cpu_r[30]);
+}
+
+/*
+ * Store/load one byte to/from the data space. When the translation block was
+ * built with TB_FLAGS_FULL_ACCESS set, the access is routed through the
+ * fullwr/fullrd helpers; otherwise a plain byte store/load is generated.
+ */
+static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
+{
+ if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
+ gen_helper_fullwr(cpu_env, data, addr);
+ } else {
+ tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+ }
+}
+
+static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
+{
+ if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
+ gen_helper_fullrd(data, cpu_env, addr);
+ } else {
+ tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+ }
+}
+
+/*
+ * This instruction makes a copy of one register into another. The source
+ * register Rr is left unchanged, while the destination register Rd is loaded
+ * with a copy of Rr.
+ */
+static bool trans_MOV(DisasContext *ctx, arg_MOV *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+
+ tcg_gen_mov_tl(Rd, Rr);
+
+ return true;
+}
+
+/*
+ * This instruction makes a copy of one register pair into another register
+ * pair. The source register pair Rr+1:Rr is left unchanged, while the
+ * destination register pair Rd+1:Rd is loaded with a copy of Rr + 1:Rr. This
+ * instruction is not available in all devices. Refer to the device specific
+ * instruction set summary.
+ */
+static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MOVW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ TCGv RrL = cpu_r[a->rr];
+ TCGv RrH = cpu_r[a->rr + 1];
+
+ tcg_gen_mov_tl(RdH, RrH);
+ tcg_gen_mov_tl(RdL, RrL);
+
+ return true;
+}
+
+/*
+ * Loads an 8-bit constant directly into registers 16 to 31.
+ */
+static bool trans_LDI(DisasContext *ctx, arg_LDI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int imm = a->imm;
+
+ tcg_gen_movi_tl(Rd, imm);
+
+ return true;
+}
+
+/*
+ * Loads one byte from the data space to a register. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the register file only. The EEPROM has a separate address space.
+ * A 16-bit address must be supplied. Memory access is limited to the current
+ * data segment of 64KB. The LDS instruction uses the RAMPD Register to access
+ * memory above 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPD Register in the I/O area has to be changed.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_LDS(DisasContext *ctx, arg_LDS *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_rampD;
+ a->imm = next_word(ctx);
+
+ tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
+ tcg_gen_shli_tl(addr, addr, 16);
+ tcg_gen_ori_tl(addr, addr, a->imm);
+
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect from the data space to a register. For parts
+ * with SRAM, the data space consists of the Register File, I/O memory and
+ * internal SRAM (and external SRAM if applicable). For parts without SRAM, the
+ * data space consists of the Register File only. In some parts the Flash
+ * Memory has been mapped to the data space and can be read using this command.
+ * The EEPROM has a separate address space. The data location is pointed to by
+ * the X (16 bits) Pointer Register in the Register File. Memory access is
+ * limited to the current data segment of 64KB. To access another data segment
+ * in devices with more than 64KB data space, the RAMPX Register in the I/O
+ * area has to be changed. The X-pointer Register can either be left unchanged
+ * by the operation, or it can be post-incremented or predecremented. These
+ * features are especially suited for accessing arrays, tables, and Stack
+ * Pointer usage of the X-pointer Register. Note that only the low byte of the
+ * X-pointer is updated in devices with no more than 256 bytes data space. For
+ * such devices, the high byte of the pointer is not used by this instruction
+ * and can be used for other purposes. The RAMPX Register in the I/O area is
+ * updated in parts with more than 64KB data space or more than 64KB Program
+ * memory, and the increment/decrement is added to the entire 24-bit address on
+ * such devices. Not all variants of this instruction are available in all
+ * devices. Refer to the device specific instruction set summary. In the
+ * Reduced Core tinyAVR the LD instruction can be used to achieve the same
+ * operation as LPM since the program memory is mapped to the data memory
+ * space.
+ */
+static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect with or without displacement from the data space
+ * to a register. For parts with SRAM, the data space consists of the Register
+ * File, I/O memory and internal SRAM (and external SRAM if applicable). For
+ * parts without SRAM, the data space consists of the Register File only. In
+ * some parts the Flash Memory has been mapped to the data space and can be
+ * read using this command. The EEPROM has a separate address space. The data
+ * location is pointed to by the Y (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPY Register in the I/O area has to be changed. The Y-pointer Register
+ * can either be left unchanged by the operation, or it can be post-incremented
+ * or predecremented. These features are especially suited for accessing
+ * arrays, tables, and Stack Pointer usage of the Y-pointer Register. Note that
+ * only the low byte of the Y-pointer is updated in devices with no more than
+ * 256 bytes data space. For such devices, the high byte of the pointer is not
+ * used by this instruction and can be used for other purposes. The RAMPY
+ * Register in the I/O area is updated in parts with more than 64KB data space
+ * or more than 64KB Program memory, and the increment/decrement/displacement
+ * is added to the entire 24-bit address on such devices. Not all variants of
+ * this instruction are available in all devices. Refer to the device specific
+ * instruction set summary. In the Reduced Core tinyAVR the LD instruction can
+ * be used to achieve the same operation as LPM since the program memory is
+ * mapped to the data memory space.
+ */
+static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect with or without displacement from the data space
+ * to a register. For parts with SRAM, the data space consists of the Register
+ * File, I/O memory and internal SRAM (and external SRAM if applicable). For
+ * parts without SRAM, the data space consists of the Register File only. In
+ * some parts the Flash Memory has been mapped to the data space and can be
+ * read using this command. The EEPROM has a separate address space. The data
+ * location is pointed to by the Z (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed. The Z-pointer Register
+ * can either be left unchanged by the operation, or it can be post-incremented
+ * or predecremented. These features are especially suited for Stack Pointer
+ * usage of the Z-pointer Register, however because the Z-pointer Register can
+ * be used for indirect subroutine calls, indirect jumps and table lookup, it
+ * is often more convenient to use the X or Y-pointer as a dedicated Stack
+ * Pointer. Note that only the low byte of the Z-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPZ Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the
+ * increment/decrement/displacement is added to the entire 24-bit address on
+ * such devices. Not all variants of this instruction are available in all
+ * devices. Refer to the device specific instruction set summary. In the
+ * Reduced Core tinyAVR the LD instruction can be used to achieve the same
+ * operation as LPM since the program memory is mapped to the data memory
+ * space. For using the Z-pointer for table lookup in Program memory see the
+ * LPM and ELPM instructions.
+ */
+static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte from a Register to the data space. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the Register File only. The EEPROM has a separate address space.
+ * A 16-bit address must be supplied. Memory access is limited to the current
+ * data segment of 64KB. The STS instruction uses the RAMPD Register to access
+ * memory above 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPD Register in the I/O area has to be changed.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_STS(DisasContext *ctx, arg_STS *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_rampD;
+ a->imm = next_word(ctx);
+
+ tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
+ tcg_gen_shli_tl(addr, addr, 16);
+ tcg_gen_ori_tl(addr, addr, a->imm);
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect from a register to data space. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory, and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the Register File only. The EEPROM has a separate address space.
+ *
+ * The data location is pointed to by the X (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPX Register in the I/O area has to be changed.
+ *
+ * The X-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the
+ * X-pointer Register. Note that only the low byte of the X-pointer is updated
+ * in devices with no more than 256 bytes data space. For such devices, the high
+ * byte of the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPX Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement is added to the entire 24-bit address on such devices.
+ */
+static bool trans_STX1(DisasContext *ctx, arg_STX1 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STX2(DisasContext *ctx, arg_STX2 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STX3(DisasContext *ctx, arg_STX3 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect with or without displacement from a register to data
+ * space. For parts with SRAM, the data space consists of the Register File, I/O
+ * memory, and internal SRAM (and external SRAM if applicable). For parts
+ * without SRAM, the data space consists of the Register File only. The EEPROM
+ * has a separate address space.
+ *
+ * The data location is pointed to by the Y (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPY Register in the I/O area has to be changed.
+ *
+ * The Y-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the Y-pointer
+ * Register. Note that only the low byte of the Y-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPY Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement / displacement is added to the entire 24-bit address on such
+ * devices.
+ */
+static bool trans_STY2(DisasContext *ctx, arg_STY2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STY3(DisasContext *ctx, arg_STY3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STDY(DisasContext *ctx, arg_STDY *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect with or without displacement from a register to data
+ * space. For parts with SRAM, the data space consists of the Register File, I/O
+ * memory, and internal SRAM (and external SRAM if applicable). For parts
+ * without SRAM, the data space consists of the Register File only. The EEPROM
+ * has a separate address space.
+ *
+ * The data location is pointed to by the Z (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the Z-pointer
+ * Register. Note that only the low byte of the Z-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPZ Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement / displacement is added to the entire 24-bit address on such
+ * devices.
+ */
+static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte pointed to by the Z-register into the destination
+ * register Rd. This instruction features a 100% space effective constant
+ * initialization or constant data fetch. The Program memory is organized in
+ * 16-bit words while the Z-pointer is a byte address. Thus, the least
+ * significant bit of the Z-pointer selects either low byte (ZLSB = 0) or high
+ * byte (ZLSB = 1). This instruction can address the first 64KB (32K words) of
+ * Program memory. The Z-pointer Register can either be left unchanged by the
+ * operation, or it can be incremented. The incrementation does not apply to
+ * the RAMPZ Register.
+ *
+ * Devices with Self-Programming capability can use the LPM instruction to read
+ * the Fuse and Lock bit values.
+ */
+static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[0];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPMX)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ tcg_gen_andi_tl(L, addr, 0xff);
+ tcg_gen_shri_tl(addr, addr, 8);
+ tcg_gen_andi_tl(H, addr, 0xff);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
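+
+/*
+ * As a concrete example of the post-incrementing form above: with R31:R30 =
+ * 0x01:0x01 (Z = 0x0101), LPM r16, Z+ loads the high byte of program word
+ * 0x0080 into r16 and leaves Z = 0x0102.
+ */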
+
+/*
+ * Loads one byte pointed to by the Z-register and the RAMPZ Register in
+ * the I/O space, and places this byte in the destination register Rd. This
+ * instruction features a 100% space effective constant initialization or
+ * constant data fetch. The Program memory is organized in 16-bit words while
+ * the Z-pointer is a byte address. Thus, the least significant bit of the
+ * Z-pointer selects either low byte (ZLSB = 0) or high byte (ZLSB = 1). This
+ * instruction can address the entire Program memory space. The Z-pointer
+ * Register can either be left unchanged by the operation, or it can be
+ * incremented. The incrementation applies to the entire 24-bit concatenation
+ * of the RAMPZ and Z-pointer Registers.
+ *
+ * Devices with Self-Programming capability can use the ELPM instruction to
+ * read the Fuse and Lock bit value.
+ */
+static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[0];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPMX)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * SPM can be used to erase a page in the Program memory, to write a page
+ * in the Program memory (that is already erased), and to set Boot Loader Lock
+ * bits. In some devices, the Program memory can be written one word at a time,
+ * in other devices an entire page can be programmed simultaneously after first
+ * filling a temporary page buffer. In all cases, the Program memory must be
+ * erased one page at a time. When erasing the Program memory, the RAMPZ and
+ * Z-register are used as page address. When writing the Program memory, the
+ * RAMPZ and Z-register are used as page or word address, and the R1:R0
+ * register pair is used as data(1). When setting the Boot Loader Lock bits,
+ * the R1:R0 register pair is used as data. Refer to the device documentation
+ * for detailed description of SPM usage. This instruction can address the
+ * entire Program memory.
+ *
+ * The SPM instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ *
+ * Note: 1. R1 determines the instruction high byte, and R0 determines the
+ * instruction low byte.
+ */
+static bool trans_SPM(DisasContext *ctx, arg_SPM *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_SPM)) {
+ return true;
+ }
+
+ return true;
+}
+
+static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_SPMX)) {
+ return true;
+ }
+
+ return true;
+}
+
+/*
+ * Loads data from the I/O Space (Ports, Timers, Configuration Registers,
+ * etc.) into register Rd in the Register File.
+ */
+static bool trans_IN(DisasContext *ctx, arg_IN *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv port = tcg_const_i32(a->imm);
+
+ gen_helper_inb(Rd, cpu_env, port);
+
+ tcg_temp_free_i32(port);
+
+ return true;
+}
+
+/*
+ * Stores data from register Rr in the Register File to I/O Space (Ports,
+ * Timers, Configuration Registers, etc.).
+ */
+static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv port = tcg_const_i32(a->imm);
+
+ gen_helper_outb(cpu_env, port, Rd);
+
+ tcg_temp_free_i32(port);
+
+ return true;
+}
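+
+/*
+ * As an example for the two instructions above: on most classic AVR cores
+ * SREG sits at I/O address 0x3f, so IN r16, 0x3f reads the status register
+ * into r16 and OUT 0x3f, r16 writes it back.
+ */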
+
+/*
+ * This instruction stores the contents of register Rr on the STACK. The
+ * Stack Pointer is post-decremented by 1 after the PUSH. This instruction is
+ * not available in all devices. Refer to the device specific instruction set
+ * summary.
+ */
+static bool trans_PUSH(DisasContext *ctx, arg_PUSH *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ gen_data_store(ctx, Rd, cpu_sp);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ return true;
+}
+
+/*
+ * This instruction loads register Rd with a byte from the STACK. The Stack
+ * Pointer is pre-incremented by 1 before the POP. This instruction is not
+ * available in all devices. Refer to the device specific instruction set
+ * summary.
+ */
+static bool trans_POP(DisasContext *ctx, arg_POP *a)
+{
+ /*
+ * Using a temp to work around some strange behaviour:
+ * tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ * gen_data_load(ctx, Rd, cpu_sp);
+ * seems to cause the add to happen twice.
+ * This doesn't happen if either the add or the load is removed.
+ */
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_addi_tl(t1, cpu_sp, 1);
+ gen_data_load(ctx, Rd, t1);
+ tcg_gen_mov_tl(cpu_sp, t1);
+
+ return true;
+}
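+
+/*
+ * Example of the stack discipline implemented above: with SP = 0x08ff,
+ * PUSH r16 stores r16 at 0x08ff and leaves SP = 0x08fe; a following POP r16
+ * pre-increments SP back to 0x08ff and reloads r16 from that address.
+ */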
+
+/*
+ * Exchanges one byte indirect between register and data space. The data
+ * location is pointed to by the Z (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for writing/reading status bits stored in SRAM.
+ */
+static bool trans_XCH(DisasContext *ctx, arg_XCH *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_load(ctx, t0, addr);
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_mov_tl(Rd, t0);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
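+
+/*
+ * Example: if r16 = 0x55 and mem[Z] = 0xaa, XCH Z, r16 leaves r16 = 0xaa
+ * and mem[Z] = 0x55.
+ */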
+
+/*
+ * Loads one byte indirect from data space to register and sets bits in data
+ * space specified by the register. The instruction can only be used towards
+ * internal SRAM. The data location is pointed to by the Z (16 bits) Pointer
+ * Register in the Register File. Memory access is limited to the current data
+ * segment of 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for setting status bits stored in SRAM.
+ */
+static bool trans_LAS(DisasContext *ctx, arg_LAS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rr = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_or_tl(t1, t0, Rr);
+ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
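+
+/*
+ * Example: if mem[Z] = 0x0f and Rr = 0xf0, LAS Z, Rr stores 0xff to mem[Z]
+ * and returns the old memory value 0x0f in Rr.
+ */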
+
+/*
+ * Loads one byte indirect from data space to register and clears the bits
+ * in data space specified by the register. The instruction can
+ * only be used towards internal SRAM. The data location is pointed to by
+ * the Z (16 bits) Pointer Register in the Register File. Memory access is
+ * limited to the current data segment of 64KB. To access another data
+ * segment in devices with more than 64KB data space, the RAMPZ Register in
+ * the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for clearing status bits stored in SRAM.
+ */
+static bool trans_LAC(DisasContext *ctx, arg_LAC *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rr = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_andc_tl(t1, t0, Rr); /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */
+ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
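+
+/*
+ * Example: if mem[Z] = 0xff and Rr = 0x0f, LAC Z, Rr stores 0xf0 to mem[Z]
+ * (the bits set in Rr are cleared) and returns the old value 0xff in Rr.
+ */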
+
+/*
+ * Loads one byte indirect from data space to register and toggles bits in
+ * the data space specified by the register. The instruction can only be used
+ * towards SRAM. The data location is pointed to by the Z (16 bits) Pointer
+ * Register in the Register File. Memory access is limited to the current data
+ * segment of 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for changing status bits stored in SRAM.
+ */
+static bool trans_LAT(DisasContext *ctx, arg_LAT *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_xor_tl(t1, t0, Rd);
+ tcg_gen_mov_tl(Rd, t0); /* Rd = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
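+
+/*
+ * Example: if mem[Z] = 0xaa and Rd = 0x0f, LAT Z, Rd stores 0xa5 to mem[Z]
+ * (the bits set in Rd are toggled) and returns the old value 0xaa in Rd.
+ */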
+
+/*
+ * Bit and Bit-test Instructions
+ */
+static void gen_rshift_ZNVSf(TCGv R)
+{
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Vf, cpu_Nf, cpu_Cf); /* Vf = Nf ^ Cf */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+/*
+ * Shifts all bits in Rd one place to the right. Bit 7 is cleared. Bit 0 is
+ * loaded into the C Flag of the SREG. This operation effectively divides an
+ * unsigned value by two. The C Flag can be used to round the result.
+ */
+static bool trans_LSR(DisasContext *ctx, arg_LSR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */
+ tcg_gen_shri_tl(Rd, Rd, 1);
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, Rd, 0); /* Zf = Rd == 0 */
+ tcg_gen_movi_tl(cpu_Nf, 0);
+ tcg_gen_mov_tl(cpu_Vf, cpu_Cf);
+ tcg_gen_mov_tl(cpu_Sf, cpu_Vf);
+
+ return true;
+}
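+
+/*
+ * Example: LSR on Rd = 0x07 leaves Rd = 0x03 with Cf = 1, so the discarded
+ * bit 0 is available for rounding or for a following ROR.
+ */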
+
+/*
+ * Shifts all bits in Rd one place to the right. The C Flag is shifted into
+ * bit 7 of Rd. Bit 0 is shifted into the C Flag. This operation, combined
+ * with ASR, effectively divides multi-byte signed values by two. Combined with
+ * LSR it effectively divides multi-byte unsigned values by two. The Carry Flag
+ * can be used to round the result.
+ */
+static bool trans_ROR(DisasContext *ctx, arg_ROR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_shli_tl(t0, cpu_Cf, 7);
+
+ /* update status register */
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1);
+
+ /* update output register */
+ tcg_gen_shri_tl(Rd, Rd, 1);
+ tcg_gen_or_tl(Rd, Rd, t0);
+
+ /* update status register */
+ gen_rshift_ZNVSf(Rd);
+
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
+
+/*
+ * Shifts all bits in Rd one place to the right. Bit 7 is held constant. Bit 0
+ * is loaded into the C Flag of the SREG. This operation effectively divides a
+ * signed value by two without changing its sign. The Carry Flag can be used to
+ * round the result.
+ */
+static bool trans_ASR(DisasContext *ctx, arg_ASR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+
+ /* update status register */
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */
+
+ /* update output register */
+ tcg_gen_andi_tl(t0, Rd, 0x80); /* Rd = (Rd & 0x80) | (Rd >> 1) */
+ tcg_gen_shri_tl(Rd, Rd, 1);
+ tcg_gen_or_tl(Rd, Rd, t0);
+
+ /* update status register */
+ gen_rshift_ZNVSf(Rd);
+
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
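+
+/*
+ * Example of the multi-byte idiom the three shifts above support: ASR r17
+ * followed by ROR r16 shifts the signed 16-bit value r17:r16 one place to
+ * the right, while LSR r17 / ROR r16 does the same for an unsigned value;
+ * the final Cf holds the bit shifted out of r16.
+ */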
+
+/*
+ * Swaps high and low nibbles in a register.
+ */
+static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(t0, Rd, 0x0f);
+ tcg_gen_shli_tl(t0, t0, 4);
+ tcg_gen_andi_tl(t1, Rd, 0xf0);
+ tcg_gen_shri_tl(t1, t1, 4);
+ tcg_gen_or_tl(Rd, t0, t1);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
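+
+/*
+ * Example: SWAP on Rd = 0xa5 gives Rd = 0x5a; no SREG flags are affected.
+ */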
+
+/*
+ * Sets a specified bit in an I/O Register. This instruction operates on
+ * the lower 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
+{
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_const_i32(a->reg);
+
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_ori_tl(data, data, 1 << a->bit);
+ gen_helper_outb(cpu_env, port, data);
+
+ tcg_temp_free_i32(port);
+ tcg_temp_free_i32(data);
+
+ return true;
+}
+
+/*
+ * Clears a specified bit in an I/O Register. This instruction operates on
+ * the lower 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
+{
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_const_i32(a->reg);
+
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, ~(1 << a->bit));
+ gen_helper_outb(cpu_env, port, data);
+
+ tcg_temp_free_i32(data);
+ tcg_temp_free_i32(port);
+
+ return true;
+}
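+
+/*
+ * Example for the two instructions above: SBI 0x05, 3 reads I/O register
+ * 0x05, ORs in bit 3 and writes the result back, while CBI 0x05, 3 clears
+ * the same bit; both are modelled here as read-modify-write sequences via
+ * gen_helper_inb / gen_helper_outb.
+ */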
+
+/*
+ * Stores bit b from Rd to the T Flag in SREG (Status Register).
+ */
+static bool trans_BST(DisasContext *ctx, arg_BST *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_andi_tl(cpu_Tf, Rd, 1 << a->bit);
+ tcg_gen_shri_tl(cpu_Tf, cpu_Tf, a->bit);
+
+ return true;
+}
+
+/*
+ * Copies the T Flag in the SREG (Status Register) to bit b in register Rd.
+ */
+static bool trans_BLD(DisasContext *ctx, arg_BLD *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */
+ tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */
+ tcg_gen_or_tl(Rd, Rd, t1);
+
+ tcg_temp_free_i32(t1);
+
+ return true;
+}
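+
+/*
+ * Example for the two instructions above: BST r16, 2 copies bit 2 of r16
+ * into the T flag, and a following BLD r17, 4 copies the T flag into bit 4
+ * of r17, moving a single bit between registers.
+ */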
+
+/*
+ * Sets a single Flag or bit in SREG.
+ */
+static bool trans_BSET(DisasContext *ctx, arg_BSET *a)
+{
+ switch (a->bit) {
+ case 0x00:
+ tcg_gen_movi_tl(cpu_Cf, 0x01);
+ break;
+ case 0x01:
+ tcg_gen_movi_tl(cpu_Zf, 0x01);
+ break;
+ case 0x02:
+ tcg_gen_movi_tl(cpu_Nf, 0x01);
+ break;
+ case 0x03:
+ tcg_gen_movi_tl(cpu_Vf, 0x01);
+ break;
+ case 0x04:
+ tcg_gen_movi_tl(cpu_Sf, 0x01);
+ break;
+ case 0x05:
+ tcg_gen_movi_tl(cpu_Hf, 0x01);
+ break;
+ case 0x06:
+ tcg_gen_movi_tl(cpu_Tf, 0x01);
+ break;
+ case 0x07:
+ tcg_gen_movi_tl(cpu_If, 0x01);
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * Clears a single Flag in SREG.
+ */
+static bool trans_BCLR(DisasContext *ctx, arg_BCLR *a)
+{
+ switch (a->bit) {
+ case 0x00:
+ tcg_gen_movi_tl(cpu_Cf, 0x00);
+ break;
+ case 0x01:
+ tcg_gen_movi_tl(cpu_Zf, 0x00);
+ break;
+ case 0x02:
+ tcg_gen_movi_tl(cpu_Nf, 0x00);
+ break;
+ case 0x03:
+ tcg_gen_movi_tl(cpu_Vf, 0x00);
+ break;
+ case 0x04:
+ tcg_gen_movi_tl(cpu_Sf, 0x00);
+ break;
+ case 0x05:
+ tcg_gen_movi_tl(cpu_Hf, 0x00);
+ break;
+ case 0x06:
+ tcg_gen_movi_tl(cpu_Tf, 0x00);
+ break;
+ case 0x07:
+ tcg_gen_movi_tl(cpu_If, 0x00);
+ break;
+ }
+
+ return true;
+}
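+
+/*
+ * The usual SREG mnemonics are encodings of the two instructions above:
+ * SEC/SEZ/SEN/SEV/SES/SEH/SET/SEI are BSET 0..7 and CLC/CLZ/CLN/CLV/CLS/
+ * CLH/CLT/CLI are BCLR 0..7, so e.g. SEI maps to BSET 7 (set the I flag).
+ */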
+
+/*
+ * MCU Control Instructions
+ */
+
+/*
+ * The BREAK instruction is used by the On-chip Debug system, and is
+ * normally not used in the application software. When the BREAK instruction is
+ * executed, the AVR CPU is set in the Stopped Mode. This gives the On-chip
+ * Debugger access to internal resources. If any Lock bits are set, or either
+ * the JTAGEN or OCDEN Fuses are unprogrammed, the CPU will treat the BREAK
+ * instruction as a NOP and will not enter the Stopped mode. This instruction
+ * is not available in all devices. Refer to the device specific instruction
+ * set summary.
+ */
+static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_BREAK)) {
+ return true;
+ }
+
+#ifdef BREAKPOINT_ON_BREAK
+ tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
+ gen_helper_debug(cpu_env);
+ ctx->bstate = DISAS_EXIT;
+#else
+ /* NOP */
+#endif
+
+ return true;
+}
+
+/*
+ * This instruction performs a single cycle No Operation.
+ */
+static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
+{
+ /* NOP */
+
+ return true;
+}
+
+/*
+ * This instruction sets the circuit in sleep mode defined by the MCU
+ * Control Register.
+ */
+static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
+{
+ gen_helper_sleep(cpu_env);
+ ctx->bstate = DISAS_NORETURN;
+ return true;
+}
+
+/*
+ * This instruction resets the Watchdog Timer. This instruction must be
+ * executed within a limited time given by the WD prescaler. See the Watchdog
+ * Timer hardware specification.
+ */
+static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
+{
+ gen_helper_wdr(cpu_env);
+
+ return true;
+}
+
+/*
+ * Core translation mechanism functions:
+ *
+ * - translate()
+ * - canonicalize_skip()
+ * - gen_intermediate_code()
+ * - restore_state_to_opc()
+ *
+ */
+static void translate(DisasContext *ctx)
+{
+ uint32_t opcode = next_word(ctx);
+
+ if (!decode_insn(ctx, opcode)) {
+ gen_helper_unsupported(cpu_env);
+ ctx->bstate = DISAS_NORETURN;
+ }
+}
+
+/* Standardize the cpu_skip condition to NE. */
+static bool canonicalize_skip(DisasContext *ctx)
+{
+ switch (ctx->skip_cond) {
+ case TCG_COND_NEVER:
+ /* Normal case: cpu_skip is known to be false. */
+ return false;
+
+ case TCG_COND_ALWAYS:
+ /*
+ * Breakpoint case: cpu_skip is known to be true, via TB_FLAGS_SKIP.
+ * The breakpoint is on the instruction being skipped, at the start
+ * of the TranslationBlock. No need to update.
+ */
+ return false;
+
+ case TCG_COND_NE:
+ if (ctx->skip_var1 == NULL) {
+ tcg_gen_mov_tl(cpu_skip, ctx->skip_var0);
+ } else {
+ tcg_gen_xor_tl(cpu_skip, ctx->skip_var0, ctx->skip_var1);
+ ctx->skip_var1 = NULL;
+ }
+ break;
+
+ default:
+ /* Convert to a NE condition vs 0. */
+ if (ctx->skip_var1 == NULL) {
+ tcg_gen_setcondi_tl(ctx->skip_cond, cpu_skip, ctx->skip_var0, 0);
+ } else {
+ tcg_gen_setcond_tl(ctx->skip_cond, cpu_skip,
+ ctx->skip_var0, ctx->skip_var1);
+ ctx->skip_var1 = NULL;
+ }
+ ctx->skip_cond = TCG_COND_NE;
+ break;
+ }
+ if (ctx->free_skip_var0) {
+ tcg_temp_free(ctx->skip_var0);
+ ctx->free_skip_var0 = false;
+ }
+ ctx->skip_var0 = cpu_skip;
+ return true;
+}
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ CPUAVRState *env = cs->env_ptr;
+ DisasContext ctx = {
+ .tb = tb,
+ .cs = cs,
+ .env = env,
+ .memidx = 0,
+ .bstate = DISAS_NEXT,
+ .skip_cond = TCG_COND_NEVER,
+ .singlestep = cs->singlestep_enabled,
+ };
+ target_ulong pc_start = tb->pc / 2;
+ int num_insns = 0;
+
+ if (tb->flags & TB_FLAGS_FULL_ACCESS) {
+ /*
+ * This flag is set by an ST/LD instruction: the TB is regenerated so
+ * that the access goes through the full data-space helpers (CPU
+ * registers, I/O registers and memory) instead of a plain memory access
+ */
+ max_insns = 1;
+ }
+ if (ctx.singlestep) {
+ max_insns = 1;
+ }
+
+ gen_tb_start(tb);
+
+ ctx.npc = pc_start;
+ if (tb->flags & TB_FLAGS_SKIP) {
+ ctx.skip_cond = TCG_COND_ALWAYS;
+ ctx.skip_var0 = cpu_skip;
+ }
+
+ do {
+ TCGLabel *skip_label = NULL;
+
+ /* translate current instruction */
+ tcg_gen_insn_start(ctx.npc);
+ num_insns++;
+
+ /*
+ * This is due to some strange GDB behavior; assuming main has address 0x100,
+ * b main - sets a breakpoint at address 0x00000100 (code)
+ * b *0x100 - sets a breakpoint at address 0x00800100 (data)
+ */
+ if (unlikely(!ctx.singlestep &&
+ (cpu_breakpoint_test(cs, OFFSET_CODE + ctx.npc * 2, BP_ANY) ||
+ cpu_breakpoint_test(cs, OFFSET_DATA + ctx.npc * 2, BP_ANY)))) {
+ canonicalize_skip(&ctx);
+ tcg_gen_movi_tl(cpu_pc, ctx.npc);
+ gen_helper_debug(cpu_env);
+ goto done_generating;
+ }
+
+ /* Conditionally skip the next instruction, if indicated. */
+ if (ctx.skip_cond != TCG_COND_NEVER) {
+ skip_label = gen_new_label();
+ if (ctx.skip_var0 == cpu_skip) {
+ /*
+ * Copy cpu_skip so that we may zero it before the branch.
+ * This ensures that cpu_skip is non-zero after the label
+ * if and only if the skipped insn itself sets a skip.
+ */
+ ctx.free_skip_var0 = true;
+ ctx.skip_var0 = tcg_temp_new();
+ tcg_gen_mov_tl(ctx.skip_var0, cpu_skip);
+ tcg_gen_movi_tl(cpu_skip, 0);
+ }
+ if (ctx.skip_var1 == NULL) {
+ tcg_gen_brcondi_tl(ctx.skip_cond, ctx.skip_var0, 0, skip_label);
+ } else {
+ tcg_gen_brcond_tl(ctx.skip_cond, ctx.skip_var0,
+ ctx.skip_var1, skip_label);
+ ctx.skip_var1 = NULL;
+ }
+ if (ctx.free_skip_var0) {
+ tcg_temp_free(ctx.skip_var0);
+ ctx.free_skip_var0 = false;
+ }
+ ctx.skip_cond = TCG_COND_NEVER;
+ ctx.skip_var0 = NULL;
+ }
+
+ translate(&ctx);
+
+ if (skip_label) {
+ canonicalize_skip(&ctx);
+ gen_set_label(skip_label);
+ if (ctx.bstate == DISAS_NORETURN) {
+ ctx.bstate = DISAS_CHAIN;
+ }
+ }
+ } while (ctx.bstate == DISAS_NEXT
+ && num_insns < max_insns
+ && (ctx.npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
+ && !tcg_op_buf_full());
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ bool nonconst_skip = canonicalize_skip(&ctx);
+
+ switch (ctx.bstate) {
+ case DISAS_NORETURN:
+ assert(!nonconst_skip);
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ case DISAS_CHAIN:
+ if (!nonconst_skip) {
+ /* Note gen_goto_tb checks singlestep. */
+ gen_goto_tb(&ctx, 1, ctx.npc);
+ break;
+ }
+ tcg_gen_movi_tl(cpu_pc, ctx.npc);
+ /* fall through */
+ case DISAS_LOOKUP:
+ if (!ctx.singlestep) {
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ }
+ /* fall through */
+ case DISAS_EXIT:
+ if (ctx.singlestep) {
+ gen_helper_debug(cpu_env);
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+ tb->size = (ctx.npc - pc_start) * 2;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(tb->pc)) {
+ FILE *fd;
+ fd = qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(tb->pc));
+ log_target_disas(cs, tb->pc, tb->size);
+ qemu_log("\n");
+ qemu_log_unlock(fd);
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc_w = data[0];
+}