author     Peter Maydell <peter.maydell@linaro.org>   2017-01-20 11:36:47 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2017-01-20 11:36:48 +0000
commit     28f5e970a69f0be05d08eb81bdc72ab35b591dd7 (patch)
tree       934dda0e928b521c4086dc62ece5537d0d34ce99
parent     0f6bcf68a99efdc531b209551f2b760b0bdcc554 (diff)
parent     f29cacfb5fc0a6e93efc3f6d2900d82d625f143e (diff)
download   qemu-28f5e970a69f0be05d08eb81bdc72ab35b591dd7.zip
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170120' into staging
target-arm queue:
 * support virtualization in GICv3
 * enable EL2 in AArch64 CPU models
 * allow EL2 to be enabled on 'virt' board via -machine virtualization=on
 * aspeed: SMC improvements
 * m25p80: support die erase command
 * m25p80: Add Quad Page Program 4byte
 * m25p80: Improve 1GiB Micron flash definition
 * arm: Uniquely name imx25 I2C buses

# gpg: Signature made Fri 20 Jan 2017 11:31:53 GMT
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170120: (36 commits)
  hw/arm/virt: Add board property to enable EL2
  target-arm: Enable EL2 feature bit on A53 and A57
  target/arm/psci.c: If EL2 implemented, start CPUs in EL2
  hw/arm/virt-acpi-build: use SMC if booting in EL2
  hw/arm/virt: Support using SMC for PSCI
  hw/intc/arm_gicv3: Implement EL2 traps for CPU i/f regs
  hw/intc/arm_gicv3: Implement gicv3_cpuif_virt_update()
  hw/intc/arm_gicv3: Implement ICV_ registers EOIR and IAR
  hw/intc/arm_gicv3: Implement ICV_ HPPIR, DIR and RPR registers
  hw/intc/arm_gicv3: Implement ICV_ registers which are just accessors
  hw/intc/arm_gicv3: Add accessors for ICH_ system registers
  hw/intc/gicv3: Add data fields for virtualization support
  hw/intc/gicv3: Add defines for ICH system register fields
  target-arm: Add ARMCPU fields for GIC CPU i/f config
  hw/arm/virt: Wire VIRQ, VFIQ, maintenance irq lines from GIC to CPU
  target-arm: Expose output GPIO line for VCPU maintenance interrupt
  hw/intc/arm_gic: Add external IRQ lines for VIRQ and VFIQ
  hw/intc/arm_gicv3: Add external IRQ lines for VIRQ and VFIQ
  hw/arm/virt-acpi - reserve ECAM space as PNP0C02 device
  arm: virt: Fix segmentation fault when specifying an unsupported CPU
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
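The new 'virt' board option added by this queue can be exercised with a command line along the following lines. This is only a sketch: the -machine virtualization=on option and the EL2-capable cortex-a57 model come from this pull request, while the kernel image name, memory size and console arguments are illustrative placeholders.

    qemu-system-aarch64 -machine virt,virtualization=on -cpu cortex-a57 \
        -m 1024 -nographic -kernel Image -append "console=ttyAMA0"

Note that, per the hw/arm/virt.c change below, virtualization=on is rejected with an error when KVM is in use; it is only accepted with TCG emulation.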
-rw-r--r--  hw/arm/aspeed.c                      |   41
-rw-r--r--  hw/arm/imx25_pdk.c                   |    2
-rw-r--r--  hw/arm/virt-acpi-build.c             |   36
-rw-r--r--  hw/arm/virt.c                        |   88
-rw-r--r--  hw/arm/xlnx-zynqmp.c                 |    2
-rw-r--r--  hw/block/m25p80.c                    |   51
-rw-r--r--  hw/i2c/imx_i2c.c                     |    2
-rw-r--r--  hw/intc/arm_gic_common.c             |    6
-rw-r--r--  hw/intc/arm_gicv3_common.c           |   31
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c            | 1303
-rw-r--r--  hw/intc/gicv3_internal.h             |   79
-rw-r--r--  hw/intc/trace-events                 |   33
-rw-r--r--  hw/ssi/aspeed_smc.c                  |  325
-rw-r--r--  include/hw/arm/virt.h                |    5
-rw-r--r--  include/hw/intc/arm_gic_common.h     |    2
-rw-r--r--  include/hw/intc/arm_gicv3_common.h   |   21
-rw-r--r--  include/hw/ssi/aspeed_smc.h          |    4
-rw-r--r--  target/arm/cpu.c                     |   15
-rw-r--r--  target/arm/cpu.h                     |    9
-rw-r--r--  target/arm/cpu64.c                   |    8
-rw-r--r--  target/arm/helper.c                  |   21
-rw-r--r--  target/arm/psci.c                    |   25
-rw-r--r--  tests/m25p80-test.c                  |  133
23 files changed, 2125 insertions, 117 deletions
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index 40c13838fb..a92c2f1c36 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -20,6 +20,8 @@
#include "qemu/log.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
+#include "hw/loader.h"
+#include "qemu/error-report.h"
static struct arm_boot_info aspeed_board_binfo = {
.board_id = -1, /* device-tree-only board */
@@ -104,6 +106,28 @@ static const AspeedBoardConfig aspeed_boards[] = {
},
};
+#define FIRMWARE_ADDR 0x0
+
+static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size,
+ Error **errp)
+{
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+ uint8_t *storage;
+
+ if (rom_size > blk_getlength(blk)) {
+ rom_size = blk_getlength(blk);
+ }
+
+ storage = g_new0(uint8_t, rom_size);
+ if (blk_pread(blk, 0, storage, rom_size) < 0) {
+ error_setg(errp, "failed to read the initial flash content");
+ return;
+ }
+
+ rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr);
+ g_free(storage);
+}
+
static void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
Error **errp)
{
@@ -135,6 +159,7 @@ static void aspeed_board_init(MachineState *machine,
{
AspeedBoardState *bmc;
AspeedSoCClass *sc;
+ DriveInfo *drive0 = drive_get(IF_MTD, 0, 0);
bmc = g_new0(AspeedBoardState, 1);
object_initialize(&bmc->soc, (sizeof(bmc->soc)), cfg->soc_name);
@@ -168,6 +193,22 @@ static void aspeed_board_init(MachineState *machine,
aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort);
aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort);
+ /* Install first FMC flash content as a boot rom. */
+ if (drive0) {
+ AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0];
+ MemoryRegion *boot_rom = g_new(MemoryRegion, 1);
+
+ /*
+ * create a ROM region using the default mapping window size of
+ * the flash module.
+ */
+ memory_region_init_rom(boot_rom, OBJECT(bmc), "aspeed.boot_rom",
+ fl->size, &error_abort);
+ memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR,
+ boot_rom);
+ write_boot_rom(drive0, FIRMWARE_ADDR, fl->size, &error_abort);
+ }
+
aspeed_board_binfo.kernel_filename = machine->kernel_filename;
aspeed_board_binfo.initrd_filename = machine->initrd_filename;
aspeed_board_binfo.kernel_cmdline = machine->kernel_cmdline;
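The boot-ROM installation above only takes effect when an MTD drive is attached as the first FMC flash. A hedged usage sketch follows; the palmetto-bmc machine name and the flash image filename are illustrative assumptions rather than part of this patch, while if=mtd corresponds to the drive_get(IF_MTD, 0, 0) lookup above:

    qemu-system-arm -M palmetto-bmc -nographic \
        -drive file=palmetto-flash.img,format=raw,if=mtd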
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
index 025b60843e..44e741fde3 100644
--- a/hw/arm/imx25_pdk.c
+++ b/hw/arm/imx25_pdk.c
@@ -139,7 +139,7 @@ static void imx25_pdk_init(MachineState *machine)
* of simple qtest. See "make check" for details.
*/
i2c_create_slave((I2CBus *)qdev_get_child_bus(DEVICE(&s->soc.i2c[0]),
- "i2c"),
+ "i2c-bus.0"),
"ds1338", 0x68);
}
}
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 085a611173..d0a8a0ff1e 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -310,6 +310,13 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
Aml *dev_rp0 = aml_device("%s", "RP0");
aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, dev_rp0);
+
+ Aml *dev_res0 = aml_device("%s", "RES0");
+ aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(base_ecam, size_ecam, AML_READ_WRITE));
+ aml_append(dev_res0, aml_name_decl("_CRS", crs));
+ aml_append(dev, dev_res0);
aml_append(scope, dev);
}
@@ -607,6 +614,9 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
}
+ if (vms->virt && vms->gic_version == 3) {
+ gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
+ }
}
if (vms->gic_version == 3) {
@@ -643,16 +653,30 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
/* FADT */
-static void
-build_fadt(GArray *table_data, BIOSLinker *linker, unsigned dsdt_tbl_offset)
+static void build_fadt(GArray *table_data, BIOSLinker *linker,
+ VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));
unsigned dsdt_entry_offset = (char *)&fadt->dsdt - table_data->data;
+ uint16_t bootflags;
+
+ switch (vms->psci_conduit) {
+ case QEMU_PSCI_CONDUIT_DISABLED:
+ bootflags = 0;
+ break;
+ case QEMU_PSCI_CONDUIT_HVC:
+ bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT | ACPI_FADT_ARM_PSCI_USE_HVC;
+ break;
+ case QEMU_PSCI_CONDUIT_SMC:
+ bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT;
+ break;
+ default:
+ g_assert_not_reached();
+ }
- /* Hardware Reduced = 1 and use PSCI 0.2+ and with HVC */
+ /* Hardware Reduced = 1 and use PSCI 0.2+ */
fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI);
- fadt->arm_boot_flags = cpu_to_le16(ACPI_FADT_ARM_PSCI_COMPLIANT |
- ACPI_FADT_ARM_PSCI_USE_HVC);
+ fadt->arm_boot_flags = cpu_to_le16(bootflags);
/* ACPI v5.1 (fadt->revision.fadt->minor_revision) */
fadt->minor_revision = 0x1;
@@ -738,7 +762,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
/* FADT MADT GTDT MCFG SPCR pointed to by RSDT */
acpi_add_table(table_offsets, tables_blob);
- build_fadt(tables_blob, tables->linker, dsdt);
+ build_fadt(tables_blob, tables->linker, vms, dsdt);
acpi_add_table(table_offsets, tables_blob);
build_madt(tables_blob, tables->linker, vms);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 7a03f84051..6c9e8985bf 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -167,7 +167,6 @@ static const char *valid_cpus[] = {
"cortex-a53",
"cortex-a57",
"host",
- NULL
};
static bool cpuname_valid(const char *cpu)
@@ -230,9 +229,19 @@ static void fdt_add_psci_node(const VirtMachineState *vms)
uint32_t migrate_fn;
void *fdt = vms->fdt;
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0));
+ const char *psci_method;
- if (!vms->using_psci) {
+ switch (vms->psci_conduit) {
+ case QEMU_PSCI_CONDUIT_DISABLED:
return;
+ case QEMU_PSCI_CONDUIT_HVC:
+ psci_method = "hvc";
+ break;
+ case QEMU_PSCI_CONDUIT_SMC:
+ psci_method = "smc";
+ break;
+ default:
+ g_assert_not_reached();
}
qemu_fdt_add_subnode(fdt, "/psci");
@@ -264,7 +273,7 @@ static void fdt_add_psci_node(const VirtMachineState *vms)
* However, the device tree binding uses 'method' instead, so that is
* what we should use here.
*/
- qemu_fdt_setprop_string(fdt, "/psci", "method", "hvc");
+ qemu_fdt_setprop_string(fdt, "/psci", "method", psci_method);
qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend", cpu_suspend_fn);
qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", cpu_off_fn);
@@ -366,7 +375,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
armcpu->dtb_compatible);
- if (vms->using_psci && vms->smp_cpus > 1) {
+ if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED
+ && vms->smp_cpus > 1) {
qemu_fdt_setprop_string(vms->fdt, nodename,
"enable-method", "psci");
}
@@ -433,6 +443,11 @@ static void fdt_add_gic_node(VirtMachineState *vms)
2, vms->memmap[VIRT_GIC_DIST].size,
2, vms->memmap[VIRT_GIC_REDIST].base,
2, vms->memmap[VIRT_GIC_REDIST].size);
+ if (vms->virt) {
+ qemu_fdt_setprop_cells(vms->fdt, "/intc", "interrupts",
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+ }
} else {
/* 'cortex-a15-gic' means 'GIC v2' */
qemu_fdt_setprop_string(vms->fdt, "/intc", "compatible",
@@ -547,9 +562,9 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
}
- /* Wire the outputs from each CPU's generic timer to the
- * appropriate GIC PPI inputs, and the GIC's IRQ output to
- * the CPU's IRQ input.
+ /* Wire the outputs from each CPU's generic timer and the GICv3
+ * maintenance interrupt signal to the appropriate GIC PPI inputs,
+ * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
*/
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
@@ -571,9 +586,17 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
ppibase + timer_irq[irq]));
}
+ qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
+ qdev_get_gpio_in(gicdev, ppibase
+ + ARCH_GICV3_MAINT_IRQ));
+
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
sysbus_connect_irq(gicbusdev, i + smp_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+ sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
}
for (i = 0; i < NUM_IRQS; i++) {
@@ -1221,9 +1244,18 @@ static void machvirt_init(MachineState *machine)
* so it doesn't get in the way. Instead of starting secondary
* CPUs in PSCI powerdown state we will start them all running and
* let the boot ROM sort them out.
- * The usual case is that we do use QEMU's PSCI implementation.
+ * The usual case is that we do use QEMU's PSCI implementation;
+ * if the guest has EL2 then we will use SMC as the conduit,
+ * and otherwise we will use HVC (for backwards compatibility and
+ * because if we're using KVM then we must use HVC).
*/
- vms->using_psci = !(vms->secure && firmware_loaded);
+ if (vms->secure && firmware_loaded) {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
+ } else if (vms->virt) {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
+ } else {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
+ }
/* The maximum number of CPUs depends on the GIC version, or on how
* many redistributors we can fit into the memory map.
@@ -1250,6 +1282,12 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
+ if (vms->virt && kvm_enabled()) {
+ error_report("mach-virt: KVM does not support providing "
+ "Virtualization extensions to the guest CPU");
+ exit(1);
+ }
+
if (vms->secure) {
if (kvm_enabled()) {
error_report("mach-virt: KVM does not support Security extensions");
@@ -1306,8 +1344,12 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
- if (vms->using_psci) {
- object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_HVC,
+ if (!vms->virt && object_property_find(cpuobj, "has_el2", NULL)) {
+ object_property_set_bool(cpuobj, false, "has_el2", NULL);
+ }
+
+ if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
+ object_property_set_int(cpuobj, vms->psci_conduit,
"psci-conduit", NULL);
/* Secondary CPUs start in PSCI powered-down state */
@@ -1408,6 +1450,20 @@ static void virt_set_secure(Object *obj, bool value, Error **errp)
vms->secure = value;
}
+static bool virt_get_virt(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ return vms->virt;
+}
+
+static void virt_set_virt(Object *obj, bool value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ vms->virt = value;
+}
+
static bool virt_get_highmem(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1495,6 +1551,16 @@ static void virt_2_9_instance_init(Object *obj)
"Security Extensions (TrustZone)",
NULL);
+ /* EL2 is also disabled by default, for similar reasons */
+ vms->virt = false;
+ object_property_add_bool(obj, "virtualization", virt_get_virt,
+ virt_set_virt, NULL);
+ object_property_set_description(obj, "virtualization",
+ "Set on/off to enable/disable emulating a "
+ "guest CPU which implements the ARM "
+ "Virtualization Extensions",
+ NULL);
+
/* High memory is enabled by default */
vms->highmem = true;
object_property_add_bool(obj, "highmem", virt_get_highmem,
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 0d86ba35ae..bc4e66b862 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -258,6 +258,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
object_property_set_bool(OBJECT(&s->apu_cpu[i]),
s->secure, "has_el3", NULL);
+ object_property_set_bool(OBJECT(&s->apu_cpu[i]),
+ false, "has_el2", NULL);
object_property_set_int(OBJECT(&s->apu_cpu[i]), GIC_BASE_ADDR,
"reset-cbar", &error_abort);
object_property_set_bool(OBJECT(&s->apu_cpu[i]), true, "realized",
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 4c5f8c3590..e90451496e 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -74,6 +74,12 @@ typedef struct FlashPartInfo {
uint32_t n_sectors;
uint32_t page_size;
uint16_t flags;
+ /*
+ * Large SPI NOR flash chips are often stacked (multi-die) devices,
+ * and sometimes replace chip erase with die erase.
+ * This field indicates how many dies are in the chip.
+ */
+ uint8_t die_cnt;
} FlashPartInfo;
/* adapted from linux */
@@ -91,7 +97,8 @@ typedef struct FlashPartInfo {
.sector_size = (_sector_size),\
.n_sectors = (_n_sectors),\
.page_size = 256,\
- .flags = (_flags),
+ .flags = (_flags),\
+ .die_cnt = 0
#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
.part_name = _part_name,\
@@ -108,6 +115,24 @@ typedef struct FlashPartInfo {
.n_sectors = (_n_sectors),\
.page_size = 256,\
.flags = (_flags),\
+ .die_cnt = 0
+
+#define INFO_STACKED(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors,\
+ _flags, _die_cnt)\
+ .part_name = _part_name,\
+ .id = {\
+ ((_jedec_id) >> 16) & 0xff,\
+ ((_jedec_id) >> 8) & 0xff,\
+ (_jedec_id) & 0xff,\
+ ((_ext_id) >> 8) & 0xff,\
+ (_ext_id) & 0xff,\
+ },\
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\
+ .sector_size = (_sector_size),\
+ .n_sectors = (_n_sectors),\
+ .page_size = 256,\
+ .flags = (_flags),\
+ .die_cnt = _die_cnt
#define JEDEC_NUMONYX 0x20
#define JEDEC_WINBOND 0xEF
@@ -218,8 +243,10 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) },
{ INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
{ INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
- { INFO("mt25ql01g", 0x20ba21, 0, 64 << 10, 2048, ER_4K) },
- { INFO("mt25qu01g", 0x20bb21, 0, 64 << 10, 2048, ER_4K) },
+ { INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
+ { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
+ { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
+ { INFO_STACKED("mt25qu01g", 0x20bb21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -327,6 +354,7 @@ typedef enum {
PP4_4 = 0x3e,
DPP = 0xa2,
QPP = 0x32,
+ QPP_4 = 0x34,
ERASE_4K = 0x20,
ERASE4_4K = 0x21,
@@ -359,6 +387,8 @@ typedef enum {
REVCR = 0x65,
WEVCR = 0x61,
+
+ DIE_ERASE = 0xC4,
} FlashCMD;
typedef enum {
@@ -516,6 +546,16 @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd)
case BULK_ERASE:
len = s->size;
break;
+ case DIE_ERASE:
+ if (s->pi->die_cnt) {
+ len = s->size / s->pi->die_cnt;
+ offset = offset & (~(len - 1));
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "M25P80: die erase is not supported"
+ " by device\n");
+ return;
+ }
+ break;
default:
abort();
}
@@ -577,6 +617,7 @@ static inline int get_addr_length(Flash *s)
switch (s->cmd_in_progress) {
case PP4:
case PP4_4:
+ case QPP_4:
case READ4:
case QIOR4:
case ERASE4_4K:
@@ -610,6 +651,7 @@ static void complete_collecting_data(Flash *s)
switch (s->cmd_in_progress) {
case DPP:
case QPP:
+ case QPP_4:
case PP:
case PP4:
case PP4_4:
@@ -635,6 +677,7 @@ static void complete_collecting_data(Flash *s)
case ERASE4_32K:
case ERASE_SECTOR:
case ERASE4_SECTOR:
+ case DIE_ERASE:
flash_erase(s, s->cur_addr, s->cmd_in_progress);
break;
case WRSR:
@@ -877,9 +920,11 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case READ4:
case DPP:
case QPP:
+ case QPP_4:
case PP:
case PP4:
case PP4_4:
+ case DIE_ERASE:
s->needed_bytes = get_addr_length(s);
s->pos = 0;
s->len = 0;
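As a standalone illustration (not part of the patch) of the DIE_ERASE address arithmetic added to flash_erase() above, the following C sketch computes the erased range for an n25q00-style part (4 stacked dies, 2048 x 64KiB sectors); the example address is hypothetical:

    /* Minimal sketch of the die-erase geometry used by flash_erase(). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size = 2048u * (64u << 10);  /* 128 MiB, per the n25q00 entry */
        uint32_t die_cnt = 4;                 /* INFO_STACKED(..., ER_4K, 4) */
        uint32_t len = size / die_cnt;        /* 32 MiB erased per DIE_ERASE */
        uint32_t addr = 0x04800000;           /* hypothetical address in the third die */
        uint32_t offset = addr & ~(len - 1);  /* aligned down to the die base */

        printf("die size 0x%08x, erase covers 0x%08x..0x%08x\n",
               len, offset, offset + len - 1);
        return 0;
    }

With these numbers the erase covers 0x04000000..0x05ffffff, i.e. the whole die containing the requested address.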
diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c
index 37e5a62ce7..6c81b98ebd 100644
--- a/hw/i2c/imx_i2c.c
+++ b/hw/i2c/imx_i2c.c
@@ -310,7 +310,7 @@ static void imx_i2c_realize(DeviceState *dev, Error **errp)
IMX_I2C_MEM_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
- s->bus = i2c_init_bus(DEVICE(dev), "i2c");
+ s->bus = i2c_init_bus(DEVICE(dev), NULL);
}
static void imx_i2c_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index 0a1f56af19..4a8df44fb1 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -110,6 +110,12 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->parent_fiq[i]);
}
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_virq[i]);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_vfiq[i]);
+ }
/* Distributor */
memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000);
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 0aa9b9ca66..16b9b0f7eb 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -49,6 +49,27 @@ static int gicv3_post_load(void *opaque, int version_id)
return 0;
}
+static bool virt_state_needed(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ return cs->num_list_regs != 0;
+}
+
+static const VMStateDescription vmstate_gicv3_cpu_virt = {
+ .name = "arm_gicv3_cpu/virt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = virt_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
+ VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
+ VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
+ VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gicv3_cpu = {
.name = "arm_gicv3_cpu",
.version_id = 1,
@@ -75,6 +96,10 @@ static const VMStateDescription vmstate_gicv3_cpu = {
VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gicv3_cpu_virt,
+ NULL
}
};
@@ -126,6 +151,12 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
}
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
+ }
memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
"gicv3_dist", 0x10000);
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 35e8eb30fc..a9ee7fddf9 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -13,6 +13,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "cpu.h"
@@ -36,6 +37,610 @@ static bool gicv3_use_ns_bank(CPUARMState *env)
return !arm_is_secure_below_el3(env);
}
+/* The minimum BPR for the virtual interface is a configurable property */
+static inline int icv_min_vbpr(GICv3CPUState *cs)
+{
+ return 7 - cs->vprebits;
+}
+
+/* Simple accessor functions for LR fields */
+static uint32_t ich_lr_vintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
+}
+
+static uint32_t ich_lr_pintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
+}
+
+static uint32_t ich_lr_prio(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
+}
+
+static int ich_lr_state(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
+}
+
+static bool icv_access(CPUARMState *env, int hcr_flags)
+{
+ /* Return true if this ICC_ register access should really be
+ * directed to an ICV_ access. hcr_flags is a mask of
+ * HCR_EL2 bits to check: we treat this as an ICV_ access
+ * if we are in NS EL1 and at least one of the specified
+ * HCR_EL2 bits is set.
+ *
+ * ICV registers fall into four categories:
+ * * access if NS EL1 and HCR_EL2.FMO == 1:
+ * all ICV regs with '0' in their name
+ * * access if NS EL1 and HCR_EL2.IMO == 1:
+ * all ICV regs with '1' in their name
+ * * access if NS EL1 and either IMO or FMO == 1:
+ * CTLR, DIR, PMR, RPR
+ */
+ return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
+ && !arm_is_secure_below_el3(env);
+}
+
+static int read_vbpr(GICv3CPUState *cs, int grp)
+{
+ /* Read VBPR value out of the VMCR field (caller must handle
+ * VCBPR effects if required)
+ */
+ if (grp == GICV3_G0) {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH);
+ } else {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH);
+ }
+}
+
+static void write_vbpr(GICv3CPUState *cs, int grp, int value)
+{
+ /* Write a new VBPR value, handling the "writing a value less than
+ * the minimum sets it to the minimum" semantics.
+ */
+ int min = icv_min_vbpr(cs);
+
+ if (grp != GICV3_G0) {
+ min++;
+ }
+
+ value = MAX(value, min);
+
+ if (grp == GICV3_G0) {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH, value);
+ } else {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH, value);
+ }
+}
+
+static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
+{
+ /* Return a mask word which clears the unimplemented priority bits
+ * from a priority value for a virtual interrupt. (Not to be confused
+ * with the group priority, whose mask depends on the value of VBPR
+ * for the interrupt group.)
+ */
+ return ~0U << (8 - cs->vpribits);
+}
+
+static int ich_highest_active_virt_prio(GICv3CPUState *cs)
+{
+ /* Calculate the current running priority based on the set bits
+ * in the ICH Active Priority Registers.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint32_t apr = cs->ich_apr[GICV3_G0][i] |
+ cs->ich_apr[GICV3_G1NS][i];
+
+ if (!apr) {
+ continue;
+ }
+ return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
+ }
+ /* No current active interrupts: return idle priority */
+ return 0xff;
+}
+
+static int hppvi_index(GICv3CPUState *cs)
+{
+ /* Return the list register index of the highest priority pending
+ * virtual interrupt, as per the HighestPriorityVirtualInterrupt
+ * pseudocode. If no pending virtual interrupts, return -1.
+ */
+ int idx = -1;
+ int i;
+ /* Note that a list register entry with a priority of 0xff will
+ * never be reported by this function; this is the architecturally
+ * correct behaviour.
+ */
+ int prio = 0xff;
+
+ if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
+ /* Both groups disabled, definitely nothing to do */
+ return idx;
+ }
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+ int thisprio;
+
+ if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
+ /* Not Pending */
+ continue;
+ }
+
+ /* Ignore interrupts if relevant group enable not set */
+ if (lr & ICH_LR_EL2_GROUP) {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ continue;
+ }
+ } else {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ continue;
+ }
+ }
+
+ thisprio = ich_lr_prio(lr);
+
+ if (thisprio < prio) {
+ prio = thisprio;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
+{
+ /* Return a mask word which clears the subpriority bits from
+ * a priority value for a virtual interrupt in the specified group.
+ * This depends on the VBPR value:
+ * a BPR of 0 means the group priority bits are [7:1];
+ * a BPR of 1 means they are [7:2], and so on down to
+ * a BPR of 7 meaning no group priority bits at all.
+ * Which BPR to use depends on the group of the interrupt and
+ * the current ICH_VMCR_EL2.VCBPR settings.
+ */
+ if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ group = GICV3_G0;
+ }
+
+ return ~0U << (read_vbpr(cs, group) + 1);
+}
+
+static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
+{
+ /* Return true if we can signal this virtual interrupt defined by
+ * the given list register value; see the pseudocode functions
+ * CanSignalVirtualInterrupt and CanSignalVirtualInt.
+ * Compare also icc_hppi_can_preempt() which is the non-virtual
+ * equivalent of these checks.
+ */
+ int grp;
+ uint32_t mask, prio, rprio, vpmr;
+
+ if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
+ /* Virtual interface disabled */
+ return false;
+ }
+
+ /* We don't need to check that this LR is in Pending state because
+ * that has already been done in hppvi_index().
+ */
+
+ prio = ich_lr_prio(lr);
+ vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ if (prio >= vpmr) {
+ /* Priority mask masks this interrupt */
+ return false;
+ }
+
+ rprio = ich_highest_active_virt_prio(cs);
+ if (rprio == 0xff) {
+ /* No running interrupt so we can preempt */
+ return true;
+ }
+
+ grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ mask = icv_gprio_mask(cs, grp);
+
+ /* We only preempt a running interrupt if the pending interrupt's
+ * group priority is sufficient (the subpriorities are not considered).
+ */
+ if ((prio & mask) < (rprio & mask)) {
+ return true;
+ }
+
+ return false;
+}
+
+static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
+ uint32_t *misr)
+{
+ /* Return a set of bits indicating the EOI maintenance interrupt status
+ * for each list register. The EOI maintenance interrupt status is
+ * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
+ * (see the GICv3 spec for the ICH_EISR_EL2 register).
+ * If misr is not NULL then we should also collect the information
+ * about the MISR.EOI, MISR.NP and MISR.U bits.
+ */
+ uint32_t value = 0;
+ int validcount = 0;
+ bool seenpending = false;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
+ == ICH_LR_EL2_EOI) {
+ value |= (1 << i);
+ }
+ if ((lr & ICH_LR_EL2_STATE_MASK)) {
+ validcount++;
+ }
+ if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
+ seenpending = true;
+ }
+ }
+
+ if (misr) {
+ if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
+ *misr |= ICH_MISR_EL2_U;
+ }
+ if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
+ *misr |= ICH_MISR_EL2_NP;
+ }
+ if (value) {
+ *misr |= ICH_MISR_EL2_EOI;
+ }
+ }
+ return value;
+}
+
+static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
+{
+ /* Return a set of bits indicating the maintenance interrupt status
+ * (as seen in the ICH_MISR_EL2 register).
+ */
+ uint32_t value = 0;
+
+ /* Scan list registers and fill in the U, NP and EOI bits */
+ eoi_maintenance_interrupt_state(cs, &value);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
+ (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
+ value |= ICH_MISR_EL2_LRENP;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0D;
+ }
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1D;
+ }
+
+ return value;
+}
+
+static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
+{
+ /* Tell the CPU about any pending virtual interrupts or
+ * maintenance interrupts, following a change to the state
+ * of the CPU interface relevant to virtual interrupts.
+ *
+ * CAUTION: this function will call qemu_set_irq() on the
+ * CPU maintenance IRQ line, which is typically wired up
+ * to the GIC as a per-CPU interrupt. This means that it
+ * will recursively call back into the GIC code via
+ * gicv3_redist_set_irq() and thus into the CPU interface code's
+ * gicv3_cpuif_update(). It is therefore important that this
+ * function is only called as the final action of a CPU interface
+ * register write implementation, after all the GIC state
+ * fields have been updated. gicv3_cpuif_update() also must
+ * not cause this function to be called, but that happens
+ * naturally as a result of there being no architectural
+ * linkage between the physical and virtual GIC logic.
+ */
+ int idx;
+ int irqlevel = 0;
+ int fiqlevel = 0;
+ int maintlevel = 0;
+
+ idx = hppvi_index(cs);
+ trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (icv_hppi_can_preempt(cs, lr)) {
+ /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
+ if (lr & ICH_LR_EL2_GROUP) {
+ irqlevel = 1;
+ } else {
+ fiqlevel = 1;
+ }
+ }
+ }
+
+ if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
+ maintlevel = maintenance_interrupt_state(cs);
+ }
+
+ trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
+ irqlevel, maintlevel);
+
+ qemu_set_irq(cs->parent_vfiq, fiqlevel);
+ qemu_set_irq(cs->parent_virq, irqlevel);
+ qemu_set_irq(cs->maintenance_irq, maintlevel);
+}
+
+static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ uint64_t value = cs->ich_apr[grp][regno];
+
+ trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+
+ trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+
+ gicv3_cpuif_virt_update(cs);
+ return;
+}
+
+static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+ uint64_t bpr;
+ bool satinc = false;
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ grp = GICV3_G0;
+ satinc = true;
+ }
+
+ bpr = read_vbpr(cs, grp);
+
+ if (satinc) {
+ bpr++;
+ bpr = MIN(bpr, 7);
+ }
+
+ trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
+
+ return bpr;
+}
+
+static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+
+ trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ return;
+ }
+
+ write_vbpr(cs, grp, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
+
+ value &= icv_fullprio_mask(cs);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+ uint64_t value;
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+ value = extract64(cs->ich_vmcr_el2, enbit, 1);
+
+ trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+
+ trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
+ * should match the ones reported in ich_vtr_read().
+ */
+ value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
+ value |= ICC_CTLR_EL1_EOIMODE;
+ }
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ value |= ICC_CTLR_EL1_CBPR;
+ }
+
+ trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
+ 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
+ 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int prio = ich_highest_active_virt_prio(cs);
+
+ trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
+ return prio;
+}
+
+static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t value = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (grp == thisgrp) {
+ value = ich_lr_vintid(lr);
+ }
+ }
+
+ trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
+{
+ /* Activate the interrupt in the specified list register
+ * by moving it from Pending to Active state, and update the
+ * Active Priority Registers.
+ */
+ uint32_t mask = icv_gprio_mask(cs, grp);
+ int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
+ int aprbit = prio >> (8 - cs->vprebits);
+ int regno = aprbit / 32;
+ int regbit = aprbit % 32;
+
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_apr[grp][regno] |= (1 << regbit);
+}
+
+static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t intid = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
+ intid = ich_lr_vintid(lr);
+ if (intid < INTID_SECURE) {
+ icv_activate_irq(cs, idx, grp);
+ } else {
+ /* Interrupt goes from Pending to Invalid */
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ /* We will now return the (bogus) ID from the list register,
+ * as per the pseudocode.
+ */
+ }
+ }
+ }
+
+ trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), intid);
+ return intid;
+}
+
static int icc_highest_active_prio(GICv3CPUState *cs)
{
/* Calculate the current running priority based on the set bits
@@ -177,6 +782,10 @@ static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint32_t value = cs->icc_pmr_el1;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_read(env, ri);
+ }
+
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
(env->cp15.scr_el3 & SCR_FIQ)) {
/* NS access and Group 0 is inaccessible to NS: return the
@@ -200,6 +809,10 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
GICv3CPUState *cs = icc_cs_from_env(env);
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_write(env, ri, value);
+ }
+
trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
value &= 0xff;
@@ -321,6 +934,10 @@ static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint64_t intid;
+ if (icv_access(env, HCR_FMO)) {
+ return icv_iar_read(env, ri);
+ }
+
if (!icc_hppi_can_preempt(cs)) {
intid = INTID_SPURIOUS;
} else {
@@ -340,6 +957,10 @@ static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint64_t intid;
+ if (icv_access(env, HCR_IMO)) {
+ return icv_iar_read(env, ri);
+ }
+
if (!icc_hppi_can_preempt(cs)) {
intid = INTID_SPURIOUS;
} else {
@@ -446,6 +1067,190 @@ static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
}
}
+static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
+{
+ /* Return true if we should split priority drop and interrupt
+ * deactivation, ie whether the virtual EOIMode bit is set.
+ */
+ return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
+}
+
+static int icv_find_active(GICv3CPUState *cs, int irq)
+{
+ /* Given an interrupt number for an active interrupt, return the index
+ * of the corresponding list register, or -1 if there is no match.
+ * Corresponds to FindActiveVirtualInterrupt pseudocode.
+ */
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
+{
+ /* Deactivate the interrupt in the specified list register index */
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (lr & ICH_LR_EL2_HW) {
+ /* Deactivate the associated physical interrupt */
+ int pirq = ich_lr_pintid(lr);
+
+ if (pirq < INTID_SECURE) {
+ icc_deactivate_irq(cs, pirq);
+ }
+ }
+
+ /* Clear the 'active' part of the state, so ActivePending->Pending
+ * and Active->Invalid.
+ */
+ lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_lr_el2[idx] = lr;
+}
+
+static void icv_increment_eoicount(GICv3CPUState *cs)
+{
+ /* Increment the EOICOUNT field in ICH_HCR_EL2 */
+ int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH);
+
+ cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
+}
+
+static int icv_drop_prio(GICv3CPUState *cs)
+{
+ /* Drop the priority of the currently active virtual interrupt
+ * (favouring group 0 if there is a set active bit at
+ * the same priority for both group 0 and group 1).
+ * Return the priority value for the bit we just cleared,
+ * or 0xff if no bits were set in the AP registers at all.
+ * Note that though the ich_apr[] are uint64_t only the low
+ * 32 bits are actually relevant.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
+ uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
+ int apr0count, apr1count;
+
+ if (!*papr0 && !*papr1) {
+ continue;
+ }
+
+ /* We can't just use the bit-twiddling hack icc_drop_prio() does
+ * because we need to return the bit number we cleared so
+ * it can be compared against the list register's priority field.
+ */
+ apr0count = ctz32(*papr0);
+ apr1count = ctz32(*papr1);
+
+ if (apr0count <= apr1count) {
+ *papr0 &= *papr0 - 1;
+ return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
+ } else {
+ *papr1 &= *papr1 - 1;
+ return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
+ }
+ }
+ return 0xff;
+}
+
+static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Deactivate interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int idx;
+ int irq = value & 0xffffff;
+
+ trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
+
+ if (irq >= cs->gic->num_irq) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ if (!icv_eoi_split(env, cs)) {
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No list register matching this, so increment the EOI count
+ * (might trigger a maintenance interrupt)
+ */
+ icv_increment_eoicount(cs);
+ } else {
+ icv_deactivate_irq(cs, idx);
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* End of Interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int irq = value & 0xffffff;
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx, dropprio;
+
+ trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (irq >= cs->gic->num_irq) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ /* We implement the IMPDEF choice of "drop priority before doing
+ * error checks" (because that lets us avoid scanning the AP
+ * registers twice).
+ */
+ dropprio = icv_drop_prio(cs);
+ if (dropprio == 0xff) {
+ /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
+ * whether the list registers are checked in this
+ * situation; we choose not to.
+ */
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No valid list register corresponding to EOI ID */
+ icv_increment_eoicount(cs);
+ } else {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+ int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
+
+ if (thisgrp == grp && lr_gprio == dropprio) {
+ if (!icv_eoi_split(env, cs)) {
+ /* Priority drop and deactivate not split: deactivate irq now */
+ icv_deactivate_irq(cs, idx);
+ }
+ }
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -454,6 +1259,11 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
int irq = value & 0xffffff;
int grp;
+ if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
+ icv_eoir_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
gicv3_redist_affid(cs), value);
@@ -496,8 +1306,13 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- uint64_t value = icc_hppir0_value(cs, env);
+ uint64_t value;
+ if (icv_access(env, HCR_FMO)) {
+ return icv_hppir_read(env, ri);
+ }
+
+ value = icc_hppir0_value(cs, env);
trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
return value;
}
@@ -505,8 +1320,13 @@ static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- uint64_t value = icc_hppir1_value(cs, env);
+ uint64_t value;
+ if (icv_access(env, HCR_IMO)) {
+ return icv_hppir_read(env, ri);
+ }
+
+ value = icc_hppir1_value(cs, env);
trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
return value;
}
@@ -518,6 +1338,10 @@ static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
bool satinc = false;
uint64_t bpr;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_bpr_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -554,6 +1378,11 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_bpr_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
gicv3_redist_affid(cs), value);
@@ -587,6 +1416,10 @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
int regno = ri->opc2 & 3;
int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_ap_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -605,6 +1438,11 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
int regno = ri->opc2 & 3;
int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_ap_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
@@ -633,6 +1471,11 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
bool irq_is_secure, single_sec_state, irq_is_grp0;
bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_dir_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
if (irq >= cs->gic->num_irq) {
@@ -704,7 +1547,13 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- int prio = icc_highest_active_prio(cs);
+ int prio;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_rpr_read(env, ri);
+ }
+
+ prio = icc_highest_active_prio(cs);
if (arm_feature(env, ARM_FEATURE_EL3) &&
!arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
@@ -817,6 +1666,10 @@ static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
uint64_t value;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_igrpen_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -833,6 +1686,11 @@ static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_igrpen_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
gicv3_redist_affid(cs), value);
@@ -874,6 +1732,10 @@ static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
uint64_t value;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_ctlr_read(env, ri);
+ }
+
value = cs->icc_ctlr_el1[bank];
trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
return value;
@@ -886,6 +1748,11 @@ static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
uint64_t mask;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_ctlr_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
/* Only CBPR and EOIMODE can be RW;
@@ -966,9 +1833,17 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
@@ -994,13 +1869,47 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
return r;
}
+static CPAccessResult gicv3_dir_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
+static CPAccessResult gicv3_sgi_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if (env->cp15.scr_el3 & SCR_FIQ) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
@@ -1030,9 +1939,17 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if (env->cp15.scr_el3 & SCR_IRQ) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
@@ -1081,6 +1998,13 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
(1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
(7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
+
+ memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
+ cs->ich_hcr_el2 = 0;
+ memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
+ cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
+ (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
+ (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
@@ -1181,7 +2105,7 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
{ .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_dir_access,
.writefn = icc_dir_write,
},
{ .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
@@ -1193,37 +2117,37 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
{ .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi1r_write,
},
{ .name = "ICC_SGI1R",
.cp = 15, .opc1 = 0, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi1r_write,
},
{ .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_asgi1r_write,
},
{ .name = "ICC_ASGI1R",
.cp = 15, .opc1 = 1, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_asgi1r_write,
},
{ .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi0r_write,
},
{ .name = "ICC_SGI0R",
.cp = 15, .opc1 = 2, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi0r_write,
},
{ .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
@@ -1321,6 +2245,306 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
REGINFO_SENTINEL
};
+static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ uint64_t value;
+
+ value = cs->ich_apr[grp][regno];
+ trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+
+ trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_hcr_el2;
+
+ trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
+ ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
+ ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
+ ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
+ ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
+
+ cs->ich_hcr_el2 = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_vmcr_el2;
+
+ trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
+ ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
+ ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
+ value |= ICH_VMCR_EL2_VFIQEN;
+
+ cs->ich_vmcr_el2 = value;
+ /* Enforce "writing BPRs to less than minimum sets them to the minimum"
+ * by reading and writing back the fields.
+ */
+    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
+    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+ uint64_t value;
+
+ /* This read function handles all of:
+ * 64-bit reads of the whole LR
+ * 32-bit reads of the low half of the LR
+ * 32-bit reads of the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ value = extract64(cs->ich_lr_el2[regno], 32, 32);
+ trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
+ } else {
+ value = extract64(cs->ich_lr_el2[regno], 0, 32);
+ trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
+ }
+ } else {
+ value = cs->ich_lr_el2[regno];
+ trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
+ }
+
+ return value;
+}
+
+static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+
+ /* This write function handles all of:
+ * 64-bit writes to the whole LR
+ * 32-bit writes to the low half of the LR
+ * 32-bit writes to the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
+ } else {
+ trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
+ }
+ } else {
+ trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
+ }
+
+ /* Enforce RES0 bits in priority field */
+ if (cs->vpribits < 8) {
+ value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
+ 8 - cs->vpribits, 0);
+ }
+
+ cs->ich_lr_el2[regno] = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
+ | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
+ | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
+ | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
+ | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
+
+ trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = maintenance_interrupt_state(cs);
+
+ trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
+
+ trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = 0;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
+            ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
+ value |= (1 << i);
+ }
+ }
+
+ trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
+ { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_hcr_read,
+ .writefn = ich_hcr_write,
+ },
+ { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_vtr_read,
+ },
+ { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_misr_read,
+ },
+ { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_eisr_read,
+ },
+ { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_elrsr_read,
+ },
+ { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_vmcr_read,
+ .writefn = ich_vmcr_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
+ { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
+ { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
GICv3CPUState *cs = opaque;
@@ -1349,6 +2573,59 @@ void gicv3_init_cpuif(GICv3State *s)
* to need to register anyway.
*/
define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
+ && cpu->gic_num_lrs) {
+ int j;
+
+ cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
+
+ cs->num_list_regs = cpu->gic_num_lrs;
+ cs->vpribits = cpu->gic_vpribits;
+ cs->vprebits = cpu->gic_vprebits;
+
+ /* Check against architectural constraints: getting these
+ * wrong would be a bug in the CPU code defining these,
+ * and the implementation relies on them holding.
+ */
+ g_assert(cs->vprebits <= cs->vpribits);
+ g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
+ g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
+
+ define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
+
+ for (j = 0; j < cs->num_list_regs; j++) {
+ /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
+ * are split into two cp15 regs, LR (the low part, with the
+ * same encoding as the AArch64 LR) and LRC (the high part).
+ */
+ ARMCPRegInfo lr_regset[] = {
+ { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12,
+ .crm = 12 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 12,
+ .crm = 14 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lr_regset);
+ }
+ if (cs->vprebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
+ }
+ if (cs->vprebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
+ }
+ }
arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
}
}
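
The ICH_LR<n>_EL2 / ICH_LRC<n> definitions above recover the list register
index from the crm/opc2 encoding. A minimal sketch of that mapping, matching
ich_lr_read()/ich_lr_write() (illustration only, not part of the patch):

    /* crm = 12 + (n >> 3), opc2 = n & 7 for ICH_LR<n>_EL2, and
     * crm = 14 + (n >> 3) for the AArch32 high-half view ICH_LRC<n>,
     * so bit 0 of crm carries bit 3 of the index in both cases.
     */
    static int ich_lr_index(const ARMCPRegInfo *ri)
    {
        return ri->opc2 | ((ri->crm & 1) << 3);   /* n = 0..15 */
    }
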
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index 8f3567edaa..aeb801d133 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -159,6 +159,85 @@
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)
+#define ICH_VMCR_EL2_VENG0_SHIFT 0
+#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
+#define ICH_VMCR_EL2_VENG1_SHIFT 1
+#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
+#define ICH_VMCR_EL2_VACKCTL (1U << 2)
+#define ICH_VMCR_EL2_VFIQEN (1U << 3)
+#define ICH_VMCR_EL2_VCBPR_SHIFT 4
+#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
+#define ICH_VMCR_EL2_VEOIM_SHIFT 9
+#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
+#define ICH_VMCR_EL2_VBPR1_SHIFT 18
+#define ICH_VMCR_EL2_VBPR1_LENGTH 3
+#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
+#define ICH_VMCR_EL2_VBPR0_SHIFT 21
+#define ICH_VMCR_EL2_VBPR0_LENGTH 3
+#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
+#define ICH_VMCR_EL2_VPMR_SHIFT 24
+#define ICH_VMCR_EL2_VPMR_LENGTH 8
+#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)
+
+#define ICH_HCR_EL2_EN (1U << 0)
+#define ICH_HCR_EL2_UIE (1U << 1)
+#define ICH_HCR_EL2_LRENPIE (1U << 2)
+#define ICH_HCR_EL2_NPIE (1U << 3)
+#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
+#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
+#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
+#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
+#define ICH_HCR_EL2_TC (1U << 10)
+#define ICH_HCR_EL2_TALL0 (1U << 11)
+#define ICH_HCR_EL2_TALL1 (1U << 12)
+#define ICH_HCR_EL2_TSEI (1U << 13)
+#define ICH_HCR_EL2_TDIR (1U << 14)
+#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
+#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
+#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)
+
+#define ICH_LR_EL2_VINTID_SHIFT 0
+#define ICH_LR_EL2_VINTID_LENGTH 32
+#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
+#define ICH_LR_EL2_PINTID_SHIFT 32
+#define ICH_LR_EL2_PINTID_LENGTH 10
+#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
+/* Note that EOI shares with the top bit of the pINTID field */
+#define ICH_LR_EL2_EOI (1ULL << 41)
+#define ICH_LR_EL2_PRIORITY_SHIFT 48
+#define ICH_LR_EL2_PRIORITY_LENGTH 8
+#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
+#define ICH_LR_EL2_GROUP (1ULL << 60)
+#define ICH_LR_EL2_HW (1ULL << 61)
+#define ICH_LR_EL2_STATE_SHIFT 62
+#define ICH_LR_EL2_STATE_LENGTH 2
+#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
+/* values for the state field: */
+#define ICH_LR_EL2_STATE_INVALID 0
+#define ICH_LR_EL2_STATE_PENDING 1
+#define ICH_LR_EL2_STATE_ACTIVE 2
+#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
+#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
+#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
+
+#define ICH_MISR_EL2_EOI (1U << 0)
+#define ICH_MISR_EL2_U (1U << 1)
+#define ICH_MISR_EL2_LRENP (1U << 2)
+#define ICH_MISR_EL2_NP (1U << 3)
+#define ICH_MISR_EL2_VGRP0E (1U << 4)
+#define ICH_MISR_EL2_VGRP0D (1U << 5)
+#define ICH_MISR_EL2_VGRP1E (1U << 6)
+#define ICH_MISR_EL2_VGRP1D (1U << 7)
+
+#define ICH_VTR_EL2_LISTREGS_SHIFT 0
+#define ICH_VTR_EL2_TDS (1U << 19)
+#define ICH_VTR_EL2_NV4 (1U << 20)
+#define ICH_VTR_EL2_A3V (1U << 21)
+#define ICH_VTR_EL2_SEIS (1U << 22)
+#define ICH_VTR_EL2_IDBITS_SHIFT 23
+#define ICH_VTR_EL2_PREBITS_SHIFT 26
+#define ICH_VTR_EL2_PRIBITS_SHIFT 29
+
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
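
To illustrate the new ICH_LR_EL2 field definitions (not part of the patch),
unpacking a list register with QEMU's extract64() from qemu/bitops.h:

    uint64_t lr = cs->ich_lr_el2[n];
    uint32_t vintid = extract64(lr, ICH_LR_EL2_VINTID_SHIFT,
                                ICH_LR_EL2_VINTID_LENGTH);
    uint32_t pintid = extract64(lr, ICH_LR_EL2_PINTID_SHIFT,
                                ICH_LR_EL2_PINTID_LENGTH);
    int prio  = extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT,
                          ICH_LR_EL2_PRIORITY_LENGTH);
    int state = extract64(lr, ICH_LR_EL2_STATE_SHIFT,
                          ICH_LR_EL2_STATE_LENGTH); /* one of ICH_LR_EL2_STATE_* */
    bool hw   = lr & ICH_LR_EL2_HW;
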
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 340f617761..6116df5436 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -107,6 +107,39 @@ gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu %x
gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu %x value 0x%" PRIx64
gicv3_icc_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_DIR write cpu %x value 0x%" PRIx64
gicv3_icc_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_RPR read cpu %x value 0x%" PRIx64
+gicv3_ich_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d read cpu %x value 0x%" PRIx64
+gicv3_ich_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d write cpu %x value 0x%" PRIx64
+gicv3_ich_hcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_hcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_vmcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_vmcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_lr_read(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_lr32_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d read cpu %x value 0x%" PRIx32
+gicv3_ich_lrc_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d read cpu %x value 0x%" PRIx32
+gicv3_ich_lr_write(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_lr32_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d write cpu %x value 0x%" PRIx32
+gicv3_ich_lrc_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d write cpu %x value 0x%" PRIx32
+gicv3_ich_vtr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VTR read cpu %x value 0x%" PRIx64
+gicv3_ich_misr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_MISR read cpu %x value 0x%" PRIx64
+gicv3_ich_eisr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_EISR read cpu %x value 0x%" PRIx64
+gicv3_ich_elrsr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_ELRSR read cpu %x value 0x%" PRIx64
+gicv3_icv_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d write cpu %x value 0x%" PRIx64
+gicv3_icv_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d write cpu %x value 0x%" PRIx64
+gicv3_icv_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR read cpu %x value 0x%" PRIx64
+gicv3_icv_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR write cpu %x value 0x%" PRIx64
+gicv3_icv_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d read cpu %x value 0x%" PRIx64
+gicv3_icv_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d write cpu %x value 0x%" PRIx64
+gicv3_icv_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR read cpu %x value 0x%" PRIx64
+gicv3_icv_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR write cpu %x value 0x%" PRIx64
+gicv3_icv_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_RPR read cpu %x value 0x%" PRIx64
+gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu %x value 0x%" PRIx64
+gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu %x value 0x%" PRIx64
+gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f %x virt HPPI update LR index %d"
+gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f %x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d"
# hw/intc/arm_gicv3_dist.c
gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
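
Assuming a QEMU build with a trace backend such as 'log', the new events can
be enabled with a pattern at run time; an illustrative command line (not part
of the patch):

    qemu-system-aarch64 -M virt,gic-version=3,virtualization=on \
        -cpu cortex-a57 -trace "gicv3_ich_*" -trace "gicv3_icv_*" ...
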
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index 78f5aed532..ae1ad2dba6 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -39,11 +39,14 @@
#define CONF_ENABLE_W2 18
#define CONF_ENABLE_W1 17
#define CONF_ENABLE_W0 16
-#define CONF_FLASH_TYPE4 9
-#define CONF_FLASH_TYPE3 7
-#define CONF_FLASH_TYPE2 5
-#define CONF_FLASH_TYPE1 3
-#define CONF_FLASH_TYPE0 1
+#define CONF_FLASH_TYPE4 8
+#define CONF_FLASH_TYPE3 6
+#define CONF_FLASH_TYPE2 4
+#define CONF_FLASH_TYPE1 2
+#define CONF_FLASH_TYPE0 0
+#define CONF_FLASH_TYPE_NOR 0x0
+#define CONF_FLASH_TYPE_NAND 0x1
+#define CONF_FLASH_TYPE_SPI 0x2
/* CE Control Register */
#define R_CE_CTRL (0x04 / 4)
@@ -66,6 +69,7 @@
#define R_CTRL0 (0x10 / 4)
#define CTRL_CMD_SHIFT 16
#define CTRL_CMD_MASK 0xff
+#define CTRL_AST2400_SPI_4BYTE (1 << 13)
#define CTRL_CE_STOP_ACTIVE (1 << 2)
#define CTRL_CMD_MODE_MASK 0x3
#define CTRL_READMODE 0x0
@@ -127,11 +131,17 @@
#define R_SPI_MISC_CTRL (0x10 / 4)
#define R_SPI_TIMINGS (0x14 / 4)
+#define ASPEED_SMC_R_SPI_MAX (0x20 / 4)
+#define ASPEED_SMC_R_SMC_MAX (0x20 / 4)
+
#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000
#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000
#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000
#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000
+/* Flash opcodes. */
+#define SPI_OP_READ 0x03 /* Read data bytes (low frequency) */
+
/*
* Default segments mapping addresses and size for each slave per
* controller. These can be changed when board is initialized with the
@@ -170,24 +180,85 @@ static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
};
static const AspeedSMCController controllers[] = {
- { "aspeed.smc.smc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_legacy,
- ASPEED_SOC_SMC_FLASH_BASE, 0x6000000 },
- { "aspeed.smc.fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_fmc,
- ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.spi", R_SPI_CONF, 0xff, R_SPI_CTRL0, R_SPI_TIMINGS,
- SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi,
- ASPEED_SOC_SPI_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.ast2500-fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 3, aspeed_segments_ast2500_fmc,
- ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.ast2500-spi1", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi1,
- ASPEED_SOC_SPI_FLASH_BASE, 0x8000000 },
- { "aspeed.smc.ast2500-spi2", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi2,
- ASPEED_SOC_SPI2_FLASH_BASE, 0x8000000 },
+ {
+ .name = "aspeed.smc.smc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 5,
+ .segments = aspeed_segments_legacy,
+ .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE,
+ .flash_window_size = 0x6000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_SMC_MAX,
+ }, {
+ .name = "aspeed.smc.fmc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 5,
+ .segments = aspeed_segments_fmc,
+ .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = true,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.spi",
+ .r_conf = R_SPI_CONF,
+ .r_ce_ctrl = 0xff,
+ .r_ctrl0 = R_SPI_CTRL0,
+ .r_timings = R_SPI_TIMINGS,
+ .conf_enable_w0 = SPI_CONF_ENABLE_W0,
+ .max_slaves = 1,
+ .segments = aspeed_segments_spi,
+ .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_SPI_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-fmc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 3,
+ .segments = aspeed_segments_ast2500_fmc,
+ .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = true,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-spi1",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 2,
+ .segments = aspeed_segments_ast2500_spi1,
+ .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
+ .flash_window_size = 0x8000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-spi2",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 2,
+ .segments = aspeed_segments_ast2500_spi2,
+ .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE,
+ .flash_window_size = 0x8000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_MAX,
+ },
};
/*
@@ -328,36 +399,137 @@ static const MemoryRegionOps aspeed_smc_flash_default_ops = {
},
};
-static inline int aspeed_smc_flash_mode(const AspeedSMCState *s, int cs)
+static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl)
{
- return s->regs[s->r_ctrl0 + cs] & CTRL_CMD_MODE_MASK;
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK;
}
-static inline bool aspeed_smc_is_usermode(const AspeedSMCState *s, int cs)
+static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl)
{
- return aspeed_smc_flash_mode(s, cs) == CTRL_USERMODE;
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id));
}
-static inline bool aspeed_smc_is_writable(const AspeedSMCState *s, int cs)
+static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl)
{
- return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + cs));
+ const AspeedSMCState *s = fl->controller;
+ int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK;
+
+    /* In read mode, the default SPI command is READ (0x3). In other
+     * modes, the command must be defined. */
+ if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) {
+ cmd = SPI_OP_READ;
+ }
+
+ if (!cmd) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
+ }
+
+ return cmd;
+}
+
+static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl)
+{
+ const AspeedSMCState *s = fl->controller;
+
+ if (s->ctrl->segments == aspeed_segments_spi) {
+ return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE;
+ } else {
+ return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id));
+ }
+}
+
+static inline bool aspeed_smc_is_ce_stop_active(const AspeedSMCFlash *fl)
+{
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE;
+}
+
+static void aspeed_smc_flash_select(AspeedSMCFlash *fl)
+{
+ AspeedSMCState *s = fl->controller;
+
+ s->regs[s->r_ctrl0 + fl->id] &= ~CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
+}
+
+static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl)
+{
+ AspeedSMCState *s = fl->controller;
+
+ s->regs[s->r_ctrl0 + fl->id] |= CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
+}
+
+static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl,
+ uint32_t addr)
+{
+ const AspeedSMCState *s = fl->controller;
+ AspeedSegments seg;
+
+ aspeed_smc_reg_to_segment(s->regs[R_SEG_ADDR0 + fl->id], &seg);
+ if ((addr & (seg.size - 1)) != addr) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid address 0x%08x for CS%d segment : "
+ "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
+ s->ctrl->name, addr, fl->id, seg.addr,
+ seg.addr + seg.size);
+ }
+
+ addr &= seg.size - 1;
+ return addr;
+}
+
+static void aspeed_smc_flash_send_addr(AspeedSMCFlash *fl, uint32_t addr)
+{
+ const AspeedSMCState *s = fl->controller;
+ uint8_t cmd = aspeed_smc_flash_cmd(fl);
+
+    /* Flash access cannot exceed the CS segment */
+ addr = aspeed_smc_check_segment_addr(fl, addr);
+
+ ssi_transfer(s->spi, cmd);
+
+ if (aspeed_smc_flash_is_4byte(fl)) {
+ ssi_transfer(s->spi, (addr >> 24) & 0xff);
+ }
+ ssi_transfer(s->spi, (addr >> 16) & 0xff);
+ ssi_transfer(s->spi, (addr >> 8) & 0xff);
+ ssi_transfer(s->spi, (addr & 0xff));
}
static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
{
AspeedSMCFlash *fl = opaque;
- const AspeedSMCState *s = fl->controller;
+ AspeedSMCState *s = fl->controller;
uint64_t ret = 0;
int i;
- if (aspeed_smc_is_usermode(s, fl->id)) {
+ switch (aspeed_smc_flash_mode(fl)) {
+ case CTRL_USERMODE:
for (i = 0; i < size; i++) {
ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
}
- } else {
- qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
- __func__);
- ret = -1;
+ break;
+ case CTRL_READMODE:
+ case CTRL_FREADMODE:
+ aspeed_smc_flash_select(fl);
+ aspeed_smc_flash_send_addr(fl, addr);
+
+ for (i = 0; i < size; i++) {
+ ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
+ }
+
+ aspeed_smc_flash_unselect(fl);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
}
return ret;
@@ -367,23 +539,34 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
AspeedSMCFlash *fl = opaque;
- const AspeedSMCState *s = fl->controller;
+ AspeedSMCState *s = fl->controller;
int i;
- if (!aspeed_smc_is_writable(s, fl->id)) {
+ if (!aspeed_smc_is_writable(fl)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
HWADDR_PRIx "\n", __func__, addr);
return;
}
- if (!aspeed_smc_is_usermode(s, fl->id)) {
- qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
- __func__);
- return;
- }
+ switch (aspeed_smc_flash_mode(fl)) {
+ case CTRL_USERMODE:
+ for (i = 0; i < size; i++) {
+ ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ }
+ break;
+ case CTRL_WRITEMODE:
+ aspeed_smc_flash_select(fl);
+ aspeed_smc_flash_send_addr(fl, addr);
- for (i = 0; i < size; i++) {
- ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ for (i = 0; i < size; i++) {
+ ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ }
+
+ aspeed_smc_flash_unselect(fl);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
}
}
@@ -397,18 +580,11 @@ static const MemoryRegionOps aspeed_smc_flash_ops = {
},
};
-static bool aspeed_smc_is_ce_stop_active(const AspeedSMCState *s, int cs)
-{
- return s->regs[s->r_ctrl0 + cs] & CTRL_CE_STOP_ACTIVE;
-}
-
-static void aspeed_smc_update_cs(const AspeedSMCState *s)
+static void aspeed_smc_flash_update_cs(AspeedSMCFlash *fl)
{
- int i;
+ const AspeedSMCState *s = fl->controller;
- for (i = 0; i < s->num_cs; ++i) {
- qemu_set_irq(s->cs_lines[i], aspeed_smc_is_ce_stop_active(s, i));
- }
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
}
static void aspeed_smc_reset(DeviceState *d)
@@ -424,6 +600,7 @@ static void aspeed_smc_reset(DeviceState *d)
/* Unselect all slaves */
for (i = 0; i < s->num_cs; ++i) {
s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[i], true);
}
/* setup default segment register values for all */
@@ -432,7 +609,24 @@ static void aspeed_smc_reset(DeviceState *d)
aspeed_smc_segment_to_reg(&s->ctrl->segments[i]);
}
- aspeed_smc_update_cs(s);
+ /* HW strapping for AST2500 FMC controllers */
+ if (s->ctrl->segments == aspeed_segments_ast2500_fmc) {
+ /* flash type is fixed to SPI for CE0 and CE1 */
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
+
+ /* 4BYTE mode is autodetected for CE0. Let's force it to 1 for
+ * now */
+ s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0));
+ }
+
+ /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the
+ * configuration of the palmetto-bmc machine */
+ if (s->ctrl->segments == aspeed_segments_fmc) {
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
+
+ s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0));
+ }
}
static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
@@ -441,13 +635,6 @@ static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
addr >>= 2;
- if (addr >= ARRAY_SIZE(s->regs)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds read at 0x%" HWADDR_PRIx "\n",
- __func__, addr);
- return 0;
- }
-
if (addr == s->r_conf ||
addr == s->r_timings ||
addr == s->r_ce_ctrl ||
@@ -470,20 +657,14 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
addr >>= 2;
- if (addr >= ARRAY_SIZE(s->regs)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds write at 0x%" HWADDR_PRIx "\n",
- __func__, addr);
- return;
- }
-
if (addr == s->r_conf ||
addr == s->r_timings ||
addr == s->r_ce_ctrl) {
s->regs[addr] = value;
} else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
+ int cs = addr - s->r_ctrl0;
s->regs[addr] = value;
- aspeed_smc_update_cs(s);
+ aspeed_smc_flash_update_cs(&s->flashes[cs]);
} else if (addr >= R_SEG_ADDR0 &&
addr < R_SEG_ADDR0 + s->ctrl->max_slaves) {
int cs = addr - R_SEG_ADDR0;
@@ -541,11 +722,9 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(sbd, &s->cs_lines[i]);
}
- aspeed_smc_reset(dev);
-
/* The memory region for the controller registers */
memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
- s->ctrl->name, ASPEED_SMC_R_MAX * 4);
+ s->ctrl->name, s->ctrl->nregs * 4);
sysbus_init_mmio(sbd, &s->mmio);
/*
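
A worked example of the new segment clamping (illustration only, not from the
patch):

    /* aspeed_smc_check_segment_addr() restricts an access to its CS segment:
     * with a 0x2000000-byte (32MB) segment, an access at offset 0x2000100 is
     * reported with qemu_log_mask(LOG_GUEST_ERROR) and wrapped to offset
     * 0x100, after which aspeed_smc_flash_send_addr() shifts out the command
     * byte followed by 3 or 4 address bytes, most significant byte first.
     */
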
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index eb1c63d688..58ce74e0e5 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -39,6 +39,8 @@
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
+#define ARCH_GICV3_MAINT_IRQ 9
+
#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
#define ARCH_TIMER_NS_EL1_IRQ 14
@@ -91,6 +93,7 @@ typedef struct {
FWCfgState *fw_cfg;
bool secure;
bool highmem;
+ bool virt;
int32_t gic_version;
struct arm_boot_info bootinfo;
const MemMapEntry *memmap;
@@ -101,7 +104,7 @@ typedef struct {
uint32_t clock_phandle;
uint32_t gic_phandle;
uint32_t msi_phandle;
- bool using_psci;
+ int psci_conduit;
} VirtMachineState;
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
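
Like the timer interrupt defines in this header, the new maintenance IRQ is a
PPI number (illustration, not part of the patch):

    /* PPIs map to GIC INTIDs as INTID = 16 + PPI, so ARCH_GICV3_MAINT_IRQ (9)
     * is INTID 25, the conventional GICv3 maintenance interrupt.
     */
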
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index f4c349a2ef..af3ca18e2f 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -55,6 +55,8 @@ typedef struct GICState {
qemu_irq parent_irq[GIC_NCPU];
qemu_irq parent_fiq[GIC_NCPU];
+ qemu_irq parent_virq[GIC_NCPU];
+ qemu_irq parent_vfiq[GIC_NCPU];
/* GICD_CTLR; for a GIC with the security extensions the NS banked version
* of this register is just an alias of bit 1 of the S banked version.
*/
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 341a3118f0..4156051d98 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -38,6 +38,9 @@
/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
+/* Maximum number of list registers (architectural limit) */
+#define GICV3_LR_MAX 16
+
/* Minimum BPR for Secure, or when security not enabled */
#define GIC_MIN_BPR 0
/* Minimum BPR for Nonsecure when security is enabled */
@@ -145,6 +148,9 @@ struct GICv3CPUState {
CPUState *cpu;
qemu_irq parent_irq;
qemu_irq parent_fiq;
+ qemu_irq parent_virq;
+ qemu_irq parent_vfiq;
+ qemu_irq maintenance_irq;
/* Redistributor */
uint32_t level; /* Current IRQ level */
@@ -173,6 +179,21 @@ struct GICv3CPUState {
uint64_t icc_igrpen[3];
uint64_t icc_ctlr_el3;
+ /* Virtualization control interface */
+ uint64_t ich_apr[3][4]; /* ich_apr[GICV3_G1][x] never used */
+ uint64_t ich_hcr_el2;
+ uint64_t ich_lr_el2[GICV3_LR_MAX];
+ uint64_t ich_vmcr_el2;
+
+ /* Properties of the CPU interface. These are initialized from
+ * the settings in the CPU proper.
+ * If the number of implemented list registers is 0 then the
+ * virtualization support is not implemented.
+ */
+ int num_list_regs;
+ int vpribits; /* number of virtual priority bits */
+ int vprebits; /* number of virtual preemption bits */
+
/* Current highest priority pending interrupt for this CPU.
* This is cached information that can be recalculated from the
* real state above; it doesn't need to be migrated.
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
index bdfbcc0ffa..1f557313fa 100644
--- a/include/hw/ssi/aspeed_smc.h
+++ b/include/hw/ssi/aspeed_smc.h
@@ -44,10 +44,12 @@ typedef struct AspeedSMCController {
const AspeedSegments *segments;
hwaddr flash_window_base;
uint32_t flash_window_size;
+ bool has_dma;
+ uint32_t nregs;
} AspeedSMCController;
typedef struct AspeedSMCFlash {
- const struct AspeedSMCState *controller;
+ struct AspeedSMCState *controller;
uint8_t id;
uint32_t size;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 91046111d9..3f2cdb65bf 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -465,6 +465,9 @@ static void arm_cpu_initfn(Object *obj)
arm_gt_stimer_cb, cpu);
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
ARRAY_SIZE(cpu->gt_timer_outputs));
+
+ qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
+ "gicv3-maintenance-interrupt", 1);
#endif
/* DTB consumers generally don't in fact care what the 'compatible'
@@ -493,6 +496,9 @@ static Property arm_cpu_reset_hivecs_property =
static Property arm_cpu_rvbar_property =
DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+static Property arm_cpu_has_el2_property =
+ DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
+
static Property arm_cpu_has_el3_property =
DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
@@ -543,6 +549,11 @@ static void arm_cpu_post_init(Object *obj)
#endif
}
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
+ &error_abort);
+ }
+
if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
&error_abort);
@@ -691,6 +702,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->id_aa64pfr0 &= ~0xf000;
}
+ if (!cpu->has_el2) {
+ unset_feature(env, ARM_FEATURE_EL2);
+ }
+
if (!cpu->has_pmu || !kvm_enabled()) {
cpu->has_pmu = false;
unset_feature(env, ARM_FEATURE_PMU);
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 7bd16eec18..151a5d754e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -558,6 +558,8 @@ struct ARMCPU {
QEMUTimer *gt_timer[NUM_GTIMERS];
/* GPIO outputs for generic timer */
qemu_irq gt_timer_outputs[NUM_GTIMERS];
+ /* GPIO output for GICv3 maintenance interrupt signal */
+ qemu_irq gicv3_maintenance_interrupt;
/* MemoryRegion to use for secure physical accesses */
MemoryRegion *secure_memory;
@@ -575,6 +577,8 @@ struct ARMCPU {
bool start_powered_off;
/* CPU currently in PSCI powered-off state */
bool powered_off;
+ /* CPU has virtualization extension */
+ bool has_el2;
/* CPU has security extension */
bool has_el3;
/* CPU has PMU (Performance Monitor Unit) */
@@ -660,6 +664,11 @@ struct ARMCPU {
uint32_t dcz_blocksize;
uint64_t rvbar;
+ /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
+ int gic_num_lrs; /* number of list registers */
+ int gic_vpribits; /* number of virtual priority bits */
+ int gic_vprebits; /* number of virtual preemption bits */
+
ARMELChangeHook *el_change_hook;
void *el_change_hook_opaque;
};
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 549cb1ee93..670c07ab6e 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -110,6 +110,7 @@ static void aarch64_a57_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
@@ -147,6 +148,9 @@ static void aarch64_a57_initfn(Object *obj)
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
}
@@ -166,6 +170,7 @@ static void aarch64_a53_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
@@ -201,6 +206,9 @@ static void aarch64_a53_initfn(Object *obj)
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
}
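
With these values the reported ICH_VTR_EL2 follows from ich_vtr_read() in
hw/intc/arm_gicv3_cpuif.c; a worked example (not part of the patch):

    /* 4 list registers, 5 virtual priority bits, 5 preemption bits:
     *   ListRegs = 4 - 1 = 3    (bits [4:0])
     *   IDbits   = 1            (24-bit vINTIDs, bits [25:23])
     *   PREbits  = 5 - 1 = 4    (bits [28:26])
     *   PRIbits  = 5 - 1 = 4    (bits [31:29])
     * plus the TDS, nV4 and A3V flags, i.e. 0x90b80003.
     */
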
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b3875c7c6e..7111c8cf18 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4066,6 +4066,13 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
.cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tda,
.type = ARM_CP_NOP },
+ /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
+ * to save and restore a 32-bit guest's DBGVCR)
+ */
+ { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
/* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
* Channel but Linux may try to access this register. The 32-bit
* alias is DBGDCCINT.
@@ -6399,6 +6406,20 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
}
offset = 4;
break;
+ case EXCP_VIRQ:
+ new_mode = ARM_CPU_MODE_IRQ;
+ addr = 0x18;
+ /* Disable IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_VFIQ:
+ new_mode = ARM_CPU_MODE_FIQ;
+ addr = 0x1c;
+ /* Disable FIQ, IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 4;
+ break;
case EXCP_SMC:
new_mode = ARM_CPU_MODE_MON;
addr = 0x08;
diff --git a/target/arm/psci.c b/target/arm/psci.c
index 14316eb0ae..64bf82eea1 100644
--- a/target/arm/psci.c
+++ b/target/arm/psci.c
@@ -148,17 +148,28 @@ void arm_handle_psci_call(ARMCPU *cpu)
case QEMU_PSCI_0_1_FN_CPU_ON:
case QEMU_PSCI_0_2_FN_CPU_ON:
case QEMU_PSCI_0_2_FN64_CPU_ON:
+ {
+ /* The PSCI spec mandates that newly brought up CPUs start
+ * in the highest exception level which exists and is enabled
+ * on the calling CPU. Since the QEMU PSCI implementation is
+ * acting as a "fake EL3" or "fake EL2" firmware, this for us
+ * means that we want to start at the highest NS exception level
+ * that we are providing to the guest.
+ * The execution mode should be that which is currently in use
+ * by the same exception level on the calling CPU.
+ * The CPU should be started with the context_id value
+ * in x0 (if AArch64) or r0 (if AArch32).
+ */
+ int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
+ bool target_aarch64 = arm_el_is_aa64(env, target_el);
+
mpidr = param[1];
entry = param[2];
context_id = param[3];
- /*
- * The PSCI spec mandates that newly brought up CPUs enter the
- * exception level of the caller in the same execution mode as
- * the caller, with context_id in x0/r0, respectively.
- */
- ret = arm_set_cpu_on(mpidr, entry, context_id, arm_current_el(env),
- is_a64(env));
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
+ target_el, target_aarch64);
break;
+ }
case QEMU_PSCI_0_1_FN_CPU_OFF:
case QEMU_PSCI_0_2_FN_CPU_OFF:
goto cpu_off;
diff --git a/tests/m25p80-test.c b/tests/m25p80-test.c
index cb7ec81f1a..244aa33dd9 100644
--- a/tests/m25p80-test.c
+++ b/tests/m25p80-test.c
@@ -36,6 +36,9 @@
#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */
#define R_CTRL0 0x10
#define CTRL_CE_STOP_ACTIVE (1 << 2)
+#define CTRL_READMODE 0x0
+#define CTRL_FREADMODE 0x1
+#define CTRL_WRITEMODE 0x2
#define CTRL_USERMODE 0x3
#define ASPEED_FMC_BASE 0x1E620000
@@ -50,6 +53,8 @@ enum {
READ = 0x03,
PP = 0x02,
WREN = 0x6,
+ RESET_ENABLE = 0x66,
+ RESET_MEMORY = 0x99,
EN_4BYTE_ADDR = 0xB7,
ERASE_SECTOR = 0xd8,
};
@@ -76,6 +81,30 @@ static void spi_conf(uint32_t value)
writel(ASPEED_FMC_BASE + R_CONF, conf);
}
+static void spi_conf_remove(uint32_t value)
+{
+ uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF);
+
+ conf &= ~value;
+ writel(ASPEED_FMC_BASE + R_CONF, conf);
+}
+
+static void spi_ce_ctrl(uint32_t value)
+{
+ uint32_t conf = readl(ASPEED_FMC_BASE + R_CE_CTRL);
+
+ conf |= value;
+ writel(ASPEED_FMC_BASE + R_CE_CTRL, conf);
+}
+
+static void spi_ctrl_setmode(uint8_t mode, uint8_t cmd)
+{
+ uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
+ ctrl &= ~(CTRL_USERMODE | 0xff << 16);
+ ctrl |= mode | (cmd << 16);
+ writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
+}
+
static void spi_ctrl_start_user(void)
{
uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
@@ -95,6 +124,18 @@ static void spi_ctrl_stop_user(void)
writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
}
+static void flash_reset(void)
+{
+ spi_conf(CONF_ENABLE_W0);
+
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, RESET_ENABLE);
+ writeb(ASPEED_FLASH_BASE, RESET_MEMORY);
+ spi_ctrl_stop_user();
+
+ spi_conf_remove(CONF_ENABLE_W0);
+}
+
static void test_read_jedec(void)
{
uint32_t jedec = 0x0;
@@ -108,6 +149,8 @@ static void test_read_jedec(void)
jedec |= readb(ASPEED_FLASH_BASE);
spi_ctrl_stop_user();
+ flash_reset();
+
g_assert_cmphex(jedec, ==, FLASH_JEDEC);
}
@@ -128,6 +171,18 @@ static void read_page(uint32_t addr, uint32_t *page)
spi_ctrl_stop_user();
}
+static void read_page_mem(uint32_t addr, uint32_t *page)
+{
+ int i;
+
+    /* move out of USER mode to use direct reads from the AHB bus */
+ spi_ctrl_setmode(CTRL_READMODE, READ);
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ page[i] = make_be32(readl(ASPEED_FLASH_BASE + addr + i * 4));
+ }
+}
+
static void test_erase_sector(void)
{
uint32_t some_page_addr = 0x600 * PAGE_SIZE;
@@ -155,6 +210,8 @@ static void test_erase_sector(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
}
static void test_erase_all(void)
@@ -182,6 +239,8 @@ static void test_erase_all(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
}
static void test_write_page(void)
@@ -195,6 +254,7 @@ static void test_write_page(void)
spi_ctrl_start_user();
writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ writeb(ASPEED_FLASH_BASE, WREN);
writeb(ASPEED_FLASH_BASE, PP);
writel(ASPEED_FLASH_BASE, make_be32(my_page_addr));
@@ -215,6 +275,77 @@ static void test_write_page(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
+}
+
+static void test_read_page_mem(void)
+{
+ uint32_t my_page_addr = 0x14000 * PAGE_SIZE; /* beyond 16MB */
+ uint32_t some_page_addr = 0x15000 * PAGE_SIZE;
+ uint32_t page[PAGE_SIZE / 4];
+ int i;
+
+    /* Enable 4BYTE mode for controller. This should be strapped by
+ * HW for CE0 anyhow.
+ */
+ spi_ce_ctrl(1 << CRTL_EXTENDED0);
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(CONF_ENABLE_W0);
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ spi_ctrl_stop_user();
+ spi_conf_remove(CONF_ENABLE_W0);
+
+ /* Check what was written */
+ read_page_mem(my_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ /* Check some other page. It should be full of 0xff */
+ read_page_mem(some_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset();
+}
+
+static void test_write_page_mem(void)
+{
+ uint32_t my_page_addr = 0x15000 * PAGE_SIZE;
+ uint32_t page[PAGE_SIZE / 4];
+ int i;
+
+    /* Enable 4BYTE mode for controller. This should be strapped by
+ * HW for CE0 anyhow.
+ */
+ spi_ce_ctrl(1 << CRTL_EXTENDED0);
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(CONF_ENABLE_W0);
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ writeb(ASPEED_FLASH_BASE, WREN);
+ spi_ctrl_stop_user();
+
+    /* move out of USER mode to use direct writes to the AHB bus */
+ spi_ctrl_setmode(CTRL_WRITEMODE, PP);
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ writel(ASPEED_FLASH_BASE + my_page_addr + i * 4,
+ make_be32(my_page_addr + i * 4));
+ }
+
+ /* Check what was written */
+ read_page_mem(my_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ flash_reset();
}
static char tmp_path[] = "/tmp/qtest.m25p80.XXXXXX";
@@ -242,6 +373,8 @@ int main(int argc, char **argv)
qtest_add_func("/m25p80/erase_sector", test_erase_sector);
qtest_add_func("/m25p80/erase_all", test_erase_all);
qtest_add_func("/m25p80/write_page", test_write_page);
+ qtest_add_func("/m25p80/read_page_mem", test_read_page_mem);
+ qtest_add_func("/m25p80/write_page_mem", test_write_page_mem);
ret = g_test_run();
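
The extended test is picked up by the usual ARM qtest target; illustrative
invocations, assuming a completed build (not part of the patch):

    make check-qtest-arm
    # or run the test directly against the arm binary:
    QTEST_QEMU_BINARY=arm-softmmu/qemu-system-arm tests/m25p80-test
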