author    Paolo Bonzini <pbonzini@redhat.com>  2018-08-21 15:31:24 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2018-10-02 19:09:12 +0200
commit    92d5f1a4147c3722b5e9a8bcfb7dc261b7a8b855 (patch)
tree      a82c08f864204798283f7621c2c7ffd9458275d2 /target/i386
parent    27e18b8952f8b7a1e26350846f8a0d5a9b33bfb8 (diff)
download  qemu-92d5f1a4147c3722b5e9a8bcfb7dc261b7a8b855.zip
target/i386: unify masking of interrupts
Interrupt handling depends on various flags in env->hflags or env->hflags2,
and the exact details were not replicated exactly between x86_cpu_has_work
and x86_cpu_exec_interrupt. Create a new function that extracts the
highest-priority non-masked interrupt, and use it in both functions.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
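In short, both call sites now share a single contract: scan interrupt_request in fixed priority order (POLL, SIPI, then, only while GIF is set, SMI, NMI, MCE, HARD, VIRQ) and return the first request the masking rules allow. A minimal sketch of that contract (the declaration and the x86_cpu_has_work body are taken from the diff below; the summary comment is derived from it):

/* Returns the highest-priority non-masked interrupt pending in
 * interrupt_request, or 0 if every pending request is currently masked
 * by hflags/hflags2/EFLAGS.IF. */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);

/* Both consumers then reduce to the same check; for example: */
static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}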
Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/cpu.c         51
-rw-r--r--  target/i386/cpu.h          1
-rw-r--r--  target/i386/seg_helper.c 106
3 files changed, 91 insertions, 67 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index f24295e6e4..c88876dfe3 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5429,20 +5429,51 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
     cpu->env.eip = tb->pc - tb->cs_base;
 }
 
-static bool x86_cpu_has_work(CPUState *cs)
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
-                                      CPU_INTERRUPT_POLL)) &&
-            (env->eflags & IF_MASK)) ||
-           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
-                                     CPU_INTERRUPT_INIT |
-                                     CPU_INTERRUPT_SIPI |
-                                     CPU_INTERRUPT_MCE)) ||
-           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK));
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
+        return CPU_INTERRUPT_POLL;
+    }
+#endif
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+        return CPU_INTERRUPT_SIPI;
+    }
+
+    if (env->hflags2 & HF2_GIF_MASK) {
+        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            return CPU_INTERRUPT_SMI;
+        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                   !(env->hflags2 & HF2_NMI_MASK)) {
+            return CPU_INTERRUPT_NMI;
+        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+            return CPU_INTERRUPT_MCE;
+        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->hflags2 & HF2_HIF_MASK)) ||
+                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->eflags & IF_MASK &&
+                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+            return CPU_INTERRUPT_HARD;
+#if !defined(CONFIG_USER_ONLY)
+        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                   (env->eflags & IF_MASK) &&
+                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+            return CPU_INTERRUPT_VIRQ;
+#endif
+        }
+    }
+
+    return 0;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
 }
 
 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b572a8e4aa..227a5d9b61 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1485,6 +1485,7 @@ extern struct VMStateDescription vmstate_x86_cpu;
  */
 void x86_cpu_do_interrupt(CPUState *cpu);
 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
 
 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                              int cpuid, void *opaque);
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
index d1cbc6ebf0..ba9cc688ac 100644
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -1319,74 +1319,66 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    bool ret = false;
+    int intno;
 
+    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+    if (!interrupt_request) {
+        return false;
+    }
+
+    /* Don't process multiple interrupt requests in a single call.
+     * This is required to make icount-driven execution deterministic.
+     */
+    switch (interrupt_request) {
 #if !defined(CONFIG_USER_ONLY)
-    if (interrupt_request & CPU_INTERRUPT_POLL) {
+    case CPU_INTERRUPT_POLL:
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
-        /* Don't process multiple interrupt requests in a single call.
-           This is required to make icount-driven execution deterministic. */
-        return true;
-    }
+        break;
 #endif
-    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+    case CPU_INTERRUPT_SIPI:
         do_cpu_sipi(cpu);
-        ret = true;
-    } else if (env->hflags2 & HF2_GIF_MASK) {
-        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-            do_smm_enter(cpu);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
-                   !(env->hflags2 & HF2_NMI_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
-            env->hflags2 |= HF2_NMI_MASK;
-            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
-            ret = true;
-        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
-            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                   (((env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->hflags2 & HF2_HIF_MASK)) ||
-                    (!(env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->eflags & IF_MASK &&
-                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
-            int intno;
-            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                       CPU_INTERRUPT_VIRQ);
-            intno = cpu_get_pic_interrupt(env);
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            /* ensure that no TB jump will be modified as
-               the program flow was changed */
-            ret = true;
+        break;
+    case CPU_INTERRUPT_SMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        do_smm_enter(cpu);
+        break;
+    case CPU_INTERRUPT_NMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        env->hflags2 |= HF2_NMI_MASK;
+        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+        break;
+    case CPU_INTERRUPT_MCE:
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+        break;
+    case CPU_INTERRUPT_HARD:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                   CPU_INTERRUPT_VIRQ);
+        intno = cpu_get_pic_interrupt(env);
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        break;
 #if !defined(CONFIG_USER_ONLY)
-        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-                   (env->eflags & IF_MASK) &&
-                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-            int intno;
-            /* FIXME: this should respect TPR */
-            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
-            intno = x86_ldl_phys(cs, env->vm_vmcb
-                                 + offsetof(struct vmcb, control.int_vector));
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing virtual hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-            ret = true;
+    case CPU_INTERRUPT_VIRQ:
+        /* FIXME: this should respect TPR */
+        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+        intno = x86_ldl_phys(cs, env->vm_vmcb
+                             + offsetof(struct vmcb, control.int_vector));
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing virtual hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+        break;
 #endif
-        }
     }
-    return ret;
+    /* Ensure that no TB jump will be modified as the program flow was changed. */
+    return true;
 }
 
void helper_lldt(CPUX86State *env, int selector)
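To make the unified masking concrete, here is an illustrative snippet (hypothetical, not part of the commit) exercising the new helper: with GIF clear, everything except POLL and SIPI sits behind the GIF check, so a pending NMI is reported as masked while a SIPI still gets through.

/* Illustration only; assumes a valid x86 CPUState *cs whose env->hflags2
 * has HF2_GIF_MASK clear (GIF=0, e.g. between SVM's CLGI and STGI). */
assert(x86_cpu_pending_interrupt(cs, CPU_INTERRUPT_NMI) == 0);
assert(x86_cpu_pending_interrupt(cs, CPU_INTERRUPT_SIPI) == CPU_INTERRUPT_SIPI);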