author     David Hildenbrand <david@redhat.com>    2019-03-07 13:15:18 +0100
committer  Cornelia Huck <cohuck@redhat.com>       2019-03-11 09:31:01 +0100
commit     5d203bea599bcfbcca00a1f614784a2cfd17e0f8 (patch)
tree       0a2c5a44d02d18e1a045f5b124a5d48c7a4403f3 /target/s390x/translate_vx.inc.c
parent     e6790d3211185ee4c20fef38fb025f87c1931350 (diff)
s390x/tcg: Implement VECTOR LOAD GR FROM VR ELEMENT
To avoid a helper, we have to do the actual calculation of the element
address (offset relative to cpu_env + cpu_env) manually. Factor that out
into get_vec_element_ptr_i64(). The same logic will be reused for
"VECTOR LOAD VR ELEMENT FROM GR".

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20190307121539.12842-12-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
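For reference, the offset the patch computes at translation time mirrors the existing vec_reg_offset() logic: mask the element number, scale it by the element size, apply a byte-swap fixup within each 8-byte doubleword on little-endian hosts, then add the register's base offset. A minimal host-side C sketch of that calculation follows; the function name, the host_little_endian flag, and the full_reg_offset parameter (standing in for vec_full_reg_offset()) are assumptions for illustration only.

#include <stdint.h>

/* Illustrative sketch: the same arithmetic the patch emits as TCG ops
 * (andi, shli, optional xori, addi), done directly in C. */
static int element_offset_sketch(int full_reg_offset, uint64_t enr, int es,
                                 int host_little_endian)
{
    const int bytes = 1 << es;        /* element size in bytes */
    const int elements = 16 / bytes;  /* elements per 128-bit register */
    uint64_t offs = (enr & (elements - 1)) << es;

    if (host_little_endian) {
        /* flip the element's position within its 8-byte doubleword */
        offs ^= 8 - bytes;
    }
    return full_reg_offset + (int)offs;
}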
Diffstat (limited to 'target/s390x/translate_vx.inc.c')
-rw-r--r--  target/s390x/translate_vx.inc.c  63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index d450ce2d2b..b163100b0d 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -113,6 +113,28 @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
     }
 }
+static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
+                                    uint8_t es)
+{
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    /* mask off invalid parts from the element nr */
+    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);
+
+    /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
+    tcg_gen_shli_i64(tmp, tmp, es);
+#ifndef HOST_WORDS_BIGENDIAN
+    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
+#endif
+    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
+
+    /* generate the final ptr by adding cpu_env */
+    tcg_gen_trunc_i64_ptr(ptr, tmp);
+    tcg_gen_add_ptr(ptr, ptr, cpu_env);
+
+    tcg_temp_free_i64(tmp);
+}
+
 #define gen_gvec_dup_i64(es, v1, c) \
     tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
 #define gen_gvec_mov(v1, v2) \
@@ -297,3 +319,44 @@ static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
     tcg_temp_free_i64(tmp);
     return DISAS_NEXT;
 }
+
+static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
+{
+    const uint8_t es = get_field(s->fields, m4);
+    TCGv_ptr ptr;
+
+    if (es > ES_64) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    /* fast path if we don't need the register content */
+    if (!get_field(s->fields, b2)) {
+        uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1);
+
+        read_vec_element_i64(o->out, get_field(s->fields, v3), enr, es);
+        return DISAS_NEXT;
+    }
+
+    ptr = tcg_temp_new_ptr();
+    get_vec_element_ptr_i64(ptr, get_field(s->fields, v3), o->addr1, es);
+    switch (es) {
+    case ES_8:
+        tcg_gen_ld8u_i64(o->out, ptr, 0);
+        break;
+    case ES_16:
+        tcg_gen_ld16u_i64(o->out, ptr, 0);
+        break;
+    case ES_32:
+        tcg_gen_ld32u_i64(o->out, ptr, 0);
+        break;
+    case ES_64:
+        tcg_gen_ld_i64(o->out, ptr, 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    tcg_temp_free_ptr(ptr);
+
+    return DISAS_NEXT;
+}
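To relate the translation back to the instruction's architectural behaviour: VECTOR LOAD GR FROM VR ELEMENT places one zero-extended element of the vector register into a general register, with the element index taken from the low bits of the second-operand address (hence the fast path above when b2 is 0 and the index is a constant). A rough behavioural model follows; this is not QEMU code, and the byte-array vector representation, the function name, and the d2_plus_b2 parameter are assumptions for illustration.

#include <stdint.h>

/* Behavioural sketch of VLGV: return the zero-extended element of a
 * 16-byte vector register, selected by the masked effective address. */
uint64_t vlgv_model(const uint8_t vreg[16], uint64_t d2_plus_b2, int es)
{
    const int bytes = 1 << es;        /* element size in bytes */
    const int elements = 16 / bytes;  /* elements per register */
    const int enr = d2_plus_b2 & (elements - 1);
    uint64_t val = 0;

    /* s390x vector elements are big-endian, element 0 is leftmost */
    for (int i = 0; i < bytes; i++) {
        val = (val << 8) | vreg[enr * bytes + i];
    }
    return val;  /* value written, zero-extended, to the general register */
}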