
Commit 18fc240

rth7680 authored and pm215 committed
target/arm: Implement SVE fp complex multiply add (indexed)
Enhance the existing helpers to support SVE, which takes the index
from each 128-bit segment.  The change has no effect for AdvSIMD,
since there is only one such segment.

Signed-off-by: Richard Henderson <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Reviewed-by: Alex Bennée <[email protected]>
Message-id: [email protected]
Signed-off-by: Peter Maydell <[email protected]>
1 parent 2cc9991 commit 18fc240
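
To illustrate the per-segment indexing the commit message describes, here is a minimal standalone sketch (not QEMU code: the function name, the plain float arithmetic, and the restriction to rot == 0 are assumptions made for the example). Each 128-bit segment supplies its own indexed complex pair from the third operand, so a long SVE vector behaves like a sequence of independent 128-bit AdvSIMD operations:

#include <stddef.h>

/* Hypothetical illustration of FCMLA (indexed) with rot == 0 and
 * single-precision elements.  Each 128-bit segment holds 4 floats
 * (2 complex pairs); the indexed pair (mr, mi) is re-read per segment,
 * so a 16-byte vector (AdvSIMD) sees exactly one segment.
 */
static void fcmla_idx_sketch(float *d, const float *n, const float *m,
                             size_t elements, size_t index)
{
    const size_t eltspersegment = 16 / sizeof(float);  /* 4 */

    for (size_t i = 0; i < elements; i += eltspersegment) {
        float mr = m[i + 2 * index + 0];  /* real of the indexed pair */
        float mi = m[i + 2 * index + 1];  /* imag of the indexed pair */

        for (size_t j = i; j < i + eltspersegment; j += 2) {
            /* rot == 0 step: d_r += n_r * m_r; d_i += n_r * m_i */
            d[j + 0] += n[j + 0] * mr;
            d[j + 1] += n[j + 0] * mi;
        }
    }
}

With elements == 4 (one segment) this matches the existing AdvSIMD behaviour; for larger vectors the outer loop simply repeats the same per-segment operation, which is exactly what the vec_helper.c changes below do with float16_muladd/float32_muladd.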

File tree

3 files changed, +59 -20 lines changed


target/arm/sve.decode (+6)

@@ -733,6 +733,12 @@ FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
 FCMLA_zpzzz     01100100 esz:2 0 rm:5 0 rot:2 pg:3 rn:5 rd:5 \
                 ra=%reg_movprfx
 
+# SVE floating-point complex multiply-add (indexed)
+FCMLA_zzxz      01100100 10 1 index:2 rm:3 0001 rot:2 rn:5 rd:5 \
+                ra=%reg_movprfx esz=1
+FCMLA_zzxz      01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \
+                ra=%reg_movprfx esz=2
+
 ### SVE FP Multiply-Add Indexed Group
 
 # SVE floating-point multiply-add (indexed)
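
For reference, a hedged sketch of how the esz=1 (half-precision) pattern above carves up the 32-bit instruction word; decodetree generates the real decoder, and the function name here is invented purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative decoder for the esz=1 FCMLA_zzxz pattern above:
 * 01100100 10 1 index:2 rm:3 0001 rot:2 rn:5 rd:5
 */
static bool decode_fcmla_zzxz_h(uint32_t insn,
                                int *index, int *rm, int *rot,
                                int *rn, int *rd)
{
    /* Fixed bits [31:21] and [15:12]: 01100100 10 1 .. ... 0001 */
    if ((insn & 0xffe0f000u) != 0x64a01000u) {
        return false;
    }
    *index = (insn >> 19) & 0x3;   /* index:2, bits [20:19] */
    *rm    = (insn >> 16) & 0x7;   /* rm:3,    bits [18:16] */
    *rot   = (insn >> 10) & 0x3;   /* rot:2,   bits [11:10] */
    *rn    = (insn >>  5) & 0x1f;  /* rn:5,    bits [9:5]   */
    *rd    = insn & 0x1f;          /* rd:5,    bits [4:0]   */
    return true;
}

The esz=2 pattern differs only in the fixed bits [23:22] = 11 and in splitting the same field space as index:1 rm:4, since single-precision indexing needs one index bit but a wider register field.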

target/arm/translate-sve.c (+23)

@@ -4005,6 +4005,29 @@ static bool trans_FCMLA_zpzzz(DisasContext *s,
     return true;
 }
 
+static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a, uint32_t insn)
+{
+    static gen_helper_gvec_3_ptr * const fns[2] = {
+        gen_helper_gvec_fcmlah_idx,
+        gen_helper_gvec_fcmlas_idx,
+    };
+
+    tcg_debug_assert(a->esz == 1 || a->esz == 2);
+    tcg_debug_assert(a->rd == a->ra);
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        TCGv_ptr status = get_fpstatus_ptr(a->esz == MO_16);
+        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           status, vsz, vsz,
+                           a->index * 4 + a->rot,
+                           fns[a->esz - 1]);
+        tcg_temp_free_ptr(status);
+    }
+    return true;
+}
+
 /*
  *** SVE Floating Point Unary Operations Predicated Group
  */
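
The only per-instruction data the helpers need is the rotation and the element index, so the translator above packs them as a->index * 4 + a->rot before handing them to tcg_gen_gvec_3_ptr(). A small self-contained sketch of that round trip (the pack/unpack function names are invented for illustration; in QEMU the value travels through the gvec descriptor and is recovered with extract32() in the helpers below):

#include <assert.h>
#include <stdint.h>

/* Hypothetical pack/unpack pair mirroring the arithmetic above:
 * rot lands in bits [1:0] (bit 0 = flip, bit 1 = neg_imag) and
 * index in bits [3:2], matching the extract32() calls in the
 * gvec_fcmla*_idx helpers.
 */
static uint32_t pack_fcmla_idx_data(uint32_t index, uint32_t rot)
{
    return index * 4 + rot;
}

static void unpack_fcmla_idx_data(uint32_t data, uint32_t *flip,
                                  uint32_t *neg_imag, uint32_t *index)
{
    *flip     = data & 1;          /* extract32(desc, SHIFT, 1)     */
    *neg_imag = (data >> 1) & 1;   /* extract32(desc, SHIFT + 1, 1) */
    *index    = (data >> 2) & 3;   /* extract32(desc, SHIFT + 2, 2) */
}

int main(void)
{
    uint32_t flip, neg_imag, index;
    unpack_fcmla_idx_data(pack_fcmla_idx_data(3, 2),
                          &flip, &neg_imag, &index);
    assert(flip == 0 && neg_imag == 1 && index == 3);
    return 0;
}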

target/arm/vec_helper.c (+30 -20)

@@ -319,22 +319,27 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
     intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
     uint32_t neg_real = flip ^ neg_imag;
-    uintptr_t i;
-    float16 e1 = m[H2(2 * index + flip)];
-    float16 e3 = m[H2(2 * index + 1 - flip)];
+    intptr_t elements = opr_sz / sizeof(float16);
+    intptr_t eltspersegment = 16 / sizeof(float16);
+    intptr_t i, j;
 
     /* Shift boolean to the sign bit so we can xor to negate.  */
     neg_real <<= 15;
     neg_imag <<= 15;
-    e1 ^= neg_real;
-    e3 ^= neg_imag;
 
-    for (i = 0; i < opr_sz / 2; i += 2) {
-        float16 e2 = n[H2(i + flip)];
-        float16 e4 = e2;
+    for (i = 0; i < elements; i += eltspersegment) {
+        float16 mr = m[H2(i + 2 * index + 0)];
+        float16 mi = m[H2(i + 2 * index + 1)];
+        float16 e1 = neg_real ^ (flip ? mi : mr);
+        float16 e3 = neg_imag ^ (flip ? mr : mi);
 
-        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
-        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+        for (j = i; j < i + eltspersegment; j += 2) {
+            float16 e2 = n[H2(j + flip)];
+            float16 e4 = e2;
+
+            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
+            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+        }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
@@ -380,22 +385,27 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
     intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
     uint32_t neg_real = flip ^ neg_imag;
-    uintptr_t i;
-    float32 e1 = m[H4(2 * index + flip)];
-    float32 e3 = m[H4(2 * index + 1 - flip)];
+    intptr_t elements = opr_sz / sizeof(float32);
+    intptr_t eltspersegment = 16 / sizeof(float32);
+    intptr_t i, j;
 
     /* Shift boolean to the sign bit so we can xor to negate.  */
     neg_real <<= 31;
     neg_imag <<= 31;
-    e1 ^= neg_real;
-    e3 ^= neg_imag;
 
-    for (i = 0; i < opr_sz / 4; i += 2) {
-        float32 e2 = n[H4(i + flip)];
-        float32 e4 = e2;
+    for (i = 0; i < elements; i += eltspersegment) {
+        float32 mr = m[H4(i + 2 * index + 0)];
+        float32 mi = m[H4(i + 2 * index + 1)];
+        float32 e1 = neg_real ^ (flip ? mi : mr);
+        float32 e3 = neg_imag ^ (flip ? mr : mi);
 
-        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
-        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+        for (j = i; j < i + eltspersegment; j += 2) {
+            float32 e2 = n[H4(j + flip)];
+            float32 e4 = e2;
+
+            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
+            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+        }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
