/*
 * ARM AdvSIMD / SVE Vector Operations
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that need a host-endian fixup. */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x) ((x) ^ 7)
#define H2(x) ((x) ^ 3)
#define H4(x) ((x) ^ 1)
#else
#define H1(x) (x)
#define H2(x) (x)
#define H4(x) (x)
#endif

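/*
 * Example: on a big-endian host, H2(0) == 3, so the 16-bit element with
 * logical index 0 is found at byte offset 6 of its 64-bit chunk, which
 * is where big-endian storage places the least significant halfword.
 * On little-endian hosts the macros are the identity and no fixup is
 * needed.
 */
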
/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
                             bool neg, bool round, uint32_t *sat)
{
    /*
     * Simplify:
     * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    if (neg) {
        ret = -ret;
    }
    ret += ((int32_t)src3 << 15) + (round << 14);
    ret >>= 15;

    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT16_MIN : INT16_MAX);
    }
    return ret;
}

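/*
 * Worked example: with round = true, src3 = 0, and both multiplicands
 * INT16_MIN (0x8000), the product is 0x40000000; adding the rounding
 * constant and shifting gives 0x8000, which does not fit in int16_t,
 * so the result saturates to INT16_MAX and the saturation flag is set.
 */
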
uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat);
    uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
                                false, true, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat);
    uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
                                true, true, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqdmulh_h)(void *vd, void *vn, void *vm,
                            void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
                             void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
                             bool neg, bool round, uint32_t *sat)
{
    /* Simplify similarly to do_sqrdmlah_h above.  */
    int64_t ret = (int64_t)src1 * src2;
    if (neg) {
        ret = -ret;
    }
    ret += ((int64_t)src3 << 31) + (round << 30);
    ret >>= 31;

    if (ret != (int32_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return do_sqrdmlah_s(src1, src2, src3, false, true, sat);
}

void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return do_sqrdmlah_s(src1, src2, src3, true, true, sat);
}

void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqdmulh_s)(void *vd, void *vn, void *vm,
                            void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
                             void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Integer 8 and 16-bit dot-product.
 *
 * Note that for the loops herein, host endianness does not matter
 * with respect to the ordering of data within the 64-bit lanes.
 * All elements are treated equally, no matter where they are.
 */

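/*
 * Worked example for one 32-bit lane of gvec_sdot_b below: with
 * n = {1, 2, 3, 4} and m = {10, 20, 30, 40}, the lane accumulates
 * 1*10 + 2*20 + 3*30 + 4*40 = 300.
 */
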
void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint32_t *d = vd;
    int8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint32_t *d = vd;
    uint8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd;
    int16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd;
    uint16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    uint32_t *d = vd;
    int8_t *n = vn;
    int8_t *m_indexed = (int8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        int8_t m0 = m_indexed[i * 4 + 0];
        int8_t m1 = m_indexed[i * 4 + 1];
        int8_t m2 = m_indexed[i * 4 + 2];
        int8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

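/*
 * The two-level loop above walks the vector in 128-bit segments: the
 * four bytes of the indexed element are re-read once per segment of
 * four 32-bit lanes, matching the per-segment indexing rule used by
 * SVE (for AdvSIMD, opr_sz == 8 gives a single, shorter segment).
 */
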
void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    uint32_t *d = vd;
    uint8_t *n = vn;
    uint8_t *m_indexed = (uint8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        uint8_t m0 = m_indexed[i * 4 + 0];
        uint8_t m1 = m_indexed[i * 4 + 1];
        uint8_t m2 = m_indexed[i * 4 + 2];
        uint8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    uint64_t *d = vd;
    int16_t *n = vn;
    int16_t *m_indexed = (int16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        uint64_t d0, d1;

        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    uint64_t *d = vd;
    uint16_t *n = vn;
    uint16_t *m_indexed = (uint16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        uint64_t d0, d1;

        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

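/*
 * FCADD: the complex pairs are (real, imag) at even/odd indices, and
 * the single desc bit chooses which element of each m pair is negated
 * before the add.  With neg_imag set, d.real = n.real - m.imag and
 * d.imag = n.imag + m.real, i.e. m rotated by 90 degrees; with
 * neg_real set the signs swap, giving the 270 degree rotation.
 */
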
void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e0 = n[H2(i)];
        float16 e1 = m[H2(i + 1)] ^ neg_imag;
        float16 e2 = n[H2(i + 1)];
        float16 e3 = m[H2(i)] ^ neg_real;

        d[H2(i)] = float16_add(e0, e1, fpst);
        d[H2(i + 1)] = float16_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e0 = n[H4(i)];
        float32 e1 = m[H4(i + 1)] ^ neg_imag;
        float32 e2 = n[H4(i + 1)];
        float32 e3 = m[H4(i)] ^ neg_real;

        d[H4(i)] = float32_add(e0, e1, fpst);
        d[H4(i + 1)] = float32_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e0 = n[i];
        float64 e1 = m[i + 1] ^ neg_imag;
        float64 e2 = n[i + 1];
        float64 e3 = m[i] ^ neg_real;

        d[i] = float64_add(e0, e1, fpst);
        d[i + 1] = float64_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

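/*
 * FCMLA: desc bit 0 (flip) selects whether the real or the imaginary
 * element of n multiplies both elements of m, and together with bit 1
 * it determines the signs; the four combinations correspond to the
 * four rotations.  E.g. with flip and both negations clear, each pair
 * computes d.real += n.real * m.real and d.imag += n.real * m.imag,
 * which is one half of a full complex multiply-accumulate.
 */
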
void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e2 = n[H2(i + flip)];
        float16 e1 = m[H2(i + flip)] ^ neg_real;
        float16 e4 = e2;
        float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;

        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float16);
    intptr_t eltspersegment = 16 / sizeof(float16);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < elements; i += eltspersegment) {
        float16 mr = m[H2(i + 2 * index + 0)];
        float16 mi = m[H2(i + 2 * index + 1)];
        float16 e1 = neg_real ^ (flip ? mi : mr);
        float16 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float16 e2 = n[H2(j + flip)];
            float16 e4 = e2;

            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e2 = n[H4(i + flip)];
        float32 e1 = m[H4(i + flip)] ^ neg_real;
        float32 e4 = e2;
        float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;

        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float32);
    intptr_t eltspersegment = 16 / sizeof(float32);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < elements; i += eltspersegment) {
        float32 mr = m[H4(i + 2 * index + 0)];
        float32 mi = m[H4(i + 2 * index + 1)];
        float32 e1 = neg_real ^ (flip ? mi : mr);
        float32 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float32 e2 = n[H4(j + flip)];
            float32 e4 = e2;

            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint64_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate. */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e2 = n[i + flip];
        float64 e1 = m[i + flip] ^ neg_real;
        float64 e4 = e2;
        float64 e3 = m[i + 1 - flip] ^ neg_imag;

        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * Floating point comparisons producing an integer result (all 1s or all 0s).
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
static uint16_t float16_ceq(float16 op1, float16 op2, float_status *stat)
{
    return -float16_eq_quiet(op1, op2, stat);
}

static uint32_t float32_ceq(float32 op1, float32 op2, float_status *stat)
{
    return -float32_eq_quiet(op1, op2, stat);
}

static uint16_t float16_cge(float16 op1, float16 op2, float_status *stat)
{
    return -float16_le(op2, op1, stat);
}

static uint32_t float32_cge(float32 op1, float32 op2, float_status *stat)
{
    return -float32_le(op2, op1, stat);
}

static uint16_t float16_cgt(float16 op1, float16 op2, float_status *stat)
{
    return -float16_lt(op2, op1, stat);
}

static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
{
    return -float32_lt(op2, op1, stat);
}

static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
{
    return -float16_le(float16_abs(op2), float16_abs(op1), stat);
}

static uint32_t float32_acge(float32 op1, float32 op2, float_status *stat)
{
    return -float32_le(float32_abs(op2), float32_abs(op1), stat);
}

static uint16_t float16_acgt(float16 op1, float16 op2, float_status *stat)
{
    return -float16_lt(float16_abs(op2), float16_abs(op1), stat);
}

static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
{
    return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
}

static int16_t vfp_tosszh(float16 x, void *fpstp)
{
    float_status *fpst = fpstp;
    if (float16_is_any_nan(x)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_round_to_zero(x, fpst);
}

static uint16_t vfp_touszh(float16 x, void *fpstp)
{
    float_status *fpst = fpstp;
    if (float16_is_any_nan(x)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_round_to_zero(x, fpst);
}

#define DO_2OP(NAME, FUNC, TYPE)                                  \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)  \
{                                                                 \
    intptr_t i, oprsz = simd_oprsz(desc);                         \
    TYPE *d = vd, *n = vn;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                  \
        d[i] = FUNC(n[i], stat);                                  \
    }                                                             \
    clear_tail(d, oprsz, simd_maxsz(desc));                       \
}

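/*
 * For instance, DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) below
 * defines helper_gvec_frecpe_h, which applies the scalar reciprocal
 * estimate to every float16 lane of the operand and then zeroes any
 * tail bytes beyond the operation size.
 */
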
DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)

DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
DO_2OP(gvec_vrintx_s, float32_round_to_int, float32)

DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
DO_2OP(gvec_touizs, helper_vfp_touizs, float32)
DO_2OP(gvec_sstoh, int16_to_float16, int16_t)
DO_2OP(gvec_ustoh, uint16_to_float16, uint16_t)
DO_2OP(gvec_tosszh, vfp_tosszh, float16)
DO_2OP(gvec_touszh, vfp_touszh, float16)

#define WRAP_CMP0_FWD(FN, CMPOP, TYPE)                          \
    static TYPE TYPE##_##FN##0(TYPE op, float_status *stat)     \
    {                                                           \
        return TYPE##_##CMPOP(op, TYPE##_zero, stat);           \
    }

#define WRAP_CMP0_REV(FN, CMPOP, TYPE)                          \
    static TYPE TYPE##_##FN##0(TYPE op, float_status *stat)     \
    {                                                           \
        return TYPE##_##CMPOP(TYPE##_zero, op, stat);           \
    }

#define DO_2OP_CMP0(FN, CMPOP, DIRN)                    \
    WRAP_CMP0_##DIRN(FN, CMPOP, float16)                \
    WRAP_CMP0_##DIRN(FN, CMPOP, float32)                \
    DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16)   \
    DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)

DO_2OP_CMP0(cgt, cgt, FWD)
DO_2OP_CMP0(cge, cge, FWD)
DO_2OP_CMP0(ceq, ceq, FWD)
DO_2OP_CMP0(clt, cgt, REV)
DO_2OP_CMP0(cle, cge, REV)

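/*
 * The REV wrappers reuse the forward primitives with the operands
 * swapped: e.g. float32_clt0(op, stat) expands to
 * float32_cgt(float32_zero, op, stat), which is all-ones exactly when
 * op compares less than zero.
 */
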
#undef DO_2OP
#undef DO_2OP_CMP0

/* Floating-point trigonometric starting value.
 * See the ARM ARM pseudocode function FPTrigSMul.
 */
static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
{
    float16 result = float16_mul(op1, op1, stat);
    if (!float16_is_any_nan(result)) {
        result = float16_set_sign(result, op2 & 1);
    }
    return result;
}

static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
{
    float32 result = float32_mul(op1, op1, stat);
    if (!float32_is_any_nan(result)) {
        result = float32_set_sign(result, op2 & 1);
    }
    return result;
}

static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
{
    float64 result = float64_mul(op1, op1, stat);
    if (!float64_is_any_nan(result)) {
        result = float64_set_sign(result, op2 & 1);
    }
    return result;
}

static float16 float16_abd(float16 op1, float16 op2, float_status *stat)
{
    return float16_abs(float16_sub(op1, op2, stat));
}

static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
{
    return float32_abs(float32_sub(op1, op2, stat));
}

/*
 * Reciprocal step.  These are the AArch32 versions, which use a
 * non-fused multiply-and-subtract.
 */
static float16 float16_recps_nf(float16 op1, float16 op2, float_status *stat)
{
    op1 = float16_squash_input_denormal(op1, stat);
    op2 = float16_squash_input_denormal(op2, stat);

    if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
        (float16_is_infinity(op2) && float16_is_zero(op1))) {
        return float16_two;
    }
    return float16_sub(float16_two, float16_mul(op1, op2, stat), stat);
}

static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
{
    op1 = float32_squash_input_denormal(op1, stat);
    op2 = float32_squash_input_denormal(op2, stat);

    if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
        (float32_is_infinity(op2) && float32_is_zero(op1))) {
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
}

/* Reciprocal square-root step.  AArch32 non-fused semantics. */
static float16 float16_rsqrts_nf(float16 op1, float16 op2, float_status *stat)
{
    op1 = float16_squash_input_denormal(op1, stat);
    op2 = float16_squash_input_denormal(op2, stat);

    if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
        (float16_is_infinity(op2) && float16_is_zero(op1))) {
        return float16_one_point_five;
    }
    op1 = float16_sub(float16_three, float16_mul(op1, op2, stat), stat);
    return float16_div(op1, float16_two, stat);
}

static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
{
    op1 = float32_squash_input_denormal(op1, stat);
    op2 = float32_squash_input_denormal(op2, stat);

    if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
        (float32_is_infinity(op2) && float32_is_zero(op1))) {
        return float32_one_point_five;
    }
    op1 = float32_sub(float32_three, float32_mul(op1, op2, stat), stat);
    return float32_div(op1, float32_two, stat);
}

#define DO_3OP(NAME, FUNC, TYPE)                                           \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = FUNC(n[i], m[i], stat);                                     \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_3OP(gvec_fadd_h, float16_add, float16)
DO_3OP(gvec_fadd_s, float32_add, float32)
DO_3OP(gvec_fadd_d, float64_add, float64)

DO_3OP(gvec_fsub_h, float16_sub, float16)
DO_3OP(gvec_fsub_s, float32_sub, float32)
DO_3OP(gvec_fsub_d, float64_sub, float64)

DO_3OP(gvec_fmul_h, float16_mul, float16)
DO_3OP(gvec_fmul_s, float32_mul, float32)
DO_3OP(gvec_fmul_d, float64_mul, float64)

DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)

DO_3OP(gvec_fabd_h, float16_abd, float16)
DO_3OP(gvec_fabd_s, float32_abd, float32)

DO_3OP(gvec_fceq_h, float16_ceq, float16)
DO_3OP(gvec_fceq_s, float32_ceq, float32)

DO_3OP(gvec_fcge_h, float16_cge, float16)
DO_3OP(gvec_fcge_s, float32_cge, float32)

DO_3OP(gvec_fcgt_h, float16_cgt, float16)
DO_3OP(gvec_fcgt_s, float32_cgt, float32)

DO_3OP(gvec_facge_h, float16_acge, float16)
DO_3OP(gvec_facge_s, float32_acge, float32)

DO_3OP(gvec_facgt_h, float16_acgt, float16)
DO_3OP(gvec_facgt_s, float32_acgt, float32)

DO_3OP(gvec_fmax_h, float16_max, float16)
DO_3OP(gvec_fmax_s, float32_max, float32)

DO_3OP(gvec_fmin_h, float16_min, float16)
DO_3OP(gvec_fmin_s, float32_min, float32)

DO_3OP(gvec_fmaxnum_h, float16_maxnum, float16)
DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)

DO_3OP(gvec_fminnum_h, float16_minnum, float16)
DO_3OP(gvec_fminnum_s, float32_minnum, float32)

DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)

DO_3OP(gvec_rsqrts_nf_h, float16_rsqrts_nf, float16)
DO_3OP(gvec_rsqrts_nf_s, float32_rsqrts_nf, float32)

#ifdef TARGET_AARCH64

DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)

DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)

#endif
#undef DO_3OP

/* Non-fused multiply-add (unlike float16_muladd etc, which are fused) */
static float16 float16_muladd_nf(float16 dest, float16 op1, float16 op2,
                                 float_status *stat)
{
    return float16_add(dest, float16_mul(op1, op2, stat), stat);
}

static float32 float32_muladd_nf(float32 dest, float32 op1, float32 op2,
                                 float_status *stat)
{
    return float32_add(dest, float32_mul(op1, op2, stat), stat);
}

static float16 float16_mulsub_nf(float16 dest, float16 op1, float16 op2,
                                 float_status *stat)
{
    return float16_sub(dest, float16_mul(op1, op2, stat), stat);
}

static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
                                 float_status *stat)
{
    return float32_sub(dest, float32_mul(op1, op2, stat), stat);
}

/* Fused versions; these have the semantics Neon VFMA/VFMS want */
static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2,
                                float_status *stat)
{
    return float16_muladd(op1, op2, dest, 0, stat);
}

static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
                                float_status *stat)
{
    return float32_muladd(op1, op2, dest, 0, stat);
}

static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
                                float_status *stat)
{
    return float16_muladd(float16_chs(op1), op2, dest, 0, stat);
}

static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
                                float_status *stat)
{
    return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
}

#define DO_MULADD(NAME, FUNC, TYPE)                                        \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = FUNC(d[i], n[i], m[i], stat);                               \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)

DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)

DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)

DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)

/* For the indexed ops, SVE applies the index per 128-bit vector segment.
 * For AdvSIMD, there is of course only one such vector segment.
 */

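/*
 * E.g. for 32-bit elements and a 256-bit SVE vector, each segment is
 * four elements: lanes 0-3 are all multiplied by the element at idx
 * within the first segment of m, lanes 4-7 by the element at the same
 * index within the second segment.
 */
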
#define DO_MUL_IDX(NAME, TYPE, H)                                          \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)             \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc);                               \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                      \
    intptr_t idx = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = n[i + j] * mm;                                      \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2)
DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4)
DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )

#undef DO_MUL_IDX

#define DO_MLA_IDX(NAME, TYPE, OP, H)                                      \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc)   \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc);                               \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                      \
    intptr_t idx = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                               \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = a[i + j] OP n[i + j] * mm;                          \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, )

DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )

#undef DO_MLA_IDX

#define DO_FMUL_IDX(NAME, ADD, TYPE, H)                                    \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc);                               \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                      \
    intptr_t idx = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_##ADD(d[i + j],                              \
                                    TYPE##_mul(n[i + j], mm, stat), stat); \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

#define float16_nop(N, M, S) (M)
#define float32_nop(N, M, S) (M)
#define float64_nop(N, M, S) (M)

DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16, H2)
DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32, H4)
DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64, )

/*
 * Non-fused multiply-accumulate operations, for Neon.  NB that unlike
 * the fused ops below, these accumulate both from and into Vd.
 */
DO_FMUL_IDX(gvec_fmla_nf_idx_h, add, float16, H2)
DO_FMUL_IDX(gvec_fmla_nf_idx_s, add, float32, H4)
DO_FMUL_IDX(gvec_fmls_nf_idx_h, sub, float16, H2)
DO_FMUL_IDX(gvec_fmls_nf_idx_s, sub, float32, H4)

#undef float16_nop
#undef float32_nop
#undef float64_nop
#undef DO_FMUL_IDX

#define DO_FMLA_IDX(NAME, TYPE, H)                                         \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va,                  \
                  void *stat, uint32_t desc)                               \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc);                               \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                      \
    TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1);                    \
    intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1);                          \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                               \
    op1_neg <<= (8 * sizeof(TYPE) - 1);                                    \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg,                   \
                                     mm, a[i + j], 0, stat);               \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
DO_FMLA_IDX(gvec_fmla_idx_d, float64, )

#undef DO_FMLA_IDX

#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX)                    \
void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc)   \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPEN *d = vd, *n = vn; TYPEM *m = vm;                                 \
    bool q = false;                                                        \
    for (i = 0; i < oprsz / sizeof(TYPEN); i++) {                          \
        WTYPE dd = (WTYPE)n[i] OP m[i];                                    \
        if (dd < MIN) {                                                    \
            dd = MIN;                                                      \
            q = true;                                                      \
        } else if (dd > MAX) {                                             \
            dd = MAX;                                                      \
            q = true;                                                      \
        }                                                                  \
        d[i] = dd;                                                         \
    }                                                                      \
    if (q) {                                                               \
        uint32_t *qc = vq;                                                 \
        qc[0] = 1;                                                         \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)

DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)

DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)

DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)

#undef DO_SAT

void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (dd < nn) {
            dd = UINT64_MAX;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (nn < mm) {
            dd = 0;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn + mm;
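        /*
         * Signed overflow iff the operands have the same sign but the
         * result's sign differs; saturate toward the sign of nn:
         * (nn >> 63) ^ ~INT64_MIN is INT64_MAX for nn >= 0, else INT64_MIN.
         */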
        if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

#define DO_SRA(NAME, TYPE)                              \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        d[i] += n[i] >> shift;                          \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_SRA(gvec_ssra_b, int8_t)
DO_SRA(gvec_ssra_h, int16_t)
DO_SRA(gvec_ssra_s, int32_t)
DO_SRA(gvec_ssra_d, int64_t)

DO_SRA(gvec_usra_b, uint8_t)
DO_SRA(gvec_usra_h, uint16_t)
DO_SRA(gvec_usra_s, uint32_t)
DO_SRA(gvec_usra_d, uint64_t)

#undef DO_SRA

#define DO_RSHR(NAME, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        TYPE tmp = n[i] >> (shift - 1);                 \
        d[i] = (tmp >> 1) + (tmp & 1);                  \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_RSHR(gvec_srshr_b, int8_t)
DO_RSHR(gvec_srshr_h, int16_t)
DO_RSHR(gvec_srshr_s, int32_t)
DO_RSHR(gvec_srshr_d, int64_t)

DO_RSHR(gvec_urshr_b, uint8_t)
DO_RSHR(gvec_urshr_h, uint16_t)
DO_RSHR(gvec_urshr_s, uint32_t)
DO_RSHR(gvec_urshr_d, uint64_t)

#undef DO_RSHR

#define DO_RSRA(NAME, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        TYPE tmp = n[i] >> (shift - 1);                 \
        d[i] += (tmp >> 1) + (tmp & 1);                 \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_RSRA(gvec_srsra_b, int8_t)
DO_RSRA(gvec_srsra_h, int16_t)
DO_RSRA(gvec_srsra_s, int32_t)
DO_RSRA(gvec_srsra_d, int64_t)

DO_RSRA(gvec_ursra_b, uint8_t)
DO_RSRA(gvec_ursra_h, uint16_t)
DO_RSRA(gvec_ursra_s, uint32_t)
DO_RSRA(gvec_ursra_d, uint64_t)

#undef DO_RSRA

#define DO_SRI(NAME, TYPE)                                                  \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                        \
{                                                                           \
    intptr_t i, oprsz = simd_oprsz(desc);                                   \
    int shift = simd_data(desc);                                            \
    TYPE *d = vd, *n = vn;                                                  \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                            \
        d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_SRI(gvec_sri_b, uint8_t)
DO_SRI(gvec_sri_h, uint16_t)
DO_SRI(gvec_sri_s, uint32_t)
DO_SRI(gvec_sri_d, uint64_t)

#undef DO_SRI

#define DO_SLI(NAME, TYPE)                                              \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                    \
{                                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);                               \
    int shift = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn;                                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                        \
        d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]);  \
    }                                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));                             \
}

DO_SLI(gvec_sli_b, uint8_t)
DO_SLI(gvec_sli_h, uint16_t)
DO_SLI(gvec_sli_s, uint32_t)
DO_SLI(gvec_sli_d, uint64_t)

#undef DO_SLI

/*
 * Convert float16 to float32, raising no exceptions and
 * preserving exceptional values, including SNaN.
 * This is effectively an unpack+repack operation.
 */
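/*
 * For example, float16 1.0 (0x3c00: sign 0, exponent 15, fraction 0)
 * rebiases to exponent 127 and becomes float32 0x3f800000, while a
 * float16 SNaN keeps its (shifted) payload rather than being quieted.
 */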
static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
{
    const int f16_bias = 15;
    const int f32_bias = 127;
    uint32_t sign = extract32(f16, 15, 1);
    uint32_t exp = extract32(f16, 10, 5);
    uint32_t frac = extract32(f16, 0, 10);

    if (exp == 0x1f) {
        /* Inf or NaN */
        exp = 0xff;
    } else if (exp == 0) {
        /* Zero or denormal.  */
        if (frac != 0) {
            if (fz16) {
                frac = 0;
            } else {
                /*
                 * Denormal; these are all normal float32.
                 * Shift the fraction so that the msb is at bit 11,
                 * then remove bit 11 as the implicit bit of the
                 * normalized float32.  Note that we still go through
                 * the shift for normal numbers below, to put the
                 * float32 fraction at the right place.
                 */
                int shift = clz32(frac) - 21;
                frac = (frac << shift) & 0x3ff;
                exp = f32_bias - f16_bias - shift + 1;
            }
        }
    } else {
        /* Normal number; adjust the bias.  */
        exp += f32_bias - f16_bias;
    }
    sign <<= 31;
    exp <<= 23;
    frac <<= 23 - 10;

    return sign | exp | frac;
}

static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
{
    /*
     * Branchless load of u32[0], u64[0], u32[1], or u64[1].
     * Load the 2nd qword iff is_q & is_2.
     * Shift to the 2nd dword iff !is_q & is_2.
     * For !is_q & !is_2, the upper bits of the result are garbage.
     */
    return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
}

/*
 * Note that FMLAL requires oprsz == 8 or oprsz == 16,
 * as there are not yet SVE versions that might use blocking.
 */

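/*
 * The desc data bits for these helpers: bit 0 negates the products
 * (the FMLSL forms), bit 1 selects the second half of the input
 * vectors (the FMLAL2/FMLSL2 forms, via load4_f16 above).
 */
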
static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
                     uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int is_q = oprsz == 16;
    uint64_t n_4, m_4;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);
    m_4 = load4_f16(vm, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                         uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
    int is_q = oprsz == 16;
    uint64_t n_4;
    float32 m_1;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

1544void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
1545{
1546 intptr_t i, opr_sz = simd_oprsz(desc);
1547 int8_t *d = vd, *n = vn, *m = vm;
1548
1549 for (i = 0; i < opr_sz; ++i) {
1550 int8_t mm = m[i];
1551 int8_t nn = n[i];
1552 int8_t res = 0;
1553 if (mm >= 0) {
1554 if (mm < 8) {
1555 res = nn << mm;
1556 }
1557 } else {
1558 res = nn >> (mm > -8 ? -mm : 7);
1559 }
1560 d[i] = res;
1561 }
1562 clear_tail(d, opr_sz, simd_maxsz(desc));
1563}
1564
1565void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
1566{
1567 intptr_t i, opr_sz = simd_oprsz(desc);
1568 int16_t *d = vd, *n = vn, *m = vm;
1569
1570 for (i = 0; i < opr_sz / 2; ++i) {
1571 int8_t mm = m[i]; /* only 8 bits of shift are significant */
1572 int16_t nn = n[i];
1573 int16_t res = 0;
1574 if (mm >= 0) {
1575 if (mm < 16) {
1576 res = nn << mm;
1577 }
1578 } else {
1579 res = nn >> (mm > -16 ? -mm : 15);
1580 }
1581 d[i] = res;
1582 }
1583 clear_tail(d, opr_sz, simd_maxsz(desc));
1584}

void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        uint8_t nn = n[i];
        uint8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            if (mm > -8) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        uint16_t nn = n[i];
        uint16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            if (mm > -16) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}
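
/*
 * Editorial note (worked values, not in the original file): USHL
 * differs from SSHL above only when the negative shift count reaches
 * the element width, where the logical right shift gives 0 rather than
 * the sign fill.  For example, with nn = 0x80 in gvec_ushl_b:
 *   mm = 1  -> 0x00 (the set bit is shifted out of the 8-bit result)
 *   mm = -1 -> 0x40 (logical right shift)
 *   mm = -8 -> 0x00 (count equals the element width)
 * while gvec_sshl_b with nn = 0x80 (-128) and mm = -8 yields 0xff.
 */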

/*
 * 8x8->8 polynomial multiply.
 *
 * Polynomial multiplication is like integer multiplication except the
 * partial products are XORed, not added.
 *
 * TODO: expose this as a generic vector operation, as it is a common
 * crypto building block.
 */
void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = n[i];
        uint64_t mm = m[i];
        uint64_t rr = 0;

        for (j = 0; j < 8; ++j) {
            uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
            rr ^= mm & mask;
            mm = (mm << 1) & 0xfefefefefefefefeull;
            nn >>= 1;
        }
        d[i] = rr;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}
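
/*
 * Illustrative sketch, not part of the original file (the helper name
 * is ours): a bit-serial scalar reference for one byte lane of
 * gvec_pmul_b.  The vector version above runs the same eight steps on
 * all eight lanes at once, using (nn & 0x01...01ull) * 0xff to expand
 * bit 0 of every lane into a full byte mask.
 */
static inline uint8_t pmul8_ref(uint8_t nn, uint8_t mm)
{
    uint8_t rr = 0;
    int j;

    for (j = 0; j < 8; ++j) {
        if (nn & (1 << j)) {
            rr ^= mm << j;      /* partial products are XORed, not added */
        }
    }
    return rr;
}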

/*
 * 64x64->128 polynomial multiply.
 * Because the lanes are not accessed in strict columns,
 * this probably cannot be turned into a generic helper.
 */
void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; i += 2) {
        uint64_t nn = n[i + hi];
        uint64_t mm = m[i + hi];
        uint64_t rhi = 0;
        uint64_t rlo = 0;

        /* Bit 0 can only influence the low 64 bits of the result. */
        if (nn & 1) {
            rlo = mm;
        }

        for (j = 1; j < 64; ++j) {
            uint64_t mask = -((nn >> j) & 1);
            rlo ^= (mm << j) & mask;
            rhi ^= (mm >> (64 - j)) & mask;
        }
        d[i] = rlo;
        d[i + 1] = rhi;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}
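
/*
 * Illustrative sketch, not part of the original file (the helper name
 * is ours): the same algorithm in miniature, as an 8x8->16 carry-less
 * multiply.  -((nn >> j) & 1) expands bit j of nn into an all-zeros or
 * all-ones mask, so each conditional XOR of a partial product is
 * branch-free; the bits that fall out of the low half reappear in the
 * high half as mm >> (8 - j).
 */
static inline uint16_t pmull8_ref(uint8_t nn, uint8_t mm)
{
    uint8_t rlo = (nn & 1) ? mm : 0;  /* bit 0 only touches the low half */
    uint8_t rhi = 0;
    int j;

    for (j = 1; j < 8; ++j) {
        uint8_t mask = -((nn >> j) & 1);
        rlo ^= (mm << j) & mask;
        rhi ^= (mm >> (8 - j)) & mask;
    }
    return ((uint16_t)rhi << 8) | rlo;
}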

/*
 * 8x8->16 polynomial multiply.
 *
 * The byte inputs are expanded to (or extracted from) half-words.
 * Note that neon and sve2 get the inputs from different positions.
 * This allows 4 bytes to be processed in parallel with uint64_t.
 */

static uint64_t expand_byte_to_half(uint64_t x)
{
    return  (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}
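
/*
 * Editorial example, not in the original file: the expansion places
 * each byte in the low half of its own 16-bit lane, e.g.
 * 0x44332211 -> 0x0044003300220011, leaving 8 bits of headroom per
 * lane for the 8x8->16 products.
 */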

static uint64_t pmull_h(uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
        result ^= op2 & mask;
        op1 >>= 1;
        op2 <<= 1;
    }
    return result;
}

void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    uint64_t nn = n[hi], mm = m[hi];

    d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
    nn >>= 32;
    mm >>= 32;
    d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));

    clear_tail(d, 16, simd_maxsz(desc));
}

#ifdef TARGET_AARCH64
void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int shift = simd_data(desc) * 8;
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
        uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;

        d[i] = pmull_h(nn, mm);
    }
}
#endif
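
/*
 * Editorial note, not in the original file: the two helpers above feed
 * pmull_h() from different input layouts.  The NEON form takes the
 * eight bytes of one 64-bit half of the input (selected by simd_data)
 * and widens them, while the SVE2 form takes the even or odd bytes of
 * every 16-bit lane, with simd_data selecting the byte via the shift.
 */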

#define DO_CMP0(NAME, TYPE, OP)                           \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)      \
{                                                         \
    intptr_t i, opr_sz = simd_oprsz(desc);                \
    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {          \
        TYPE nn = *(TYPE *)(vn + i);                      \
        *(TYPE *)(vd + i) = -(nn OP 0);                   \
    }                                                     \
    clear_tail(vd, opr_sz, simd_maxsz(desc));             \
}
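
/*
 * Editorial note, not in the original file: (nn OP 0) evaluates to 0
 * or 1, so -(nn OP 0) is 0 or -1, i.e. an element of all-zeros or
 * all-ones.  This matches the Advanced SIMD compare-against-zero
 * instructions, which set every bit of a result element when the
 * comparison holds.
 */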

DO_CMP0(gvec_ceq0_b, int8_t, ==)
DO_CMP0(gvec_clt0_b, int8_t, <)
DO_CMP0(gvec_cle0_b, int8_t, <=)
DO_CMP0(gvec_cgt0_b, int8_t, >)
DO_CMP0(gvec_cge0_b, int8_t, >=)

DO_CMP0(gvec_ceq0_h, int16_t, ==)
DO_CMP0(gvec_clt0_h, int16_t, <)
DO_CMP0(gvec_cle0_h, int16_t, <=)
DO_CMP0(gvec_cgt0_h, int16_t, >)
DO_CMP0(gvec_cge0_h, int16_t, >=)

#undef DO_CMP0

#define DO_ABD(NAME, TYPE)                                      \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)  \
{                                                               \
    intptr_t i, opr_sz = simd_oprsz(desc);                      \
    TYPE *d = vd, *n = vn, *m = vm;                             \
                                                                \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {               \
        d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];         \
    }                                                           \
    clear_tail(d, opr_sz, simd_maxsz(desc));                    \
}

DO_ABD(gvec_sabd_b, int8_t)
DO_ABD(gvec_sabd_h, int16_t)
DO_ABD(gvec_sabd_s, int32_t)
DO_ABD(gvec_sabd_d, int64_t)

DO_ABD(gvec_uabd_b, uint8_t)
DO_ABD(gvec_uabd_h, uint16_t)
DO_ABD(gvec_uabd_s, uint32_t)
DO_ABD(gvec_uabd_d, uint64_t)

#undef DO_ABD
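
/*
 * Editorial note (worked values, not in the original file): the
 * difference is stored back in the element type, so it wraps exactly
 * as the instructions do.  For gvec_sabd_b with n = 127 and m = -128,
 * the true distance 255 does not fit in int8_t; the stored result is
 * 0xff, matching SABD, whose result element is the absolute difference
 * truncated to the element size.
 */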

#define DO_ABA(NAME, TYPE)                                      \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)  \
{                                                               \
    intptr_t i, opr_sz = simd_oprsz(desc);                      \
    TYPE *d = vd, *n = vn, *m = vm;                             \
                                                                \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {               \
        d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];        \
    }                                                           \
    clear_tail(d, opr_sz, simd_maxsz(desc));                    \
}

DO_ABA(gvec_saba_b, int8_t)
DO_ABA(gvec_saba_h, int16_t)
DO_ABA(gvec_saba_s, int32_t)
DO_ABA(gvec_saba_d, int64_t)

DO_ABA(gvec_uaba_b, uint8_t)
DO_ABA(gvec_uaba_h, uint16_t)
DO_ABA(gvec_uaba_s, uint32_t)
DO_ABA(gvec_uaba_d, uint64_t)

#undef DO_ABA

#define DO_NEON_PAIRWISE(NAME, OP)                              \
    void HELPER(NAME##s)(void *vd, void *vn, void *vm,          \
                         void *stat, uint32_t oprsz)            \
    {                                                           \
        float_status *fpst = stat;                              \
        float32 *d = vd;                                        \
        float32 *n = vn;                                        \
        float32 *m = vm;                                        \
        float32 r0, r1;                                         \
                                                                \
        /* Read all inputs before writing outputs in case vm == vd */ \
        r0 = float32_##OP(n[H4(0)], n[H4(1)], fpst);            \
        r1 = float32_##OP(m[H4(0)], m[H4(1)], fpst);            \
                                                                \
        d[H4(0)] = r0;                                          \
        d[H4(1)] = r1;                                          \
    }                                                           \
                                                                \
    void HELPER(NAME##h)(void *vd, void *vn, void *vm,          \
                         void *stat, uint32_t oprsz)            \
    {                                                           \
        float_status *fpst = stat;                              \
        float16 *d = vd;                                        \
        float16 *n = vn;                                        \
        float16 *m = vm;                                        \
        float16 r0, r1, r2, r3;                                 \
                                                                \
        /* Read all inputs before writing outputs in case vm == vd */ \
        r0 = float16_##OP(n[H2(0)], n[H2(1)], fpst);            \
        r1 = float16_##OP(n[H2(2)], n[H2(3)], fpst);            \
        r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst);            \
        r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst);            \
                                                                \
        d[H2(0)] = r0;                                          \
        d[H2(1)] = r1;                                          \
        d[H2(2)] = r2;                                          \
        d[H2(3)] = r3;                                          \
    }

DO_NEON_PAIRWISE(neon_padd, add)
DO_NEON_PAIRWISE(neon_pmax, max)
DO_NEON_PAIRWISE(neon_pmin, min)

#undef DO_NEON_PAIRWISE
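
/*
 * Editorial note, not in the original file: as used here these
 * pairwise helpers only ever see 64-bit Neon operands, so the element
 * counts are fixed (two float32 or four float16 results) instead of
 * being derived from the oprsz argument, and every input is read
 * before any output is written because vd may alias vn or vm, e.g.
 * padd of n = {a, b} and m = {c, d} yields d = {a + b, c + d}.
 */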

#define DO_VCVT_FIXED(NAME, FUNC, TYPE)                                 \
    void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
    {                                                                   \
        intptr_t i, oprsz = simd_oprsz(desc);                           \
        int shift = simd_data(desc);                                    \
        TYPE *d = vd, *n = vn;                                          \
        float_status *fpst = stat;                                      \
        for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
            d[i] = FUNC(n[i], shift, fpst);                             \
        }                                                               \
        clear_tail(d, oprsz, simd_maxsz(desc));                         \
    }

DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)

#undef DO_VCVT_FIXED
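
/*
 * Editorial example, not in the original file: the fixed-point
 * converts scale by 2^shift.  With gvec_vcvt_sf and shift = 4, the
 * integer input 16 is treated as 16 / 2^4 and converts to 1.0f;
 * conversely, gvec_vcvt_fs with shift = 4 converts 1.0f to 16,
 * rounding toward zero.
 */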

#define DO_VCVT_RMODE(NAME, FUNC, TYPE)                                 \
    void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
    {                                                                   \
        float_status *fpst = stat;                                      \
        intptr_t i, oprsz = simd_oprsz(desc);                           \
        uint32_t rmode = simd_data(desc);                               \
        uint32_t prev_rmode = get_float_rounding_mode(fpst);            \
        TYPE *d = vd, *n = vn;                                          \
        set_float_rounding_mode(rmode, fpst);                           \
        for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
            d[i] = FUNC(n[i], 0, fpst);                                 \
        }                                                               \
        set_float_rounding_mode(prev_rmode, fpst);                      \
        clear_tail(d, oprsz, simd_maxsz(desc));                         \
    }

DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)

#undef DO_VCVT_RMODE
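
/*
 * Editorial note, not in the original file: unlike the fixed-point
 * converts above, these variants carry an explicit rounding mode in
 * simd_data instead of a shift count.  The mode is installed for the
 * duration of the loop and the previous mode restored afterwards,
 * leaving the guest's FPSCR rounding choice undisturbed.
 */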

#define DO_VRINT_RMODE(NAME, FUNC, TYPE)                                \
    void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
    {                                                                   \
        float_status *fpst = stat;                                      \
        intptr_t i, oprsz = simd_oprsz(desc);                           \
        uint32_t rmode = simd_data(desc);                               \
        uint32_t prev_rmode = get_float_rounding_mode(fpst);            \
        TYPE *d = vd, *n = vn;                                          \
        set_float_rounding_mode(rmode, fpst);                           \
        for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
            d[i] = FUNC(n[i], fpst);                                    \
        }                                                               \
        set_float_rounding_mode(prev_rmode, fpst);                      \
        clear_tail(d, oprsz, simd_maxsz(desc));                         \
    }

DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)

#undef DO_VRINT_RMODE
1939#undef DO_VRINT_RMODE