2 * ARM NEON vector operations.
4 * Copyright (c) 2007 CodeSourcery.
5 * Written by Paul Brook
7 * This code is licensed under the GPL.
9 /* Note that for NEON an "l" prefix means it is a wide operation, unlike
10 scalar arm ops where it means a word size operation. */
12 /* ??? NEON ops should probably have their own float status. */
13 #define NFS &env->vfp.fp_status
14 #define NEON_OP(name) void OPPROTO op_neon_##name (void)
18 T0 = *(uint32_t *)((char *) env + PARAM1);
23 T1 = *(uint32_t *)((char *) env + PARAM1);
28 T2 = *(uint32_t *)((char *) env + PARAM1);
33 *(uint32_t *)((char *) env + PARAM1) = T0;
38 *(uint32_t *)((char *) env + PARAM1) = T1;
43 *(uint32_t *)((char *) env + PARAM1) = T2;
46 #define NEON_TYPE1(name, type) \
51 #ifdef WORDS_BIGENDIAN
52 #define NEON_TYPE2(name, type) \
58 #define NEON_TYPE4(name, type) \
67 #define NEON_TYPE2(name, type) \
73 #define NEON_TYPE4(name, type) \
/* Instantiate the element-vector structure types: four 8-bit, two 16-bit,
   or one 32-bit element packed into each 32-bit word. */
83 NEON_TYPE4(s8, int8_t)
84 NEON_TYPE4(u8, uint8_t)
85 NEON_TYPE2(s16, int16_t)
86 NEON_TYPE2(u16, uint16_t)
87 NEON_TYPE1(s32, int32_t)
88 NEON_TYPE1(u32, uint32_t)
93 /* Copy from a uint32_t to a vector structure type. */
94 #define NEON_UNPACK(vtype, dest, val) do { \
103 /* Copy from a vector structure type to a uint32_t. */
104 #define NEON_PACK(vtype, dest, val) do { \
114 NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
116 NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
117 NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
119 NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
120 NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
121 NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
122 NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
124 #define NEON_VOP(name, vtype, n) \
130 NEON_UNPACK(vtype, vsrc1, T0); \
131 NEON_UNPACK(vtype, vsrc2, T1); \
133 NEON_PACK(vtype, T0, vdest); \
137 #define NEON_VOP1(name, vtype, n) \
142 NEON_UNPACK(vtype, vsrc1, T0); \
144 NEON_PACK(vtype, T0, vdest); \
148 /* Pairwise operations. */
149 /* For 32-bit elements each segment only contains a single element, so
150 the elementwise and pairwise operations are the same. */
152 NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
153 NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
155 NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
156 NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
157 NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
158 NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
160 #define NEON_POP(name, vtype, n) \
166 NEON_UNPACK(vtype, vsrc1, T0); \
167 NEON_UNPACK(vtype, vsrc2, T1); \
169 NEON_PACK(vtype, T0, vdest); \
/* Halving add: each element becomes (src1 + src2) >> 1.  The addition is
   done in the promoted int type, so the intermediate sum cannot overflow
   for 8/16-bit elements. */
173 #define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
174 NEON_VOP(hadd_s8, neon_s8, 4)
175 NEON_VOP(hadd_u8, neon_u8, 4)
176 NEON_VOP(hadd_s16, neon_s16, 2)
177 NEON_VOP(hadd_u16, neon_u16, 2)
186 dest = (src1 >> 1) + (src2 >> 1);
199 dest = (src1 >> 1) + (src2 >> 1);
/* Rounding halving add: (src1 + src2 + 1) >> 1, i.e. the +1 rounds the
   half-sum upward instead of truncating. */
206 #define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
207 NEON_VOP(rhadd_s8, neon_s8, 4)
208 NEON_VOP(rhadd_u8, neon_u8, 4)
209 NEON_VOP(rhadd_s16, neon_s16, 2)
210 NEON_VOP(rhadd_u16, neon_u16, 2)
219 dest = (src1 >> 1) + (src2 >> 1);
220 if ((src1 | src2) & 1)
232 dest = (src1 >> 1) + (src2 >> 1);
233 if ((src1 | src2) & 1)
/* Halving subtract: each element becomes (src1 - src2) >> 1, computed in
   the promoted int type. */
239 #define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
240 NEON_VOP(hsub_s8, neon_s8, 4)
241 NEON_VOP(hsub_u8, neon_u8, 4)
242 NEON_VOP(hsub_s16, neon_s16, 2)
243 NEON_VOP(hsub_u16, neon_u16, 2)
252 dest = (src1 >> 1) - (src2 >> 1);
253 if ((~src1) & src2 & 1)
265 dest = (src1 >> 1) - (src2 >> 1);
266 if ((~src1) & src2 & 1)
272 /* ??? bsl, bif and bit are all the same op, just with the operands in a
273 different order. It's currently easier to have 3 different ops than
274 rearrange the operands. */
276 /* Bitwise Select. */
279 T0 = (T0 & T2) | (T1 & ~T2);
282 /* Bitwise Insert If True. */
285 T0 = (T0 & T1) | (T2 & ~T1);
288 /* Bitwise Insert If False. */
291 T0 = (T2 & T1) | (T0 & ~T1);
294 #define NEON_USAT(dest, src1, src2, type) do { \
295 uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
296 if (tmp != (type)tmp) { \
302 #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
303 NEON_VOP(qadd_u8, neon_u8, 4)
305 #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
306 NEON_VOP(qadd_u16, neon_u16, 2)
310 #define NEON_SSAT(dest, src1, src2, type) do { \
311 int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
312 if (tmp != (type)tmp) { \
315 tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
317 tmp = 1 << (sizeof(type) * 8 - 1); \
322 #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
323 NEON_VOP(qadd_s8, neon_s8, 4)
325 #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
326 NEON_VOP(qadd_s16, neon_s16, 2)
330 #define NEON_USAT(dest, src1, src2, type) do { \
331 uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
332 if (tmp != (type)tmp) { \
338 #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
339 NEON_VOP(qsub_u8, neon_u8, 4)
341 #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
342 NEON_VOP(qsub_u16, neon_u16, 2)
346 #define NEON_SSAT(dest, src1, src2, type) do { \
347 int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
348 if (tmp != (type)tmp) { \
351 tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
353 tmp = 1 << (sizeof(type) * 8 - 1); \
358 #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
359 NEON_VOP(qsub_s8, neon_s8, 4)
361 #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
362 NEON_VOP(qsub_s16, neon_s16, 2)
/* Compare greater-than: elementwise src1 > src2 yields all-ones (~0),
   otherwise zero, giving a per-element mask. */
366 #define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
367 NEON_VOP(cgt_s8, neon_s8, 4)
368 NEON_VOP(cgt_u8, neon_u8, 4)
369 NEON_VOP(cgt_s16, neon_s16, 2)
370 NEON_VOP(cgt_u16, neon_u16, 2)
371 NEON_VOP(cgt_s32, neon_s32, 1)
372 NEON_VOP(cgt_u32, neon_u32, 1)
/* Compare greater-than-or-equal: elementwise src1 >= src2 yields all-ones
   (~0), otherwise zero. */
375 #define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
376 NEON_VOP(cge_s8, neon_s8, 4)
377 NEON_VOP(cge_u8, neon_u8, 4)
378 NEON_VOP(cge_s16, neon_s16, 2)
379 NEON_VOP(cge_u16, neon_u16, 2)
380 NEON_VOP(cge_s32, neon_s32, 1)
381 NEON_VOP(cge_u32, neon_u32, 1)
384 #define NEON_FN(dest, src1, src2) do { \
386 tmp = (int8_t)src2; \
388 dest = src1 >> -tmp; \
390 dest = src1 << tmp; \
392 NEON_VOP(shl_s8, neon_s8, 4)
393 NEON_VOP(shl_u8, neon_u8, 4)
394 NEON_VOP(shl_s16, neon_s16, 2)
395 NEON_VOP(shl_u16, neon_u16, 2)
396 NEON_VOP(shl_s32, neon_s32, 1)
397 NEON_VOP(shl_u32, neon_u32, 1)
403 uint64_t val = T0 | ((uint64_t)T1 << 32);
417 int64_t val = T0 | ((uint64_t)T1 << 32);
428 #define NEON_FN(dest, src1, src2) do { \
430 tmp = (int8_t)src1; \
432 dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
434 dest = src2 << tmp; \
437 NEON_VOP(rshl_s8, neon_s8, 4)
438 NEON_VOP(rshl_u8, neon_u8, 4)
439 NEON_VOP(rshl_s16, neon_s16, 2)
440 NEON_VOP(rshl_u16, neon_u16, 2)
441 NEON_VOP(rshl_s32, neon_s32, 1)
442 NEON_VOP(rshl_u32, neon_u32, 1)
448 uint64_t val = T0 | ((uint64_t)T1 << 32);
450 val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
463 int64_t val = T0 | ((uint64_t)T1 << 32);
465 val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
474 #define NEON_FN(dest, src1, src2) do { \
476 tmp = (int8_t)src1; \
478 dest = src2 >> -tmp; \
480 dest = src2 << tmp; \
481 if ((dest >> tmp) != src2) { \
486 NEON_VOP(qshl_s8, neon_s8, 4)
487 NEON_VOP(qshl_s16, neon_s16, 2)
488 NEON_VOP(qshl_s32, neon_s32, 1)
494 int64_t val = T0 | ((uint64_t)T1 << 32);
500 if ((val >> shift) != tmp) {
502 val = (tmp >> 63) ^ 0x7fffffffffffffffULL;
510 #define NEON_FN(dest, src1, src2) do { \
512 tmp = (int8_t)src1; \
514 dest = src2 >> -tmp; \
516 dest = src2 << tmp; \
517 if ((dest >> tmp) != src2) { \
522 NEON_VOP(qshl_u8, neon_u8, 4)
523 NEON_VOP(qshl_u16, neon_u16, 2)
524 NEON_VOP(qshl_u32, neon_u32, 1)
530 uint64_t val = T0 | ((uint64_t)T1 << 32);
536 if ((val >> shift) != tmp) {
546 #define NEON_FN(dest, src1, src2) do { \
548 tmp = (int8_t)src1; \
550 dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
552 dest = src2 << tmp; \
553 if ((dest >> tmp) != src2) { \
557 NEON_VOP(qrshl_s8, neon_s8, 4)
558 NEON_VOP(qrshl_s16, neon_s16, 2)
559 NEON_VOP(qrshl_s32, neon_s32, 1)
562 #define NEON_FN(dest, src1, src2) do { \
564 tmp = (int8_t)src1; \
566 dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
568 dest = src2 << tmp; \
569 if ((dest >> tmp) != src2) { \
574 NEON_VOP(qrshl_u8, neon_u8, 4)
575 NEON_VOP(qrshl_u16, neon_u16, 2)
576 NEON_VOP(qrshl_u32, neon_u32, 1)
/* Elementwise maximum.  NEON_VOP forms take corresponding elements of the
   two operands; NEON_POP (pairwise) forms take adjacent element pairs
   within each operand. */
579 #define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
580 NEON_VOP(max_s8, neon_s8, 4)
581 NEON_VOP(max_u8, neon_u8, 4)
582 NEON_VOP(max_s16, neon_s16, 2)
583 NEON_VOP(max_u16, neon_u16, 2)
584 NEON_VOP(max_s32, neon_s32, 1)
585 NEON_VOP(max_u32, neon_u32, 1)
586 NEON_POP(pmax_s8, neon_s8, 4)
587 NEON_POP(pmax_u8, neon_u8, 4)
588 NEON_POP(pmax_s16, neon_s16, 2)
589 NEON_POP(pmax_u16, neon_u16, 2)
594 float32 f0 = vfp_itos(T0);
595 float32 f1 = vfp_itos(T1);
596 T0 = (float32_compare_quiet(f0, f1, NFS) == 1) ? T0 : T1;
/* Elementwise minimum, plus pairwise (NEON_POP) variants operating on
   adjacent element pairs. */
600 #define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
601 NEON_VOP(min_s8, neon_s8, 4)
602 NEON_VOP(min_u8, neon_u8, 4)
603 NEON_VOP(min_s16, neon_s16, 2)
604 NEON_VOP(min_u16, neon_u16, 2)
605 NEON_VOP(min_s32, neon_s32, 1)
606 NEON_VOP(min_u32, neon_u32, 1)
607 NEON_POP(pmin_s8, neon_s8, 4)
608 NEON_POP(pmin_u8, neon_u8, 4)
609 NEON_POP(pmin_s16, neon_s16, 2)
610 NEON_POP(pmin_u16, neon_u16, 2)
615 float32 f0 = vfp_itos(T0);
616 float32 f1 = vfp_itos(T1);
617 T0 = (float32_compare_quiet(f0, f1, NFS) == -1) ? T0 : T1;
/* Absolute difference: |src1 - src2|, written as a conditional subtraction
   so the larger operand is always the minuend. */
621 #define NEON_FN(dest, src1, src2) \
622 dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
623 NEON_VOP(abd_s8, neon_s8, 4)
624 NEON_VOP(abd_u8, neon_u8, 4)
625 NEON_VOP(abd_s16, neon_s16, 2)
626 NEON_VOP(abd_u16, neon_u16, 2)
627 NEON_VOP(abd_s32, neon_s32, 1)
628 NEON_VOP(abd_u32, neon_u32, 1)
633 float32 f0 = vfp_itos(T0);
634 float32 f1 = vfp_itos(T1);
635 T0 = vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
636 ? float32_sub(f0, f1, NFS)
637 : float32_sub(f1, f0, NFS));
/* Wrapping elementwise add; results truncate to the element width on
   repack.  Signedness is irrelevant for modular addition, so only unsigned
   variants are instantiated.  Pairwise (padd) forms add adjacent pairs. */
641 #define NEON_FN(dest, src1, src2) dest = src1 + src2
642 NEON_VOP(add_u8, neon_u8, 4)
643 NEON_VOP(add_u16, neon_u16, 2)
644 NEON_POP(padd_u8, neon_u8, 4)
645 NEON_POP(padd_u16, neon_u16, 2)
650 T0 = vfp_stoi(float32_add(vfp_itos(T0), vfp_itos(T1), NFS));
/* Wrapping elementwise subtract (src1 - src2). */
654 #define NEON_FN(dest, src1, src2) dest = src1 - src2
655 NEON_VOP(sub_u8, neon_u8, 4)
656 NEON_VOP(sub_u16, neon_u16, 2)
661 T0 = vfp_stoi(float32_sub(vfp_itos(T0), vfp_itos(T1), NFS));
/* Reverse subtract: operands swapped relative to sub (src2 - src1). */
665 #define NEON_FN(dest, src1, src2) dest = src2 - src1
666 NEON_VOP(rsb_u8, neon_u8, 4)
667 NEON_VOP(rsb_u16, neon_u16, 2)
672 T0 = vfp_stoi(float32_sub(vfp_itos(T1), vfp_itos(T0), NFS));
/* Wrapping elementwise multiply; low half of the product is kept when the
   result is repacked to the element width. */
676 #define NEON_FN(dest, src1, src2) dest = src1 * src2
677 NEON_VOP(mul_u8, neon_u8, 4)
678 NEON_VOP(mul_u16, neon_u16, 2)
683 T0 = vfp_stoi(float32_mul(vfp_itos(T0), vfp_itos(T1), NFS));
689 T0 = helper_neon_mul_p8(T0, T1);
/* Test bits: all-ones if the two elements share any set bit, else zero. */
692 #define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
693 NEON_VOP(tst_u8, neon_u8, 4)
694 NEON_VOP(tst_u16, neon_u16, 2)
695 NEON_VOP(tst_u32, neon_u32, 1)
/* Compare equal: all-ones on elementwise equality, else zero. */
698 #define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
699 NEON_VOP(ceq_u8, neon_u8, 4)
700 NEON_VOP(ceq_u16, neon_u16, 2)
701 NEON_VOP(ceq_u32, neon_u32, 1)
704 #define NEON_QDMULH16(dest, src1, src2, round) do { \
705 uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
706 if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
708 tmp = (tmp >> 31) ^ ~SIGNBIT; \
714 if ((int32_t)tmp < old) { \
721 #define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
722 NEON_VOP(qdmulh_s16, neon_s16, 2)
724 #define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
725 NEON_VOP(qrdmulh_s16, neon_s16, 2)
729 #define SIGNBIT64 ((uint64_t)1 << 63)
730 #define NEON_QDMULH32(dest, src1, src2, round) do { \
731 uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
732 if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
734 tmp = (tmp >> 63) ^ ~SIGNBIT64; \
740 tmp += (int64_t)1 << 31; \
741 if ((int64_t)tmp < old) { \
743 tmp = SIGNBIT64 - 1; \
748 #define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
749 NEON_VOP(qdmulh_s32, neon_s32, 1)
751 #define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
752 NEON_VOP(qrdmulh_s32, neon_s32, 1)
758 T0 = vfp_stoi(helper_recps_f32(vfp_itos(T0), vfp_itos(T1)));
764 T0 = vfp_stoi(helper_rsqrts_f32(vfp_itos(T0), vfp_itos(T1)));
768 /* Floating point comparisons produce an integer result. */
769 #define NEON_VOP_FCMP(name, cmp) \
772 if (float32_compare_quiet(vfp_itos(T0), vfp_itos(T1), NFS) cmp 0) \
779 NEON_VOP_FCMP(ceq_f32, ==)
780 NEON_VOP_FCMP(cge_f32, >=)
781 NEON_VOP_FCMP(cgt_f32, >)
785 float32 f0 = float32_abs(vfp_itos(T0));
786 float32 f1 = float32_abs(vfp_itos(T1));
787 T0 = (float32_compare_quiet(f0, f1,NFS) >= 0) ? -1 : 0;
793 float32 f0 = float32_abs(vfp_itos(T0));
794 float32 f1 = float32_abs(vfp_itos(T1));
795 T0 = (float32_compare_quiet(f0, f1, NFS) > 0) ? -1 : 0;
799 /* Narrowing instructions. The named type is the destination type. */
802 T0 = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
803 | ((T1 << 16) & 0xff0000) | (T1 << 24);
807 NEON_OP(narrow_sat_u8)
819 NEON_UNPACK(neon_u16, src, T0);
820 SAT8(dest.v1, src.v1);
821 SAT8(dest.v2, src.v2);
822 NEON_UNPACK(neon_u16, src, T1);
823 SAT8(dest.v3, src.v1);
824 SAT8(dest.v4, src.v2);
825 NEON_PACK(neon_u8, T0, dest);
830 NEON_OP(narrow_sat_s8)
835 if (s != (uint8_t)s) { \
836 d = (s >> 15) ^ 0x7f; \
842 NEON_UNPACK(neon_s16, src, T0);
843 SAT8(dest.v1, src.v1);
844 SAT8(dest.v2, src.v2);
845 NEON_UNPACK(neon_s16, src, T1);
846 SAT8(dest.v3, src.v1);
847 SAT8(dest.v4, src.v2);
848 NEON_PACK(neon_s8, T0, dest);
855 T0 = (T0 & 0xffff) | (T1 << 16);
858 NEON_OP(narrow_sat_u16)
872 NEON_OP(narrow_sat_s16)
874 if ((int32_t)T0 != (int16_t)T0) {
875 T0 = ((int32_t)T0 >> 31) ^ 0x7fff;
878 if ((int32_t)T1 != (int16_t) T1) {
879 T1 = ((int32_t)T1 >> 31) ^ 0x7fff;
882 T0 = (uint16_t)T0 | (T1 << 16);
886 NEON_OP(narrow_sat_u32)
895 NEON_OP(narrow_sat_s32)
897 int32_t sign = (int32_t)T1 >> 31;
899 if ((int32_t)T1 != sign) {
900 T0 = sign ^ 0x7fffffff;
906 /* Narrowing instructions. Named type is the narrow type. */
907 NEON_OP(narrow_high_u8)
909 T0 = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
910 | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
914 NEON_OP(narrow_high_u16)
916 T0 = (T0 >> 16) | (T1 & 0xffff0000);
920 NEON_OP(narrow_high_round_u8)
922 T0 = (((T0 + 0x80) >> 8) & 0xff) | (((T0 + 0x800000) >> 16) & 0xff00)
923 | (((T1 + 0x80) << 8) & 0xff0000) | ((T1 + 0x800000) & 0xff000000);
927 NEON_OP(narrow_high_round_u16)
929 T0 = ((T0 + 0x8000) >> 16) | ((T1 + 0x8000) & 0xffff0000);
933 NEON_OP(narrow_high_round_u32)
935 if (T0 >= 0x80000000u)
942 /* Widening instructions. Named type is source type. */
948 T0 = (uint16_t)(int8_t)src | ((int8_t)(src >> 8) << 16);
949 T1 = (uint16_t)(int8_t)(src >> 16) | ((int8_t)(src >> 24) << 16);
954 T1 = ((T0 >> 8) & 0xff0000) | ((T0 >> 16) & 0xff);
955 T0 = ((T0 << 8) & 0xff0000) | (T0 & 0xff);
975 T1 = (int32_t)T0 >> 31;
979 NEON_OP(widen_high_u8)
981 T1 = (T0 & 0xff000000) | ((T0 >> 8) & 0xff00);
982 T0 = ((T0 << 16) & 0xff000000) | ((T0 << 8) & 0xff00);
985 NEON_OP(widen_high_u16)
987 T1 = T0 & 0xffff0000;
991 /* Long operations. The type is the wide type. */
997 mask = 0xffff >> (16 - shift);
1001 T0 = (T0 << shift) & mask;
1002 T1 = (T1 << shift) & mask;
1011 T1 |= T0 >> (32 - shift);
1021 tmp = env->vfp.scratch[0];
1022 high = (T0 >> 16) + (tmp >> 16);
1023 T0 = (uint16_t)(T0 + tmp);
1025 tmp = env->vfp.scratch[1];
1026 high = (T1 >> 16) + (tmp >> 16);
1027 T1 = (uint16_t)(T1 + tmp);
1034 T0 += env->vfp.scratch[0];
1035 T1 += env->vfp.scratch[1];
1042 tmp = T0 | ((uint64_t)T1 << 32);
1043 tmp += env->vfp.scratch[0];
1044 tmp += (uint64_t)env->vfp.scratch[1] << 32;
1055 tmp = env->vfp.scratch[0];
1056 high = (T0 >> 16) - (tmp >> 16);
1057 T0 = (uint16_t)(T0 - tmp);
1059 tmp = env->vfp.scratch[1];
1060 high = (T1 >> 16) - (tmp >> 16);
1061 T1 = (uint16_t)(T1 - tmp);
1068 T0 -= env->vfp.scratch[0];
1069 T1 -= env->vfp.scratch[1];
1076 tmp = T0 | ((uint64_t)T1 << 32);
1077 tmp -= env->vfp.scratch[0];
1078 tmp -= (uint64_t)env->vfp.scratch[1] << 32;
1084 #define DO_ABD(dest, x, y, type) do { \
1087 dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
1096 DO_ABD(low, T0, T1, uint8_t);
1097 DO_ABD(tmp, T0 >> 8, T1 >> 8, uint8_t);
1099 DO_ABD(high, T0 >> 16, T1 >> 16, uint8_t);
1100 DO_ABD(tmp, T0 >> 24, T1 >> 24, uint8_t);
1113 DO_ABD(low, T0, T1, int8_t);
1114 DO_ABD(tmp, T0 >> 8, T1 >> 8, int8_t);
1116 DO_ABD(high, T0 >> 16, T1 >> 16, int8_t);
1117 DO_ABD(tmp, T0 >> 24, T1 >> 24, int8_t);
1129 DO_ABD(low, T0, T1, uint16_t);
1130 DO_ABD(high, T0 >> 16, T1 >> 16, uint16_t);
1141 DO_ABD(low, T0, T1, int16_t);
1142 DO_ABD(high, T0 >> 16, T1 >> 16, int16_t);
1150 DO_ABD(T0, T0, T1, uint32_t);
1156 DO_ABD(T0, T0, T1, int32_t);
1161 /* Widening multiply. Named type is the source type. */
1162 #define DO_MULL(dest, x, y, type1, type2) do { \
1165 dest = (type2)((type2)tmp_x * (type2)tmp_y); \
1174 DO_MULL(low, T0, T1, uint8_t, uint16_t);
1175 DO_MULL(tmp, T0 >> 8, T1 >> 8, uint8_t, uint16_t);
1177 DO_MULL(high, T0 >> 16, T1 >> 16, uint8_t, uint16_t);
1178 DO_MULL(tmp, T0 >> 24, T1 >> 24, uint8_t, uint16_t);
1191 DO_MULL(low, T0, T1, int8_t, uint16_t);
1192 DO_MULL(tmp, T0 >> 8, T1 >> 8, int8_t, uint16_t);
1194 DO_MULL(high, T0 >> 16, T1 >> 16, int8_t, uint16_t);
1195 DO_MULL(tmp, T0 >> 24, T1 >> 24, int8_t, uint16_t);
1207 DO_MULL(low, T0, T1, uint16_t, uint32_t);
1208 DO_MULL(high, T0 >> 16, T1 >> 16, uint16_t, uint32_t);
1219 DO_MULL(low, T0, T1, int16_t, uint32_t);
1220 DO_MULL(high, T0 >> 16, T1 >> 16, int16_t, uint32_t);
1226 NEON_OP(addl_saturate_s32)
1231 tmp = env->vfp.scratch[0];
1233 if (((res ^ T0) & SIGNBIT) && !((T0 ^ tmp) & SIGNBIT)) {
1235 T0 = (T0 >> 31) ^ 0x7fffffff;
1239 tmp = env->vfp.scratch[1];
1241 if (((res ^ T1) & SIGNBIT) && !((T1 ^ tmp) & SIGNBIT)) {
1243 T1 = (T1 >> 31) ^ 0x7fffffff;
1250 NEON_OP(addl_saturate_s64)
1256 src1 = T0 + ((uint64_t)T1 << 32);
1257 src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
1259 if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
1261 T0 = ~(int64_t)src1 >> 63;
1262 T1 = T0 ^ 0x80000000;
1270 NEON_OP(addl_saturate_u64)
1276 src1 = T0 + ((uint64_t)T1 << 32);
1277 src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
1290 NEON_OP(subl_saturate_s64)
1296 src1 = T0 + ((uint64_t)T1 << 32);
1297 src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
1299 if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
1301 T0 = ~(int64_t)src1 >> 63;
1302 T1 = T0 ^ 0x80000000;
1310 NEON_OP(subl_saturate_u64)
1316 src1 = T0 + ((uint64_t)T1 << 32);
1317 src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
1335 T0 = (-T0 & 0xffff) | (tmp << 16);
1338 T1 = (-T1 & 0xffff) | (tmp << 16);
1353 val = T0 | ((uint64_t)T1 << 32);
1360 /* Scalar operations. */
1363 T0 = (T0 & 0xffff) | (T0 << 16);
1369 T0 = (T0 >> 16) | (T0 & 0xffff0000);
1373 /* Helper for VEXT */
1377 T0 = (T0 >> shift) | (T1 << (32 - shift));
1381 /* Pairwise add long. Named type is source type. */
1389 result = (uint16_t)src1 + src2;
1392 T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
1403 result = (uint16_t)src1 + src2;
1406 T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
1412 T0 = (uint32_t)(int16_t)T0 + (uint32_t)(int16_t)(T0 >> 16);
1418 T0 = (uint32_t)(uint16_t)T0 + (uint32_t)(uint16_t)(T0 >> 16);
1425 tmp = (int64_t)(int32_t)T0 + (int64_t)(int32_t)T1;
1434 tmp = (uint64_t)T0 + (uint64_t)T1;
1440 /* Count Leading Sign/Zero Bits. */
1441 static inline int do_clz8(uint8_t x)
1449 static inline int do_clz16(uint16_t x)
1452 for (n = 16; x; n--)
1463 result = do_clz8(tmp);
1464 result |= do_clz8(tmp >> 8) << 8;
1465 result |= do_clz8(tmp >> 16) << 16;
1466 result |= do_clz8(tmp >> 24) << 24;
1476 result = do_clz16(tmp);
1477 result |= do_clz16(tmp >> 16) << 16;
1487 result = do_clz8((tmp < 0) ? ~tmp : tmp) - 1;
1489 result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 8;
1491 result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 16;
1493 result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 24;
1503 result = do_clz16((tmp < 0) ? ~tmp : tmp) - 1;
1505 result |= (do_clz16((tmp < 0) ? ~tmp : tmp) - 1) << 16;
1513 if ((int32_t)T0 < 0)
1515 for (count = 32; T0 > 0; count--)
1524 T0 = (T0 & 0x55555555) + ((T0 >> 1) & 0x55555555);
1525 T0 = (T0 & 0x33333333) + ((T0 >> 2) & 0x33333333);
1526 T0 = (T0 & 0x0f0f0f0f) + ((T0 >> 4) & 0x0f0f0f0f);
1530 /* Saturating negation. */
1531 /* ??? Make these use NEON_VOP1 */
1532 #define DO_QABS8(x) do { \
1533 if (x == (int8_t)0x80) { \
1536 } else if (x < 0) { \
1542 NEON_UNPACK(neon_s8, vec, T0);
1547 NEON_PACK(neon_s8, T0, vec);
1552 #define DO_QNEG8(x) do { \
1553 if (x == (int8_t)0x80) { \
1562 NEON_UNPACK(neon_s8, vec, T0);
1567 NEON_PACK(neon_s8, T0, vec);
1572 #define DO_QABS16(x) do { \
1573 if (x == (int16_t)0x8000) { \
1576 } else if (x < 0) { \
1582 NEON_UNPACK(neon_s16, vec, T0);
1585 NEON_PACK(neon_s16, T0, vec);
1590 #define DO_QNEG16(x) do { \
1591 if (x == (int16_t)0x8000) { \
1600 NEON_UNPACK(neon_s16, vec, T0);
1603 NEON_PACK(neon_s16, T0, vec);
1610 if (T0 == 0x80000000) {
1613 } else if ((int32_t)T0 < 0) {
1621 if (T0 == 0x80000000) {
1630 /* Unary operations */
1631 #define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
1632 NEON_VOP1(abs_s8, neon_s8, 4)
1633 NEON_VOP1(abs_s16, neon_s16, 2)
1636 if ((int32_t)T0 < 0)
1642 /* Transpose. Argument order is rather strange to avoid special casing
1643 the translation code.
1644 On input T0 = rm, T1 = rd. On output T0 = rd, T1 = rm */
1649 rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
1650 rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
1660 rd = (T0 << 16) | (T1 & 0xffff);
1661 rm = (T1 >> 16) | (T0 & 0xffff0000);
1667 /* Worker routines for zip and unzip. */
1672 rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
1673 | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
1674 rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
1675 | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
1685 rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
1686 | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
1687 rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
1688 | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
1698 tmp = (T0 & 0xffff) | (T1 << 16);
1699 T1 = (T1 & 0xffff0000) | (T0 >> 16);
1704 /* Reciprocal/root estimate. */
1707 T0 = helper_recpe_u32(T0);
1712 T0 = helper_rsqrte_u32(T0);
1717 FT0s = helper_recpe_f32(FT0s);
1722 FT0s = helper_rsqrte_f32(FT0s);
1725 /* Table lookup. This accesses the register file directly. */
1728 helper_neon_tbl(PARAM1, PARAM2);
1733 T0 = (T0 >> PARAM1) & 0xff;
1739 /* Helpers for element load/store. */
1743 uint32_t mask = PARAM2;
1744 T2 = (T2 & mask) | (T0 << shift);
1748 NEON_OP(extract_elt)
1751 uint32_t mask = PARAM2;
1752 T0 = (T2 & mask) >> shift;