2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
22 #include "host-utils.h"
25 #include "helper_regs.h"
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
31 #ifdef DEBUG_SOFTWARE_TLB
32 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
34 # define LOG_SWTLB(...) do { } while (0)
38 /*****************************************************************************/
39 /* Exceptions processing helpers */
/* Record a pending exception and its error code in the CPU state.
 * NOTE(review): this listing is elided (braces and some statements are
 * missing); comments describe only the visible lines. */
41 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
/* Debug trace of the raised exception — presumably gated by the
 * DEBUG_EXCEPTIONS define on an elided line; confirm against full source. */
44 printf("Raise exception %3x code : %d\n", exception, error_code);
46 env->exception_index = exception;
47 env->error_code = error_code;
/* Convenience wrapper: raise an exception with error code 0. */
51 void helper_raise_exception (uint32_t exception)
53 helper_raise_exception_err(exception, 0);
56 /*****************************************************************************/
57 /* Registers load and stores */
/* Reassemble the 32-bit CR image from the eight 4-bit env->crf[] fields;
 * crf[0] is the most-significant nibble (bits 31..28). */
58 target_ulong helper_load_cr (void)
60 return (env->crf[0] << 28) |
/* Scatter a CR image back into env->crf[]; 'mask' presumably selects which
 * CR fields are written (the per-field test is on an elided line — verify). */
70 void helper_store_cr (target_ulong val, uint32_t mask)
74 for (i = 0, sh = 7; i < 8; i++, sh--) {
76 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
80 /*****************************************************************************/
/* Debug logging of an SPR read: SPR number in decimal and hex, plus value. */
82 void helper_load_dump_spr (uint32_t sprn)
84 qemu_log("Read SPR %d %03x => " ADDRX "\n",
85 sprn, sprn, env->spr[sprn]);
/* Debug logging of an SPR write. */
88 void helper_store_dump_spr (uint32_t sprn)
90 qemu_log("Write SPR %d %03x <= " ADDRX "\n",
91 sprn, sprn, env->spr[sprn]);
/* Thin wrappers over the cpu_ppc_* accessors: time base lower/upper halves,
 * alternate time base, and the PowerPC 601 RTC lower/upper registers. */
94 target_ulong helper_load_tbl (void)
96 return cpu_ppc_load_tbl(env);
99 target_ulong helper_load_tbu (void)
101 return cpu_ppc_load_tbu(env);
104 target_ulong helper_load_atbl (void)
106 return cpu_ppc_load_atbl(env);
109 target_ulong helper_load_atbu (void)
111 return cpu_ppc_load_atbu(env);
114 target_ulong helper_load_601_rtcl (void)
116 return cpu_ppc601_load_rtcl(env);
119 target_ulong helper_load_601_rtcu (void)
121 return cpu_ppc601_load_rtcu(env);
124 #if !defined(CONFIG_USER_ONLY)
125 #if defined (TARGET_PPC64)
/* Privileged SPR/timer/BAT store helpers (system emulation only; this
 * section sits inside !CONFIG_USER_ONLY). Most are one-line wrappers over
 * the corresponding ppc_store_* / cpu_ppc_store_* functions. */
126 void helper_store_asr (target_ulong val)
128 ppc_store_asr(env, val);
132 void helper_store_sdr1 (target_ulong val)
134 ppc_store_sdr1(env, val);
137 void helper_store_tbl (target_ulong val)
139 cpu_ppc_store_tbl(env, val);
142 void helper_store_tbu (target_ulong val)
144 cpu_ppc_store_tbu(env, val);
147 void helper_store_atbl (target_ulong val)
149 cpu_ppc_store_atbl(env, val);
152 void helper_store_atbu (target_ulong val)
154 cpu_ppc_store_atbu(env, val);
157 void helper_store_601_rtcl (target_ulong val)
159 cpu_ppc601_store_rtcl(env, val);
162 void helper_store_601_rtcu (target_ulong val)
164 cpu_ppc601_store_rtcu(env, val);
167 target_ulong helper_load_decr (void)
169 return cpu_ppc_load_decr(env);
172 void helper_store_decr (target_ulong val)
174 cpu_ppc_store_decr(env, val);
/* 601 HID0 store: bit 3 selects little-endian mode. When that bit changes,
 * rebuild the MSR_LE bit of the non-MSR hflags so translation picks up the
 * new endianness, then store the new HID0 value. */
177 void helper_store_hid0_601 (target_ulong val)
181 hid0 = env->spr[SPR_HID0];
182 if ((val ^ hid0) & 0x00000008) {
183 /* Change current endianness */
184 env->hflags &= ~(1 << MSR_LE);
185 env->hflags_nmsr &= ~(1 << MSR_LE);
186 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
187 env->hflags |= env->hflags_nmsr;
188 qemu_log("%s: set endianness to %c => " ADDRX "\n",
189 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
191 env->spr[SPR_HID0] = (uint32_t)val;
/* 403 protection-bound register: only act when the value really changes. */
194 void helper_store_403_pbr (uint32_t num, target_ulong value)
196 if (likely(env->pb[num] != value)) {
197 env->pb[num] = value;
198 /* Should be optimized */
/* 40x / BookE timer and debug-control register wrappers. */
203 target_ulong helper_load_40x_pit (void)
205 return load_40x_pit(env);
208 void helper_store_40x_pit (target_ulong val)
210 store_40x_pit(env, val);
213 void helper_store_40x_dbcr0 (target_ulong val)
215 store_40x_dbcr0(env, val);
218 void helper_store_40x_sler (target_ulong val)
220 store_40x_sler(env, val);
223 void helper_store_booke_tcr (target_ulong val)
225 store_booke_tcr(env, val);
228 void helper_store_booke_tsr (target_ulong val)
230 store_booke_tsr(env, val);
/* BAT (block address translation) register stores, plus 601 variants. */
233 void helper_store_ibatu (uint32_t nr, target_ulong val)
235 ppc_store_ibatu(env, nr, val);
238 void helper_store_ibatl (uint32_t nr, target_ulong val)
240 ppc_store_ibatl(env, nr, val);
243 void helper_store_dbatu (uint32_t nr, target_ulong val)
245 ppc_store_dbatu(env, nr, val);
248 void helper_store_dbatl (uint32_t nr, target_ulong val)
250 ppc_store_dbatl(env, nr, val);
253 void helper_store_601_batl (uint32_t nr, target_ulong val)
255 ppc_store_ibatl_601(env, nr, val);
258 void helper_store_601_batu (uint32_t nr, target_ulong val)
260 ppc_store_ibatu_601(env, nr, val);
264 /*****************************************************************************/
265 /* Memory load and stores */
/* Add a displacement to an effective address; on 32-bit targets the result
 * is truncated to 32 bits (the 64-bit branch is on an elided line). */
267 static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
269 #if defined(TARGET_PPC64)
271 return (uint32_t)(addr + arg);
/* lmw: load GPRs reg..31 from consecutive words at addr; the bswap32 branch
 * handles the reverse-endian case (selector on an elided line). */
277 void helper_lmw (target_ulong addr, uint32_t reg)
279 for (; reg < 32; reg++) {
281 env->gpr[reg] = bswap32(ldl(addr));
283 env->gpr[reg] = ldl(addr);
284 addr = addr_add(addr, 4);
/* stmw: store GPRs reg..31 to consecutive words at addr (mirror of lmw). */
288 void helper_stmw (target_ulong addr, uint32_t reg)
290 for (; reg < 32; reg++) {
292 stl(addr, bswap32((uint32_t)env->gpr[reg]));
294 stl(addr, (uint32_t)env->gpr[reg]);
295 addr = addr_add(addr, 4);
/* lsw: load 'nb' bytes into GPRs starting at 'reg', wrapping from r31 to
 * r0; whole words first, then the 1..3 byte tail packed big-endian from
 * bit 24 downward. */
299 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
302 for (; nb > 3; nb -= 4) {
303 env->gpr[reg] = ldl(addr);
304 reg = (reg + 1) % 32;
305 addr = addr_add(addr, 4);
307 if (unlikely(nb > 0)) {
309 for (sh = 24; nb > 0; nb--, sh -= 8) {
310 env->gpr[reg] |= ldub(addr) << sh;
311 addr = addr_add(addr, 1);
315 /* PPC32 specification says we must generate an exception if
316 * rA is in the range of registers to be loaded.
317 * On the other hand, IBM says this is valid, but rA won't be loaded.
318 * For now, I'll follow the spec...
/* lswx: indexed string load of XER[bc] bytes; raises a program exception
 * when rA (non-zero) or rB falls in the destination register range. */
320 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
322 if (likely(xer_bc != 0)) {
323 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
324 (reg < rb && (reg + xer_bc) > rb))) {
325 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
327 POWERPC_EXCP_INVAL_LSWX);
329 helper_lsw(addr, xer_bc, reg);
/* stsw: store 'nb' bytes from GPRs starting at 'reg' (mirror of lsw). */
334 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
337 for (; nb > 3; nb -= 4) {
338 stl(addr, env->gpr[reg]);
339 reg = (reg + 1) % 32;
340 addr = addr_add(addr, 4);
342 if (unlikely(nb > 0)) {
343 for (sh = 24; nb > 0; nb--, sh -= 8) {
344 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
345 addr = addr_add(addr, 1);
/* Zero one data-cache line: align addr down to the line size, clear it word
 * by word, and drop any lwarx/ldarx reservation that covered the line. */
350 static void do_dcbz(target_ulong addr, int dcache_line_size)
352 addr &= ~(dcache_line_size - 1);
354 for (i = 0 ; i < dcache_line_size ; i += 4) {
357 if (env->reserve == addr)
358 env->reserve = (target_ulong)-1ULL;
361 void helper_dcbz(target_ulong addr)
363 do_dcbz(addr, env->dcache_line_size);
/* 970-specific dcbz: HID5 bits 8:7 select the line size to clear
 * (the alternate-size branch is on an elided line). */
366 void helper_dcbz_970(target_ulong addr)
368 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
371 do_dcbz(addr, env->dcache_line_size);
/* icbi: invalidate translated code for one instruction-cache line. */
374 void helper_icbi(target_ulong addr)
378 addr &= ~(env->dcache_line_size - 1);
379 /* Invalidate one cache line :
380 * PowerPC specification says this is to be treated like a load
381 * (not a fetch) by the MMU. To be sure it will be so,
382 * do the load "by hand".
385 tb_invalidate_page_range(addr, addr + env->icache_line_size);
/* lscbx: load string and compare byte indexed — load up to XER[bc] bytes,
 * stopping early when a byte equals XER[cmp]; returns the count (elided). */
389 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
393 for (i = 0; i < xer_bc; i++) {
395 addr = addr_add(addr, 1);
396 /* ra (if not 0) and rb are never modified */
397 if (likely(reg != rb && (ra == 0 || reg != ra))) {
398 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
400 if (unlikely(c == xer_cmp))
402 if (likely(d != 0)) {
413 /*****************************************************************************/
414 /* Fixed point operations helpers */
415 #if defined(TARGET_PPC64)
417 /* multiply high word */
/* mulhd: high 64 bits of the signed 64x64->128 product. */
418 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
422 muls64(&tl, &th, arg1, arg2);
426 /* multiply high word unsigned */
427 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
431 mulu64(&tl, &th, arg1, arg2);
/* mulldo: low 64 bits of the signed product, setting XER[OV]/XER[SO] when
 * the 128-bit result does not fit in 64 bits. */
435 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
440 muls64(&tl, (uint64_t *)&th, arg1, arg2);
441 /* If th != 0 && th != -1, then we had an overflow */
442 if (likely((uint64_t)(th + 1) <= 1)) {
443 env->xer &= ~(1 << XER_OV);
445 env->xer |= (1 << XER_OV) | (1 << XER_SO);
/* Count leading zeros, 32- and 64-bit variants. */
451 target_ulong helper_cntlzw (target_ulong t)
456 #if defined(TARGET_PPC64)
457 target_ulong helper_cntlzd (target_ulong t)
463 /* shift right arithmetic helper */
/* sraw: 32-bit arithmetic shift right. XER[CA] is set when a negative value
 * shifts any 1 bits out; shift amounts >= 32 yield the sign extension. */
464 target_ulong helper_sraw (target_ulong value, target_ulong shift)
468 if (likely(!(shift & 0x20))) {
469 if (likely((uint32_t)shift != 0)) {
471 ret = (int32_t)value >> shift;
472 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
473 env->xer &= ~(1 << XER_CA);
475 env->xer |= (1 << XER_CA);
478 ret = (int32_t)value;
479 env->xer &= ~(1 << XER_CA);
482 ret = (int32_t)value >> 31;
484 env->xer |= (1 << XER_CA);
486 env->xer &= ~(1 << XER_CA);
489 return (target_long)ret;
492 #if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic shift right, same XER[CA] rule as sraw. */
493 target_ulong helper_srad (target_ulong value, target_ulong shift)
497 if (likely(!(shift & 0x40))) {
498 if (likely((uint64_t)shift != 0)) {
500 ret = (int64_t)value >> shift;
/* NOTE(review): the carry mask uses (1 << shift) with an int constant; for
 * shift >= 31 this looks wrong for a 64-bit shift — should presumably be
 * (1ULL << shift). Verify against the full source / upstream fix. */
501 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
502 env->xer &= ~(1 << XER_CA);
504 env->xer |= (1 << XER_CA);
507 ret = (int64_t)value;
508 env->xer &= ~(1 << XER_CA);
511 ret = (int64_t)value >> 63;
513 env->xer |= (1 << XER_CA);
515 env->xer &= ~(1 << XER_CA);
/* popcntb: per-byte population count via parallel bit-summing. */
522 target_ulong helper_popcntb (target_ulong val)
524 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
525 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
526 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
530 #if defined(TARGET_PPC64)
531 target_ulong helper_popcntb_64 (target_ulong val)
533 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
534 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
535 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
540 /*****************************************************************************/
541 /* Floating point operations helpers */
/* Widen a raw 32-bit float image to a 64-bit double image via softfloat. */
542 uint64_t helper_float32_to_float64(uint32_t arg)
547 d.d = float32_to_float64(f.f, &env->fp_status);
/* Narrow a 64-bit double image to a 32-bit float image via softfloat. */
551 uint32_t helper_float64_to_float32(uint64_t arg)
556 f.f = float64_to_float32(d.d, &env->fp_status);
/* True when the double is denormal: biased exponent field is all zero. */
560 static always_inline int isden (float64 d)
566 return ((u.ll >> 52) & 0x7FF) == 0;
/* Classify a double into the 5-bit FPRF code (class + sign), optionally
 * writing it into FPSCR[FPRF]; otherwise only fpcc is needed for Rc1. */
569 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
575 isneg = float64_is_neg(farg.d);
576 if (unlikely(float64_is_nan(farg.d))) {
577 if (float64_is_signaling_nan(farg.d)) {
578 /* Signaling NaN: flags are undefined */
584 } else if (unlikely(float64_is_infinity(farg.d))) {
591 if (float64_is_zero(farg.d)) {
599 /* Denormalized numbers */
602 /* Normalized numbers */
613 /* We update FPSCR_FPRF */
614 env->fpscr &= ~(0x1F << FPSCR_FPRF);
615 env->fpscr |= ret << FPSCR_FPRF;
617 /* We just need fpcc to update Rc1 */
621 /* Floating-point invalid operations exception */
/* Set the FPSCR VX* bit matching invalid-op 'op', produce the quiet-NaN
 * result where the architecture requires one, update the VX/FX/FEX
 * summaries, and raise a program exception when FP exceptions are enabled
 * (MSR[FE0]/[FE1]). Returns the value to write to the target FPR. */
622 static always_inline uint64_t fload_invalid_op_excp (int op)
629 case POWERPC_EXCP_FP_VXSNAN:
630 env->fpscr |= 1 << FPSCR_VXSNAN;
632 case POWERPC_EXCP_FP_VXSOFT:
633 env->fpscr |= 1 << FPSCR_VXSOFT;
635 case POWERPC_EXCP_FP_VXISI:
636 /* Magnitude subtraction of infinities */
637 env->fpscr |= 1 << FPSCR_VXISI;
639 case POWERPC_EXCP_FP_VXIDI:
640 /* Division of infinity by infinity */
641 env->fpscr |= 1 << FPSCR_VXIDI;
643 case POWERPC_EXCP_FP_VXZDZ:
644 /* Division of zero by zero */
645 env->fpscr |= 1 << FPSCR_VXZDZ;
647 case POWERPC_EXCP_FP_VXIMZ:
648 /* Multiplication of zero by infinity */
649 env->fpscr |= 1 << FPSCR_VXIMZ;
651 case POWERPC_EXCP_FP_VXVC:
652 /* Ordered comparison of NaN */
653 env->fpscr |= 1 << FPSCR_VXVC;
654 env->fpscr &= ~(0xF << FPSCR_FPCC);
655 env->fpscr |= 0x11 << FPSCR_FPCC;
656 /* We must update the target FPR before raising the exception */
658 env->exception_index = POWERPC_EXCP_PROGRAM;
659 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
660 /* Update the floating-point enabled exception summary */
661 env->fpscr |= 1 << FPSCR_FEX;
662 /* Exception is deferred */
666 case POWERPC_EXCP_FP_VXSQRT:
667 /* Square root of a negative number */
668 env->fpscr |= 1 << FPSCR_VXSQRT;
670 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
672 /* Set the result to quiet NaN */
673 ret = 0xFFF8000000000000ULL;
674 env->fpscr &= ~(0xF << FPSCR_FPCC);
675 env->fpscr |= 0x11 << FPSCR_FPCC;
678 case POWERPC_EXCP_FP_VXCVI:
679 /* Invalid conversion */
680 env->fpscr |= 1 << FPSCR_VXCVI;
681 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
683 /* Set the result to quiet NaN */
684 ret = 0xFFF8000000000000ULL;
685 env->fpscr &= ~(0xF << FPSCR_FPCC);
686 env->fpscr |= 0x11 << FPSCR_FPCC;
690 /* Update the floating-point invalid operation summary */
691 env->fpscr |= 1 << FPSCR_VX;
692 /* Update the floating-point exception summary */
693 env->fpscr |= 1 << FPSCR_FX;
695 /* Update the floating-point enabled exception summary */
696 env->fpscr |= 1 << FPSCR_FEX;
697 if (msr_fe0 != 0 || msr_fe1 != 0)
698 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
/* Zero-divide: set FPSCR[ZX], clear FR/FI, update the FX/FEX summaries and
 * raise a program exception immediately when FP exceptions are enabled. */
703 static always_inline void float_zero_divide_excp (void)
705 env->fpscr |= 1 << FPSCR_ZX;
706 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
707 /* Update the floating-point exception summary */
708 env->fpscr |= 1 << FPSCR_FX;
710 /* Update the floating-point enabled exception summary */
711 env->fpscr |= 1 << FPSCR_FEX;
712 if (msr_fe0 != 0 || msr_fe1 != 0) {
713 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
714 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
/* Overflow: set FPSCR[OX] (and XX/FI when the trap is disabled); when
 * enabled, the exception is recorded here and raised after the FPR write. */
719 static always_inline void float_overflow_excp (void)
721 env->fpscr |= 1 << FPSCR_OX;
722 /* Update the floating-point exception summary */
723 env->fpscr |= 1 << FPSCR_FX;
725 /* XXX: should adjust the result */
726 /* Update the floating-point enabled exception summary */
727 env->fpscr |= 1 << FPSCR_FEX;
728 /* We must update the target FPR before raising the exception */
729 env->exception_index = POWERPC_EXCP_PROGRAM;
730 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
732 env->fpscr |= 1 << FPSCR_XX;
733 env->fpscr |= 1 << FPSCR_FI;
/* Underflow: same deferred-exception pattern as overflow, with FPSCR[UX]. */
737 static always_inline void float_underflow_excp (void)
739 env->fpscr |= 1 << FPSCR_UX;
740 /* Update the floating-point exception summary */
741 env->fpscr |= 1 << FPSCR_FX;
743 /* XXX: should adjust the result */
744 /* Update the floating-point enabled exception summary */
745 env->fpscr |= 1 << FPSCR_FEX;
746 /* We must update the target FPR before raising the exception */
747 env->exception_index = POWERPC_EXCP_PROGRAM;
748 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
/* Inexact: same deferred-exception pattern, with FPSCR[XX]. */
752 static always_inline void float_inexact_excp (void)
754 env->fpscr |= 1 << FPSCR_XX;
755 /* Update the floating-point exception summary */
756 env->fpscr |= 1 << FPSCR_FX;
758 /* Update the floating-point enabled exception summary */
759 env->fpscr |= 1 << FPSCR_FEX;
760 /* We must update the target FPR before raising the exception */
761 env->exception_index = POWERPC_EXCP_PROGRAM;
762 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
/* Propagate FPSCR[RN] (rounding mode) into the softfloat status word. */
766 static always_inline void fpscr_set_rounding_mode (void)
770 /* Set rounding mode */
773 /* Best approximation (round to nearest) */
774 rnd_type = float_round_nearest_even;
777 /* Smaller magnitude (round toward zero) */
778 rnd_type = float_round_to_zero;
781 /* Round toward +infinite */
782 rnd_type = float_round_up;
786 /* Round toward -infinite */
787 rnd_type = float_round_down;
790 set_float_rounding_mode(rnd_type, &env->fp_status);
/* mtfsb0: clear one FPSCR bit; if a rounding-mode bit changed, resync the
 * softfloat rounding mode. 'prev' presumably guards the elided change-side
 * effects — confirm against full source. */
793 void helper_fpscr_clrbit (uint32_t bit)
797 prev = (env->fpscr >> bit) & 1;
798 env->fpscr &= ~(1 << bit);
803 fpscr_set_rounding_mode();
/* mtfsb1: set one FPSCR bit. Setting an exception bit also sets FX; setting
 * an enable bit of an already-pending exception records a deferred program
 * exception (the per-bit switch cases are largely elided here). */
811 void helper_fpscr_setbit (uint32_t bit)
815 prev = (env->fpscr >> bit) & 1;
816 env->fpscr |= 1 << bit;
820 env->fpscr |= 1 << FPSCR_FX;
824 env->fpscr |= 1 << FPSCR_FX;
829 env->fpscr |= 1 << FPSCR_FX;
834 env->fpscr |= 1 << FPSCR_FX;
839 env->fpscr |= 1 << FPSCR_FX;
852 env->fpscr |= 1 << FPSCR_VX;
853 env->fpscr |= 1 << FPSCR_FX;
/* Build the error code from whichever VX* cause bits are pending. */
860 env->error_code = POWERPC_EXCP_FP;
862 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
864 env->error_code |= POWERPC_EXCP_FP_VXISI;
866 env->error_code |= POWERPC_EXCP_FP_VXIDI;
868 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
870 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
872 env->error_code |= POWERPC_EXCP_FP_VXVC;
874 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
876 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
878 env->error_code |= POWERPC_EXCP_FP_VXCVI;
885 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
892 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
899 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
906 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
912 fpscr_set_rounding_mode();
917 /* Update the floating-point enabled exception summary */
918 env->fpscr |= 1 << FPSCR_FEX;
919 /* We have to update Rc1 before raising the exception */
920 env->exception_index = POWERPC_EXCP_PROGRAM;
/* mtfsf: store selected 4-bit fields of FPSCR from 'arg' per 'mask', keep
 * FEX/VX (bits 0x60000000) as previously computed, then re-derive VX and
 * FEX from the cause/enable bits and resync the rounding mode. A newly
 * enabled pending exception is recorded as a deferred program exception. */
926 void helper_store_fpscr (uint64_t arg, uint32_t mask)
929 * We use only the 32 LSB of the incoming fpr
937 new |= prev & 0x60000000;
938 for (i = 0; i < 8; i++) {
939 if (mask & (1 << i)) {
940 env->fpscr &= ~(0xF << (4 * i));
941 env->fpscr |= new & (0xF << (4 * i));
944 /* Update VX and FEX */
946 env->fpscr |= 1 << FPSCR_VX;
948 env->fpscr &= ~(1 << FPSCR_VX);
949 if ((fpscr_ex & fpscr_eex) != 0) {
950 env->fpscr |= 1 << FPSCR_FEX;
951 env->exception_index = POWERPC_EXCP_PROGRAM;
952 /* XXX: we should compute it properly */
953 env->error_code = POWERPC_EXCP_FP;
956 env->fpscr &= ~(1 << FPSCR_FEX);
957 fpscr_set_rounding_mode();
/* Called after each FP op: with softfloat, translate accumulated softfloat
 * flags into FPSCR exception updates; in both builds, fire any deferred
 * program exception recorded earlier (after the target FPR was written). */
960 void helper_float_check_status (void)
962 #ifdef CONFIG_SOFTFLOAT
963 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
964 (env->error_code & POWERPC_EXCP_FP)) {
965 /* Deferred floating-point exception after target FPR update */
966 if (msr_fe0 != 0 || msr_fe1 != 0)
967 helper_raise_exception_err(env->exception_index, env->error_code);
969 int status = get_float_exception_flags(&env->fp_status);
970 if (status & float_flag_divbyzero) {
971 float_zero_divide_excp();
972 } else if (status & float_flag_overflow) {
973 float_overflow_excp();
974 } else if (status & float_flag_underflow) {
975 float_underflow_excp();
976 } else if (status & float_flag_inexact) {
977 float_inexact_excp();
981 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
982 (env->error_code & POWERPC_EXCP_FP)) {
983 /* Deferred floating-point exception after target FPR update */
984 if (msr_fe0 != 0 || msr_fe1 != 0)
985 helper_raise_exception_err(env->exception_index, env->error_code);
990 #ifdef CONFIG_SOFTFLOAT
/* Clear the softfloat accumulated exception flags before the next FP op. */
991 void helper_reset_fpstatus (void)
993 set_float_exception_flags(0, &env->fp_status);
/* fadd: with USE_PRECISE_EMULATION, trap sNaN operands (VXSNAN) and
 * inf + (-inf) (VXISI) before delegating to softfloat float64_add. */
998 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1000 CPU_DoubleU farg1, farg2;
1004 #if USE_PRECISE_EMULATION
1005 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1006 float64_is_signaling_nan(farg2.d))) {
/* sNaN addition */
1008 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1009 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1010 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
1011 /* Magnitude subtraction of infinities */
1012 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1014 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1017 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
/* fsub: like fadd, but VXISI triggers on inf - inf with equal signs. */
1023 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1025 CPU_DoubleU farg1, farg2;
1029 #if USE_PRECISE_EMULATION
1031 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032 float64_is_signaling_nan(farg2.d))) {
1033 /* sNaN subtraction */
1034 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1036 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1037 /* Magnitude subtraction of infinities */
1038 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1040 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1044 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
/* fmul: sNaN (VXSNAN) and 0 * inf (VXIMZ) checks, then float64_mul. */
1050 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1052 CPU_DoubleU farg1, farg2;
1056 #if USE_PRECISE_EMULATION
1057 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058 float64_is_signaling_nan(farg2.d))) {
1059 /* sNaN multiplication */
1060 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1062 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1063 /* Multiplication of zero by infinity */
1064 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1066 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1069 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
/* fdiv: sNaN (VXSNAN), inf/inf (VXIDI) and 0/0 (VXZDZ) checks, then
 * float64_div; zero-divide itself is reported via the softfloat flags. */
1075 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1077 CPU_DoubleU farg1, farg2;
1081 #if USE_PRECISE_EMULATION
1082 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1083 float64_is_signaling_nan(farg2.d))) {
/* sNaN division */
1085 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1086 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1087 /* Division of infinity by infinity */
1088 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1089 } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1090 /* Division of zero by zero */
1091 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1093 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1096 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
/* fabs / fnabs / fneg: pure sign-bit manipulation, no exceptions. */
1102 uint64_t helper_fabs (uint64_t arg)
1107 farg.d = float64_abs(farg.d);
1112 uint64_t helper_fnabs (uint64_t arg)
1117 farg.d = float64_abs(farg.d);
1118 farg.d = float64_chs(farg.d);
1123 uint64_t helper_fneg (uint64_t arg)
1128 farg.d = float64_chs(farg.d);
1132 /* fctiw - fctiw. */
/* Convert double to 32-bit int using the current rounding mode; sNaN and
 * NaN/infinity inputs raise the invalid-conversion cases instead. */
1133 uint64_t helper_fctiw (uint64_t arg)
1138 if (unlikely(float64_is_signaling_nan(farg.d))) {
1139 /* sNaN conversion */
1140 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1141 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1142 /* qNan / infinity conversion */
1143 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1145 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1146 #if USE_PRECISE_EMULATION
1147 /* XXX: higher bits are not supposed to be significant.
1148 * to make tests easier, return the same as a real PowerPC 750
1150 farg.ll |= 0xFFF80000ULL << 32;
1156 /* fctiwz - fctiwz. */
/* Same as fctiw but always rounds toward zero. */
1157 uint64_t helper_fctiwz (uint64_t arg)
1162 if (unlikely(float64_is_signaling_nan(farg.d))) {
1163 /* sNaN conversion */
1164 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1165 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1166 /* qNan / infinity conversion */
1167 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1169 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1170 #if USE_PRECISE_EMULATION
1171 /* XXX: higher bits are not supposed to be significant.
1172 * to make tests easier, return the same as a real PowerPC 750
1174 farg.ll |= 0xFFF80000ULL << 32;
1180 #if defined(TARGET_PPC64)
1181 /* fcfid - fcfid. */
/* Convert a signed 64-bit integer to double. */
1182 uint64_t helper_fcfid (uint64_t arg)
1185 farg.d = int64_to_float64(arg, &env->fp_status);
1189 /* fctid - fctid. */
/* Convert double to 64-bit int, current rounding mode. */
1190 uint64_t helper_fctid (uint64_t arg)
1195 if (unlikely(float64_is_signaling_nan(farg.d))) {
1196 /* sNaN conversion */
1197 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1198 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1199 /* qNan / infinity conversion */
1200 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1202 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1207 /* fctidz - fctidz. */
/* Convert double to 64-bit int, rounding toward zero. */
1208 uint64_t helper_fctidz (uint64_t arg)
1213 if (unlikely(float64_is_signaling_nan(farg.d))) {
1214 /* sNaN conversion */
1215 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1216 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1217 /* qNan / infinity conversion */
1218 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1220 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
/* Round to integer-valued double with an explicit rounding mode, restoring
 * the FPSCR rounding mode afterwards; shared by frin/friz/frip/frim. */
1227 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1232 if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN round */
1234 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1235 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1236 /* qNan / infinity round */
1237 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1239 set_float_rounding_mode(rounding_mode, &env->fp_status);
1240 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1241 /* Restore rounding mode from FPSCR */
1242 fpscr_set_rounding_mode();
1247 uint64_t helper_frin (uint64_t arg)
1249 return do_fri(arg, float_round_nearest_even);
1252 uint64_t helper_friz (uint64_t arg)
1254 return do_fri(arg, float_round_to_zero);
1257 uint64_t helper_frip (uint64_t arg)
1259 return do_fri(arg, float_round_up);
1262 uint64_t helper_frim (uint64_t arg)
1264 return do_fri(arg, float_round_down);
1267 /* fmadd - fmadd. */
/* Fused multiply-add (arg1 * arg2) + arg3. Precise emulation does the
 * multiply in 128-bit as the architecture specifies, checking sNaN
 * (VXSNAN), 0 * inf (VXIMZ) and inf - inf after the multiply (VXISI). */
1268 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1270 CPU_DoubleU farg1, farg2, farg3;
1275 #if USE_PRECISE_EMULATION
1276 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1277 float64_is_signaling_nan(farg2.d) ||
1278 float64_is_signaling_nan(farg3.d))) {
1279 /* sNaN operation */
1280 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1281 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1282 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1283 /* Multiplication of zero by infinity */
1284 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1287 /* This is the way the PowerPC specification defines it */
1288 float128 ft0_128, ft1_128;
1290 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1291 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1292 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1293 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1294 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1295 /* Magnitude subtraction of infinities */
1296 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1298 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1299 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1300 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1303 /* This is OK on x86 hosts */
1304 farg1.d = (farg1.d * farg2.d) + farg3.d;
1308 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1309 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1314 /* fmsub - fmsub. */
/* Fused multiply-subtract (arg1 * arg2) - arg3; VXISI fires when the
 * product and arg3 are infinities with equal signs. */
1315 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1317 CPU_DoubleU farg1, farg2, farg3;
1322 #if USE_PRECISE_EMULATION
1323 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1324 float64_is_signaling_nan(farg2.d) ||
1325 float64_is_signaling_nan(farg3.d))) {
1326 /* sNaN operation */
1327 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1328 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1329 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1330 /* Multiplication of zero by infinity */
1331 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1334 /* This is the way the PowerPC specification defines it */
1335 float128 ft0_128, ft1_128;
1337 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1338 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1339 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1340 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1341 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1342 /* Magnitude subtraction of infinities */
1343 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1345 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1346 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1347 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1350 /* This is OK on x86 hosts */
1351 farg1.d = (farg1.d * farg2.d) - farg3.d;
1355 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1356 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1361 /* fnmadd - fnmadd. */
/* Negated fmadd: compute (arg1 * arg2) + arg3, then flip the sign unless
 * the result is a NaN. */
1362 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1364 CPU_DoubleU farg1, farg2, farg3;
1370 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1371 float64_is_signaling_nan(farg2.d) ||
1372 float64_is_signaling_nan(farg3.d))) {
1373 /* sNaN operation */
1374 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1375 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1376 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1377 /* Multiplication of zero by infinity */
1378 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1380 #if USE_PRECISE_EMULATION
1382 /* This is the way the PowerPC specification defines it */
1383 float128 ft0_128, ft1_128;
1385 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1386 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1387 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1388 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1389 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1390 /* Magnitude subtraction of infinities */
1391 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1393 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1394 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1395 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1398 /* This is OK on x86 hosts */
1399 farg1.d = (farg1.d * farg2.d) + farg3.d;
1402 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1403 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1405 if (likely(!float64_is_nan(farg1.d)))
1406 farg1.d = float64_chs(farg1.d);
1411 /* fnmsub - fnmsub. */
/* Negated fmsub: compute (arg1 * arg2) - arg3, then flip the sign unless
 * the result is a NaN. */
1412 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1414 CPU_DoubleU farg1, farg2, farg3;
1420 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1421 float64_is_signaling_nan(farg2.d) ||
1422 float64_is_signaling_nan(farg3.d))) {
1423 /* sNaN operation */
1424 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1425 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1426 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1427 /* Multiplication of zero by infinity */
1428 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1430 #if USE_PRECISE_EMULATION
1432 /* This is the way the PowerPC specification defines it */
1433 float128 ft0_128, ft1_128;
1435 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1436 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1437 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1438 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1439 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1440 /* Magnitude subtraction of infinities */
1441 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1443 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1444 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1445 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1448 /* This is OK on x86 hosts */
1449 farg1.d = (farg1.d * farg2.d) - farg3.d;
1452 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1453 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1455 if (likely(!float64_is_nan(farg1.d)))
1456 farg1.d = float64_chs(farg1.d);
1462 uint64_t helper_frsp (uint64_t arg)
1468 #if USE_PRECISE_EMULATION
1469 if (unlikely(float64_is_signaling_nan(farg.d))) {
1470 /* sNaN square root */
1471 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1473 f32 = float64_to_float32(farg.d, &env->fp_status);
1474 farg.d = float32_to_float64(f32, &env->fp_status);
1477 f32 = float64_to_float32(farg.d, &env->fp_status);
1478 farg.d = float32_to_float64(f32, &env->fp_status);
1483 /* fsqrt - fsqrt. */
1484 uint64_t helper_fsqrt (uint64_t arg)
1489 if (unlikely(float64_is_signaling_nan(farg.d))) {
1490 /* sNaN square root */
1491 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1492 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1493 /* Square root of a negative nonzero number */
1494 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1496 farg.d = float64_sqrt(farg.d, &env->fp_status);
1502 uint64_t helper_fre (uint64_t arg)
1504 CPU_DoubleU fone, farg;
1505 fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1508 if (unlikely(float64_is_signaling_nan(farg.d))) {
1509 /* sNaN reciprocal */
1510 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1512 farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1518 uint64_t helper_fres (uint64_t arg)
1520 CPU_DoubleU fone, farg;
1522 fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1525 if (unlikely(float64_is_signaling_nan(farg.d))) {
1526 /* sNaN reciprocal */
1527 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1529 farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1530 f32 = float64_to_float32(farg.d, &env->fp_status);
1531 farg.d = float32_to_float64(f32, &env->fp_status);
1536 /* frsqrte - frsqrte. */
1537 uint64_t helper_frsqrte (uint64_t arg)
1539 CPU_DoubleU fone, farg;
1541 fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1544 if (unlikely(float64_is_signaling_nan(farg.d))) {
1545 /* sNaN reciprocal square root */
1546 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1547 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1548 /* Reciprocal square root of a negative nonzero number */
1549 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1551 farg.d = float64_sqrt(farg.d, &env->fp_status);
1552 farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1553 f32 = float64_to_float32(farg.d, &env->fp_status);
1554 farg.d = float32_to_float64(f32, &env->fp_status);
1560 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1566 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1572 void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1574 CPU_DoubleU farg1, farg2;
1579 if (unlikely(float64_is_nan(farg1.d) ||
1580 float64_is_nan(farg2.d))) {
1582 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1584 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1590 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1591 env->fpscr |= ret << FPSCR_FPRF;
1592 env->crf[crfD] = ret;
1593 if (unlikely(ret == 0x01UL
1594 && (float64_is_signaling_nan(farg1.d) ||
1595 float64_is_signaling_nan(farg2.d)))) {
1596 /* sNaN comparison */
1597 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1601 void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1603 CPU_DoubleU farg1, farg2;
1608 if (unlikely(float64_is_nan(farg1.d) ||
1609 float64_is_nan(farg2.d))) {
1611 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1613 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1619 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1620 env->fpscr |= ret << FPSCR_FPRF;
1621 env->crf[crfD] = ret;
1622 if (unlikely (ret == 0x01UL)) {
1623 if (float64_is_signaling_nan(farg1.d) ||
1624 float64_is_signaling_nan(farg2.d)) {
1625 /* sNaN comparison */
1626 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1627 POWERPC_EXCP_FP_VXVC);
1629 /* qNaN comparison */
1630 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1635 #if !defined (CONFIG_USER_ONLY)
1636 void helper_store_msr (target_ulong val)
1638 val = hreg_store_msr(env, val, 0);
1640 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1641 helper_raise_exception(val);
1645 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1646 target_ulong msrm, int keep_msrh)
1648 #if defined(TARGET_PPC64)
1649 if (msr & (1ULL << MSR_SF)) {
1650 nip = (uint64_t)nip;
1651 msr &= (uint64_t)msrm;
1653 nip = (uint32_t)nip;
1654 msr = (uint32_t)(msr & msrm);
1656 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1659 nip = (uint32_t)nip;
1660 msr &= (uint32_t)msrm;
1662 /* XXX: beware: this is false if VLE is supported */
1663 env->nip = nip & ~((target_ulong)0x00000003);
1664 hreg_store_msr(env, msr, 1);
1665 #if defined (DEBUG_OP)
1666 cpu_dump_rfi(env->nip, env->msr);
1668 /* No need to raise an exception here,
1669 * as rfi is always the last insn of a TB
1671 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1674 void helper_rfi (void)
1676 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1677 ~((target_ulong)0xFFFF0000), 1);
1680 #if defined(TARGET_PPC64)
1681 void helper_rfid (void)
1683 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1684 ~((target_ulong)0xFFFF0000), 0);
1687 void helper_hrfid (void)
1689 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1690 ~((target_ulong)0xFFFF0000), 0);
1695 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1697 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1698 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1699 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1700 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1701 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1702 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1706 #if defined(TARGET_PPC64)
1707 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1709 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1710 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1711 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1712 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1713 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1714 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1718 /*****************************************************************************/
1719 /* PowerPC 601 specific instructions (POWER bridge) */
1721 target_ulong helper_clcs (uint32_t arg)
1725 /* Instruction cache line size */
1726 return env->icache_line_size;
1729 /* Data cache line size */
1730 return env->dcache_line_size;
1733 /* Minimum cache line size */
1734 return (env->icache_line_size < env->dcache_line_size) ?
1735 env->icache_line_size : env->dcache_line_size;
1738 /* Maximum cache line size */
1739 return (env->icache_line_size > env->dcache_line_size) ?
1740 env->icache_line_size : env->dcache_line_size;
1749 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1751 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1753 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1754 (int32_t)arg2 == 0) {
1755 env->spr[SPR_MQ] = 0;
1758 env->spr[SPR_MQ] = tmp % arg2;
1759 return tmp / (int32_t)arg2;
1763 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1765 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1767 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1768 (int32_t)arg2 == 0) {
1769 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1770 env->spr[SPR_MQ] = 0;
1773 env->spr[SPR_MQ] = tmp % arg2;
1774 tmp /= (int32_t)arg2;
1775 if ((int32_t)tmp != tmp) {
1776 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1778 env->xer &= ~(1 << XER_OV);
1784 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1786 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1787 (int32_t)arg2 == 0) {
1788 env->spr[SPR_MQ] = 0;
1791 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1792 return (int32_t)arg1 / (int32_t)arg2;
1796 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1798 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1799 (int32_t)arg2 == 0) {
1800 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1801 env->spr[SPR_MQ] = 0;
1804 env->xer &= ~(1 << XER_OV);
1805 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1806 return (int32_t)arg1 / (int32_t)arg2;
1810 #if !defined (CONFIG_USER_ONLY)
1811 target_ulong helper_rac (target_ulong addr)
1815 target_ulong ret = 0;
1817 /* We don't have to generate many instances of this instruction,
1818 * as rac is supervisor only.
1820 /* XXX: FIX THIS: Pretend we have no BAT */
1821 nb_BATs = env->nb_BATs;
1823 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1825 env->nb_BATs = nb_BATs;
1829 void helper_rfsvc (void)
1831 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1835 /*****************************************************************************/
1836 /* 602 specific instructions */
1837 /* mfrom is the most crazy instruction ever seen, imho ! */
1838 /* Real implementation uses a ROM table. Do the same */
1839 /* Extremly decomposed:
1841 * return 256 * log10(10 + 1.0) + 0.5
1843 #if !defined (CONFIG_USER_ONLY)
1844 target_ulong helper_602_mfrom (target_ulong arg)
1846 if (likely(arg < 602)) {
1847 #include "mfrom_table.c"
1848 return mfrom_ROM_table[arg];
1855 /*****************************************************************************/
1856 /* Embedded PowerPC specific helpers */
1858 /* XXX: to be improved to check access rights when in user-mode */
1859 target_ulong helper_load_dcr (target_ulong dcrn)
1861 target_ulong val = 0;
1863 if (unlikely(env->dcr_env == NULL)) {
1864 qemu_log("No DCR environment\n");
1865 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1866 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1867 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1868 qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1869 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1870 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1875 void helper_store_dcr (target_ulong dcrn, target_ulong val)
1877 if (unlikely(env->dcr_env == NULL)) {
1878 qemu_log("No DCR environment\n");
1879 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1880 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1881 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1882 qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1883 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1884 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1888 #if !defined(CONFIG_USER_ONLY)
1889 void helper_40x_rfci (void)
1891 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1892 ~((target_ulong)0xFFFF0000), 0);
1895 void helper_rfci (void)
1897 do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1898 ~((target_ulong)0x3FFF0000), 0);
1901 void helper_rfdi (void)
1903 do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1904 ~((target_ulong)0x3FFF0000), 0);
1907 void helper_rfmci (void)
1909 do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1910 ~((target_ulong)0x3FFF0000), 0);
1915 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1921 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1922 if ((high & mask) == 0) {
1930 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1931 if ((low & mask) == 0) {
1943 env->xer = (env->xer & ~0x7F) | i;
1945 env->crf[0] |= xer_so;
1950 /*****************************************************************************/
1951 /* Altivec extension helpers */
1952 #if defined(WORDS_BIGENDIAN)
1960 #if defined(WORDS_BIGENDIAN)
1961 #define VECTOR_FOR_INORDER_I(index, element) \
1962 for (index = 0; index < ARRAY_SIZE(r->element); index++)
1964 #define VECTOR_FOR_INORDER_I(index, element) \
1965 for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1968 /* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1969 * execute the following block. */
1970 #define DO_HANDLE_NAN(result, x) \
1971 if (float32_is_nan(x) || float32_is_signaling_nan(x)) { \
1974 __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1978 #define HANDLE_NAN1(result, x) \
1979 DO_HANDLE_NAN(result, x)
1980 #define HANDLE_NAN2(result, x, y) \
1981 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1982 #define HANDLE_NAN3(result, x, y, z) \
1983 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1985 /* Saturating arithmetic helpers. */
1986 #define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
1987 static always_inline to_type cvt##from##to (from_type x, int *sat) \
1990 if (use_min && x < min) { \
1993 } else if (use_max && x > max) { \
2001 SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
2002 SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
2003 SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
2004 SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
2005 SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
2006 SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
2007 SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
2008 SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
2009 SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
2012 #define LVE(name, access, swap, element) \
2013 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2015 size_t n_elems = ARRAY_SIZE(r->element); \
2016 int adjust = HI_IDX*(n_elems-1); \
2017 int sh = sizeof(r->element[0]) >> 1; \
2018 int index = (addr & 0xf) >> sh; \
2020 r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2022 r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2026 LVE(lvebx, ldub, I, u8)
2027 LVE(lvehx, lduw, bswap16, u16)
2028 LVE(lvewx, ldl, bswap32, u32)
2032 void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2034 int i, j = (sh & 0xf);
2036 VECTOR_FOR_INORDER_I (i, u8) {
2041 void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2043 int i, j = 0x10 - (sh & 0xf);
2045 VECTOR_FOR_INORDER_I (i, u8) {
2050 #define STVE(name, access, swap, element) \
2051 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2053 size_t n_elems = ARRAY_SIZE(r->element); \
2054 int adjust = HI_IDX*(n_elems-1); \
2055 int sh = sizeof(r->element[0]) >> 1; \
2056 int index = (addr & 0xf) >> sh; \
2058 access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2060 access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2064 STVE(stvebx, stb, I, u8)
2065 STVE(stvehx, stw, bswap16, u16)
2066 STVE(stvewx, stl, bswap32, u32)
2070 void helper_mtvscr (ppc_avr_t *r)
2072 #if defined(WORDS_BIGENDIAN)
2073 env->vscr = r->u32[3];
2075 env->vscr = r->u32[0];
2077 set_flush_to_zero(vscr_nj, &env->vec_status);
2080 void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2083 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2084 r->u32[i] = ~a->u32[i] < b->u32[i];
2088 #define VARITH_DO(name, op, element) \
2089 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2092 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2093 r->element[i] = a->element[i] op b->element[i]; \
2096 #define VARITH(suffix, element) \
2097 VARITH_DO(add##suffix, +, element) \
2098 VARITH_DO(sub##suffix, -, element)
2105 #define VARITHSAT_CASE(type, op, cvt, element) \
2107 type result = (type)a->element[i] op (type)b->element[i]; \
2108 r->element[i] = cvt(result, &sat); \
2111 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2112 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2116 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2117 switch (sizeof(r->element[0])) { \
2118 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2119 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2120 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2124 env->vscr |= (1 << VSCR_SAT); \
2127 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2128 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2129 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2130 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2131 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2132 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2133 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2134 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2135 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2136 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2137 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2138 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2139 #undef VARITHSAT_CASE
2141 #undef VARITHSAT_SIGNED
2142 #undef VARITHSAT_UNSIGNED
2144 #define VAVG_DO(name, element, etype) \
2145 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2148 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2149 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2150 r->element[i] = x >> 1; \
2154 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2155 VAVG_DO(avgs##type, signed_element, signed_type) \
2156 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2157 VAVG(b, s8, int16_t, u8, uint16_t)
2158 VAVG(h, s16, int32_t, u16, uint32_t)
2159 VAVG(w, s32, int64_t, u32, uint64_t)
2163 #define VCF(suffix, cvt, element) \
2164 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2167 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2168 float32 t = cvt(b->element[i], &env->vec_status); \
2169 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2172 VCF(ux, uint32_to_float32, u32)
2173 VCF(sx, int32_to_float32, s32)
2176 #define VCMP_DO(suffix, compare, element, record) \
2177 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2179 uint32_t ones = (uint32_t)-1; \
2180 uint32_t all = ones; \
2181 uint32_t none = 0; \
2183 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2184 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2185 switch (sizeof (a->element[0])) { \
2186 case 4: r->u32[i] = result; break; \
2187 case 2: r->u16[i] = result; break; \
2188 case 1: r->u8[i] = result; break; \
2194 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2197 #define VCMP(suffix, compare, element) \
2198 VCMP_DO(suffix, compare, element, 0) \
2199 VCMP_DO(suffix##_dot, compare, element, 1)
2212 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2217 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2218 int32_t prod = a->s16[i] * b->s16[i];
2219 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2220 r->s16[i] = cvtswsh (t, &sat);
2224 env->vscr |= (1 << VSCR_SAT);
2228 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2233 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2234 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2235 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2236 r->s16[i] = cvtswsh (t, &sat);
2240 env->vscr |= (1 << VSCR_SAT);
2244 #define VMINMAX_DO(name, compare, element) \
2245 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2248 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2249 if (a->element[i] compare b->element[i]) { \
2250 r->element[i] = b->element[i]; \
2252 r->element[i] = a->element[i]; \
2256 #define VMINMAX(suffix, element) \
2257 VMINMAX_DO(min##suffix, >, element) \
2258 VMINMAX_DO(max##suffix, <, element)
2268 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2271 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2272 int32_t prod = a->s16[i] * b->s16[i];
2273 r->s16[i] = (int16_t) (prod + c->s16[i]);
2277 #define VMRG_DO(name, element, highp) \
2278 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2282 size_t n_elems = ARRAY_SIZE(r->element); \
2283 for (i = 0; i < n_elems/2; i++) { \
2285 result.element[i*2+HI_IDX] = a->element[i]; \
2286 result.element[i*2+LO_IDX] = b->element[i]; \
2288 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2289 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2294 #if defined(WORDS_BIGENDIAN)
2301 #define VMRG(suffix, element) \
2302 VMRG_DO(mrgl##suffix, element, MRGHI) \
2303 VMRG_DO(mrgh##suffix, element, MRGLO)
2312 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2317 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2318 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2321 VECTOR_FOR_INORDER_I(i, s32) {
2322 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2326 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2331 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2332 prod[i] = a->s16[i] * b->s16[i];
2335 VECTOR_FOR_INORDER_I(i, s32) {
2336 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2340 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2346 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2347 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2350 VECTOR_FOR_INORDER_I (i, s32) {
2351 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2352 r->u32[i] = cvtsdsw(t, &sat);
2356 env->vscr |= (1 << VSCR_SAT);
2360 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2365 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2366 prod[i] = a->u8[i] * b->u8[i];
2369 VECTOR_FOR_INORDER_I(i, u32) {
2370 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2374 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2379 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2380 prod[i] = a->u16[i] * b->u16[i];
2383 VECTOR_FOR_INORDER_I(i, u32) {
2384 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2388 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2394 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2395 prod[i] = a->u16[i] * b->u16[i];
2398 VECTOR_FOR_INORDER_I (i, s32) {
2399 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2400 r->u32[i] = cvtuduw(t, &sat);
2404 env->vscr |= (1 << VSCR_SAT);
2408 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2409 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2412 VECTOR_FOR_INORDER_I(i, prod_element) { \
2414 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2416 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2420 #define VMUL(suffix, mul_element, prod_element) \
2421 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2422 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2430 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2434 VECTOR_FOR_INORDER_I (i, u8) {
2435 int s = c->u8[i] & 0x1f;
2436 #if defined(WORDS_BIGENDIAN)
2437 int index = s & 0xf;
2439 int index = 15 - (s & 0xf);
2442 result.u8[i] = b->u8[index];
2444 result.u8[i] = a->u8[index];
2450 #if defined(WORDS_BIGENDIAN)
2455 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2459 #if defined(WORDS_BIGENDIAN)
2460 const ppc_avr_t *x[2] = { a, b };
2462 const ppc_avr_t *x[2] = { b, a };
2465 VECTOR_FOR_INORDER_I (i, u64) {
2466 VECTOR_FOR_INORDER_I (j, u32){
2467 uint32_t e = x[i]->u32[j];
2468 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2469 ((e >> 6) & 0x3e0) |
2476 #define VPK(suffix, from, to, cvt, dosat) \
2477 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2482 ppc_avr_t *a0 = PKBIG ? a : b; \
2483 ppc_avr_t *a1 = PKBIG ? b : a; \
2484 VECTOR_FOR_INORDER_I (i, from) { \
2485 result.to[i] = cvt(a0->from[i], &sat); \
2486 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2489 if (dosat && sat) { \
2490 env->vscr |= (1 << VSCR_SAT); \
2494 VPK(shss, s16, s8, cvtshsb, 1)
2495 VPK(shus, s16, u8, cvtshub, 1)
2496 VPK(swss, s32, s16, cvtswsh, 1)
2497 VPK(swus, s32, u16, cvtswuh, 1)
2498 VPK(uhus, u16, u8, cvtuhub, 1)
2499 VPK(uwus, u32, u16, cvtuwuh, 1)
2500 VPK(uhum, u16, u8, I, 0)
2501 VPK(uwum, u32, u16, I, 0)
2506 #define VRFI(suffix, rounding) \
2507 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2510 float_status s = env->vec_status; \
2511 set_float_rounding_mode(rounding, &s); \
2512 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2513 HANDLE_NAN1(r->f[i], b->f[i]) { \
2514 r->f[i] = float32_round_to_int (b->f[i], &s); \
2518 VRFI(n, float_round_nearest_even)
2519 VRFI(m, float_round_down)
2520 VRFI(p, float_round_up)
2521 VRFI(z, float_round_to_zero)
2524 #define VROTATE(suffix, element) \
2525 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2528 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2529 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2530 unsigned int shift = b->element[i] & mask; \
2531 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2539 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2541 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2542 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2545 #if defined(WORDS_BIGENDIAN)
2552 /* The specification says that the results are undefined if all of the
2553 * shift counts are not identical. We check to make sure that they are
2554 * to conform to what real hardware appears to do. */
2555 #define VSHIFT(suffix, leftp) \
2556 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2558 int shift = b->u8[LO_IDX*0x15] & 0x7; \
2561 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2562 doit = doit && ((b->u8[i] & 0x7) == shift); \
2567 } else if (leftp) { \
2568 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2569 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2570 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2572 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2573 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2574 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2584 #define VSL(suffix, element) \
2585 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2588 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2589 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2590 unsigned int shift = b->element[i] & mask; \
2591 r->element[i] = a->element[i] << shift; \
2599 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2601 int sh = shift & 0xf;
2605 #if defined(WORDS_BIGENDIAN)
2606 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2609 result.u8[i] = b->u8[index-0x10];
2611 result.u8[i] = a->u8[index];
2615 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2616 int index = (16 - sh) + i;
2618 result.u8[i] = a->u8[index-0x10];
2620 result.u8[i] = b->u8[index];
2627 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2629 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2631 #if defined (WORDS_BIGENDIAN)
2632 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2633 memset (&r->u8[16-sh], 0, sh);
2635 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2636 memset (&r->u8[0], 0, sh);
2640 /* Experimental testing shows that hardware masks the immediate. */
2641 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2642 #if defined(WORDS_BIGENDIAN)
2643 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2645 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2647 #define VSPLT(suffix, element) \
2648 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2650 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2652 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2653 r->element[i] = s; \
2660 #undef SPLAT_ELEMENT
2661 #undef _SPLAT_MASKED
2663 #define VSPLTI(suffix, element, splat_type) \
2664 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2666 splat_type x = (int8_t)(splat << 3) >> 3; \
2668 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2669 r->element[i] = x; \
2672 VSPLTI(b, s8, int8_t)
2673 VSPLTI(h, s16, int16_t)
2674 VSPLTI(w, s32, int32_t)
2677 #define VSR(suffix, element) \
2678 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2681 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2682 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2683 unsigned int shift = b->element[i] & mask; \
2684 r->element[i] = a->element[i] >> shift; \
2695 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2697 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2699 #if defined (WORDS_BIGENDIAN)
2700 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2701 memset (&r->u8[0], 0, sh);
2703 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2704 memset (&r->u8[16-sh], 0, sh);
2708 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2711 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2712 r->u32[i] = a->u32[i] >= b->u32[i];
2716 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2723 #if defined(WORDS_BIGENDIAN)
2724 upper = ARRAY_SIZE(r->s32)-1;
2728 t = (int64_t)b->s32[upper];
2729 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2733 result.s32[upper] = cvtsdsw(t, &sat);
2737 env->vscr |= (1 << VSCR_SAT);
2741 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2747 #if defined(WORDS_BIGENDIAN)
2752 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2753 int64_t t = (int64_t)b->s32[upper+i*2];
2755 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2758 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2763 env->vscr |= (1 << VSCR_SAT);
2767 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2772 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2773 int64_t t = (int64_t)b->s32[i];
2774 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2777 r->s32[i] = cvtsdsw(t, &sat);
2781 env->vscr |= (1 << VSCR_SAT);
2785 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2790 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2791 int64_t t = (int64_t)b->s32[i];
2792 t += a->s16[2*i] + a->s16[2*i+1];
2793 r->s32[i] = cvtsdsw(t, &sat);
2797 env->vscr |= (1 << VSCR_SAT);
2801 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2806 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2807 uint64_t t = (uint64_t)b->u32[i];
2808 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2811 r->u32[i] = cvtuduw(t, &sat);
2815 env->vscr |= (1 << VSCR_SAT);
2819 #if defined(WORDS_BIGENDIAN)
2826 #define VUPKPX(suffix, hi) \
2827 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2831 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
2832 uint16_t e = b->u16[hi ? i : i+4]; \
2833 uint8_t a = (e >> 15) ? 0xff : 0; \
2834 uint8_t r = (e >> 10) & 0x1f; \
2835 uint8_t g = (e >> 5) & 0x1f; \
2836 uint8_t b = e & 0x1f; \
2837 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
2845 #define VUPK(suffix, unpacked, packee, hi) \
2846 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2851 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
2852 result.unpacked[i] = b->packee[i]; \
2855 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
2856 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2861 VUPK(hsb, s16, s8, UPKHI)
2862 VUPK(hsh, s32, s16, UPKHI)
2863 VUPK(lsb, s16, s8, UPKLO)
2864 VUPK(lsh, s32, s16, UPKLO)
2869 #undef DO_HANDLE_NAN
2873 #undef VECTOR_FOR_INORDER_I
2877 /*****************************************************************************/
2878 /* SPE extension helpers */
2879 /* Use a table to make this quicker */
/* Nibble bit-reversal lookup table for brinc: hbrev[n] is the 4-bit
 * value n with its bit order reversed.  Const-qualified: it is
 * read-only data and belongs in .rodata. */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of one byte via two nibble lookups. */
static inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit word: reverse each byte, then swap
 * the byte positions.  The (uint32_t) casts avoid signed-integer
 * overflow UB when a reversed byte with its top bit set is shifted
 * left by 24 (byte_reverse promotes to int before the shift). */
static inline uint32_t word_reverse (uint32_t val)
{
    return (uint32_t)byte_reverse(val >> 24) |
           ((uint32_t)byte_reverse(val >> 16) << 8) |
           ((uint32_t)byte_reverse(val >> 8) << 16) |
           ((uint32_t)byte_reverse(val) << 24);
}
2896 #define MASKBITS 16 // Random value - to be fixed (implementation dependant)
2897 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2899 uint32_t a, b, d, mask;
2901 mask = UINT32_MAX >> (32 - MASKBITS);
2904 d = word_reverse(1 + word_reverse(a | ~b));
2905 return (arg1 & ~mask) | (d & b);
2908 uint32_t helper_cntlsw32 (uint32_t val)
2910 if (val & 0x80000000)
2916 uint32_t helper_cntlzw32 (uint32_t val)
2921 /* Single-precision floating-point conversions */
2922 static always_inline uint32_t efscfsi (uint32_t val)
2926 u.f = int32_to_float32(val, &env->vec_status);
2931 static always_inline uint32_t efscfui (uint32_t val)
2935 u.f = uint32_to_float32(val, &env->vec_status);
2940 static always_inline int32_t efsctsi (uint32_t val)
2945 /* NaN are not treated the same way IEEE 754 does */
2946 if (unlikely(float32_is_nan(u.f)))
2949 return float32_to_int32(u.f, &env->vec_status);
2952 static always_inline uint32_t efsctui (uint32_t val)
2957 /* NaN are not treated the same way IEEE 754 does */
2958 if (unlikely(float32_is_nan(u.f)))
2961 return float32_to_uint32(u.f, &env->vec_status);
2964 static always_inline uint32_t efsctsiz (uint32_t val)
2969 /* NaN are not treated the same way IEEE 754 does */
2970 if (unlikely(float32_is_nan(u.f)))
2973 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
2976 static always_inline uint32_t efsctuiz (uint32_t val)
2981 /* NaN are not treated the same way IEEE 754 does */
2982 if (unlikely(float32_is_nan(u.f)))
2985 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
2988 static always_inline uint32_t efscfsf (uint32_t val)
2993 u.f = int32_to_float32(val, &env->vec_status);
2994 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
2995 u.f = float32_div(u.f, tmp, &env->vec_status);
3000 static always_inline uint32_t efscfuf (uint32_t val)
3005 u.f = uint32_to_float32(val, &env->vec_status);
3006 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3007 u.f = float32_div(u.f, tmp, &env->vec_status);
3012 static always_inline uint32_t efsctsf (uint32_t val)
3018 /* NaN are not treated the same way IEEE 754 does */
3019 if (unlikely(float32_is_nan(u.f)))
3021 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3022 u.f = float32_mul(u.f, tmp, &env->vec_status);
3024 return float32_to_int32(u.f, &env->vec_status);
3027 static always_inline uint32_t efsctuf (uint32_t val)
3033 /* NaN are not treated the same way IEEE 754 does */
3034 if (unlikely(float32_is_nan(u.f)))
3036 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3037 u.f = float32_mul(u.f, tmp, &env->vec_status);
3039 return float32_to_uint32(u.f, &env->vec_status);
3042 #define HELPER_SPE_SINGLE_CONV(name) \
3043 uint32_t helper_e##name (uint32_t val) \
3045 return e##name(val); \
3048 HELPER_SPE_SINGLE_CONV(fscfsi);
3050 HELPER_SPE_SINGLE_CONV(fscfui);
3052 HELPER_SPE_SINGLE_CONV(fscfuf);
3054 HELPER_SPE_SINGLE_CONV(fscfsf);
3056 HELPER_SPE_SINGLE_CONV(fsctsi);
3058 HELPER_SPE_SINGLE_CONV(fsctui);
3060 HELPER_SPE_SINGLE_CONV(fsctsiz);
3062 HELPER_SPE_SINGLE_CONV(fsctuiz);
3064 HELPER_SPE_SINGLE_CONV(fsctsf);
3066 HELPER_SPE_SINGLE_CONV(fsctuf);
3068 #define HELPER_SPE_VECTOR_CONV(name) \
3069 uint64_t helper_ev##name (uint64_t val) \
3071 return ((uint64_t)e##name(val >> 32) << 32) | \
3072 (uint64_t)e##name(val); \
3075 HELPER_SPE_VECTOR_CONV(fscfsi);
3077 HELPER_SPE_VECTOR_CONV(fscfui);
3079 HELPER_SPE_VECTOR_CONV(fscfuf);
3081 HELPER_SPE_VECTOR_CONV(fscfsf);
3083 HELPER_SPE_VECTOR_CONV(fsctsi);
3085 HELPER_SPE_VECTOR_CONV(fsctui);
3087 HELPER_SPE_VECTOR_CONV(fsctsiz);
3089 HELPER_SPE_VECTOR_CONV(fsctuiz);
3091 HELPER_SPE_VECTOR_CONV(fsctsf);
3093 HELPER_SPE_VECTOR_CONV(fsctuf);
3095 /* Single-precision floating-point arithmetic */
3096 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3101 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3105 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3110 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3114 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3119 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3123 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3128 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3132 #define HELPER_SPE_SINGLE_ARITH(name) \
3133 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3135 return e##name(op1, op2); \
3138 HELPER_SPE_SINGLE_ARITH(fsadd);
3140 HELPER_SPE_SINGLE_ARITH(fssub);
3142 HELPER_SPE_SINGLE_ARITH(fsmul);
3144 HELPER_SPE_SINGLE_ARITH(fsdiv);
3146 #define HELPER_SPE_VECTOR_ARITH(name) \
3147 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3149 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3150 (uint64_t)e##name(op1, op2); \
3153 HELPER_SPE_VECTOR_ARITH(fsadd);
3155 HELPER_SPE_VECTOR_ARITH(fssub);
3157 HELPER_SPE_VECTOR_ARITH(fsmul);
3159 HELPER_SPE_VECTOR_ARITH(fsdiv);
3161 /* Single-precision floating-point comparisons */
3162 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3167 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3170 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3175 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3178 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3183 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3186 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3188 /* XXX: TODO: test special values (NaN, infinites, ...) */
3189 return efststlt(op1, op2);
3192 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3194 /* XXX: TODO: test special values (NaN, infinites, ...) */
3195 return efststgt(op1, op2);
3198 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3200 /* XXX: TODO: test special values (NaN, infinites, ...) */
3201 return efststeq(op1, op2);
3204 #define HELPER_SINGLE_SPE_CMP(name) \
3205 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3207 return e##name(op1, op2) << 2; \
3210 HELPER_SINGLE_SPE_CMP(fststlt);
3212 HELPER_SINGLE_SPE_CMP(fststgt);
3214 HELPER_SINGLE_SPE_CMP(fststeq);
3216 HELPER_SINGLE_SPE_CMP(fscmplt);
3218 HELPER_SINGLE_SPE_CMP(fscmpgt);
3220 HELPER_SINGLE_SPE_CMP(fscmpeq);
3222 static always_inline uint32_t evcmp_merge (int t0, int t1)
3224 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
3227 #define HELPER_VECTOR_SPE_CMP(name) \
3228 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3230 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3233 HELPER_VECTOR_SPE_CMP(fststlt);
3235 HELPER_VECTOR_SPE_CMP(fststgt);
3237 HELPER_VECTOR_SPE_CMP(fststeq);
3239 HELPER_VECTOR_SPE_CMP(fscmplt);
3241 HELPER_VECTOR_SPE_CMP(fscmpgt);
3243 HELPER_VECTOR_SPE_CMP(fscmpeq);
3245 /* Double-precision floating-point conversion */
3246 uint64_t helper_efdcfsi (uint32_t val)
3250 u.d = int32_to_float64(val, &env->vec_status);
3255 uint64_t helper_efdcfsid (uint64_t val)
3259 u.d = int64_to_float64(val, &env->vec_status);
3264 uint64_t helper_efdcfui (uint32_t val)
3268 u.d = uint32_to_float64(val, &env->vec_status);
3273 uint64_t helper_efdcfuid (uint64_t val)
3277 u.d = uint64_to_float64(val, &env->vec_status);
3282 uint32_t helper_efdctsi (uint64_t val)
3287 /* NaN are not treated the same way IEEE 754 does */
3288 if (unlikely(float64_is_nan(u.d)))
3291 return float64_to_int32(u.d, &env->vec_status);
3294 uint32_t helper_efdctui (uint64_t val)
3299 /* NaN are not treated the same way IEEE 754 does */
3300 if (unlikely(float64_is_nan(u.d)))
3303 return float64_to_uint32(u.d, &env->vec_status);
3306 uint32_t helper_efdctsiz (uint64_t val)
3311 /* NaN are not treated the same way IEEE 754 does */
3312 if (unlikely(float64_is_nan(u.d)))
3315 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3318 uint64_t helper_efdctsidz (uint64_t val)
3323 /* NaN are not treated the same way IEEE 754 does */
3324 if (unlikely(float64_is_nan(u.d)))
3327 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3330 uint32_t helper_efdctuiz (uint64_t val)
3335 /* NaN are not treated the same way IEEE 754 does */
3336 if (unlikely(float64_is_nan(u.d)))
3339 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3342 uint64_t helper_efdctuidz (uint64_t val)
3347 /* NaN are not treated the same way IEEE 754 does */
3348 if (unlikely(float64_is_nan(u.d)))
3351 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3354 uint64_t helper_efdcfsf (uint32_t val)
3359 u.d = int32_to_float64(val, &env->vec_status);
3360 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3361 u.d = float64_div(u.d, tmp, &env->vec_status);
3366 uint64_t helper_efdcfuf (uint32_t val)
3371 u.d = uint32_to_float64(val, &env->vec_status);
3372 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3373 u.d = float64_div(u.d, tmp, &env->vec_status);
3378 uint32_t helper_efdctsf (uint64_t val)
3384 /* NaN are not treated the same way IEEE 754 does */
3385 if (unlikely(float64_is_nan(u.d)))
3387 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3388 u.d = float64_mul(u.d, tmp, &env->vec_status);
3390 return float64_to_int32(u.d, &env->vec_status);
3393 uint32_t helper_efdctuf (uint64_t val)
3399 /* NaN are not treated the same way IEEE 754 does */
3400 if (unlikely(float64_is_nan(u.d)))
3402 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3403 u.d = float64_mul(u.d, tmp, &env->vec_status);
3405 return float64_to_uint32(u.d, &env->vec_status);
3408 uint32_t helper_efscfd (uint64_t val)
3414 u2.f = float64_to_float32(u1.d, &env->vec_status);
3419 uint64_t helper_efdcfs (uint32_t val)
3425 u2.d = float32_to_float64(u1.f, &env->vec_status);
3430 /* Double precision fixed-point arithmetic */
3431 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3436 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3440 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3445 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3449 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3454 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3458 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3463 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3467 /* Double precision floating point helpers */
3468 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3473 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3476 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3481 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3484 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3489 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3492 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3494 /* XXX: TODO: test special values (NaN, infinites, ...) */
3495 return helper_efdtstlt(op1, op2);
3498 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3500 /* XXX: TODO: test special values (NaN, infinites, ...) */
3501 return helper_efdtstgt(op1, op2);
3504 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3506 /* XXX: TODO: test special values (NaN, infinites, ...) */
3507 return helper_efdtsteq(op1, op2);
3510 /*****************************************************************************/
3511 /* Softmmu support */
3512 #if !defined (CONFIG_USER_ONLY)
3514 #define MMUSUFFIX _mmu
3517 #include "softmmu_template.h"
3520 #include "softmmu_template.h"
3523 #include "softmmu_template.h"
3526 #include "softmmu_template.h"
3528 /* try to fill the TLB and return an exception if error. If retaddr is
3529 NULL, it means that the function was called in C code (i.e. not
3530 from generated code or from helper.c) */
3531 /* XXX: fix it to restore all registers */
3532 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3534 TranslationBlock *tb;
3535 CPUState *saved_env;
3539 /* XXX: hack to restore env in all cases, even if not called from
3542 env = cpu_single_env;
3543 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3544 if (unlikely(ret != 0)) {
3545 if (likely(retaddr)) {
3546 /* now we have a real cpu fault */
3547 pc = (unsigned long)retaddr;
3548 tb = tb_find_pc(pc);
3550 /* the PC is inside the translated code. It means that we have
3551 a virtual CPU fault */
3552 cpu_restore_state(tb, env, pc, NULL);
3555 helper_raise_exception_err(env->exception_index, env->error_code);
3560 /* Segment registers load and store */
3561 target_ulong helper_load_sr (target_ulong sr_num)
3563 return env->sr[sr_num];
3566 void helper_store_sr (target_ulong sr_num, target_ulong val)
3568 ppc_store_sr(env, sr_num, val);
/* SLB management */
#if defined(TARGET_PPC64)
/* Read SLB entry slb_nr. */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

/* Write SLB entry slb_nr. */
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry matching the given address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */
3595 /* TLB management */
3596 void helper_tlbia (void)
3598 ppc_tlb_invalidate_all(env);
3601 void helper_tlbie (target_ulong addr)
3603 ppc_tlb_invalidate_one(env, addr);
3606 /* Software driven TLBs management */
3607 /* PowerPC 602/603 software TLB load instructions helpers */
3608 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3610 target_ulong RPN, CMP, EPN;
3613 RPN = env->spr[SPR_RPA];
3615 CMP = env->spr[SPR_ICMP];
3616 EPN = env->spr[SPR_IMISS];
3618 CMP = env->spr[SPR_DCMP];
3619 EPN = env->spr[SPR_DMISS];
3621 way = (env->spr[SPR_SRR1] >> 17) & 1;
3622 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3623 " PTE1 " ADDRX " way %d\n",
3624 __func__, new_EPN, EPN, CMP, RPN, way);
3625 /* Store this TLB */
3626 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3627 way, is_code, CMP, RPN);
3630 void helper_6xx_tlbd (target_ulong EPN)
3635 void helper_6xx_tlbi (target_ulong EPN)
3640 /* PowerPC 74xx software TLB load instructions helpers */
3641 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3643 target_ulong RPN, CMP, EPN;
3646 RPN = env->spr[SPR_PTELO];
3647 CMP = env->spr[SPR_PTEHI];
3648 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3649 way = env->spr[SPR_TLBMISS] & 0x3;
3650 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3651 " PTE1 " ADDRX " way %d\n",
3652 __func__, new_EPN, EPN, CMP, RPN, way);
3653 /* Store this TLB */
3654 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3655 way, is_code, CMP, RPN);
3658 void helper_74xx_tlbd (target_ulong EPN)
3660 do_74xx_tlb(EPN, 0);
3663 void helper_74xx_tlbi (target_ulong EPN)
3665 do_74xx_tlb(EPN, 1);
3668 static always_inline target_ulong booke_tlb_to_page_size (int size)
3670 return 1024 << (2 * size);
3673 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3677 switch (page_size) {
3711 #if defined (TARGET_PPC64)
3712 case 0x000100000000ULL:
3715 case 0x000400000000ULL:
3718 case 0x001000000000ULL:
3721 case 0x004000000000ULL:
3724 case 0x010000000000ULL:
3736 /* Helpers for 4xx TLB management */
3737 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3744 tlb = &env->tlb[entry].tlbe;
3746 if (tlb->prot & PAGE_VALID)
3748 size = booke_page_size_to_tlb(tlb->size);
3749 if (size < 0 || size > 0x7)
3752 env->spr[SPR_40x_PID] = tlb->PID;
3756 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3762 tlb = &env->tlb[entry].tlbe;
3764 if (tlb->prot & PAGE_EXEC)
3766 if (tlb->prot & PAGE_WRITE)
3771 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3774 target_ulong page, end;
3776 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3778 tlb = &env->tlb[entry].tlbe;
3779 /* Invalidate previous TLB (if it's valid) */
3780 if (tlb->prot & PAGE_VALID) {
3781 end = tlb->EPN + tlb->size;
3782 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3783 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3784 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3785 tlb_flush_page(env, page);
3787 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3788 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3789 * If this ever occurs, one should use the ppcemb target instead
3790 * of the ppc or ppc64 one
3792 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3793 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3794 "are not supported (%d)\n",
3795 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3797 tlb->EPN = val & ~(tlb->size - 1);
3799 tlb->prot |= PAGE_VALID;
3801 tlb->prot &= ~PAGE_VALID;
3803 /* XXX: TO BE FIXED */
3804 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3806 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3807 tlb->attr = val & 0xFF;
3808 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3809 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3810 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3811 tlb->prot & PAGE_READ ? 'r' : '-',
3812 tlb->prot & PAGE_WRITE ? 'w' : '-',
3813 tlb->prot & PAGE_EXEC ? 'x' : '-',
3814 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3815 /* Invalidate new TLB (if valid) */
3816 if (tlb->prot & PAGE_VALID) {
3817 end = tlb->EPN + tlb->size;
3818 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
3819 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3820 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3821 tlb_flush_page(env, page);
3825 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3829 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3831 tlb = &env->tlb[entry].tlbe;
3832 tlb->RPN = val & 0xFFFFFC00;
3833 tlb->prot = PAGE_READ;
3835 tlb->prot |= PAGE_EXEC;
3837 tlb->prot |= PAGE_WRITE;
3838 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3839 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3840 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3841 tlb->prot & PAGE_READ ? 'r' : '-',
3842 tlb->prot & PAGE_WRITE ? 'w' : '-',
3843 tlb->prot & PAGE_EXEC ? 'x' : '-',
3844 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3847 target_ulong helper_4xx_tlbsx (target_ulong address)
3849 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3852 /* PowerPC 440 TLB management */
3853 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
3856 target_ulong EPN, RPN, size;
3859 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
3860 __func__, word, (int)entry, value);
3863 tlb = &env->tlb[entry].tlbe;
3866 /* Just here to please gcc */
3868 EPN = value & 0xFFFFFC00;
3869 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3872 size = booke_tlb_to_page_size((value >> 4) & 0xF);
3873 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3877 tlb->attr |= (value >> 8) & 1;
3878 if (value & 0x200) {
3879 tlb->prot |= PAGE_VALID;
3881 if (tlb->prot & PAGE_VALID) {
3882 tlb->prot &= ~PAGE_VALID;
3886 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3891 RPN = value & 0xFFFFFC0F;
3892 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3897 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
3898 tlb->prot = tlb->prot & PAGE_VALID;
3900 tlb->prot |= PAGE_READ << 4;
3902 tlb->prot |= PAGE_WRITE << 4;
3904 tlb->prot |= PAGE_EXEC << 4;
3906 tlb->prot |= PAGE_READ;
3908 tlb->prot |= PAGE_WRITE;
3910 tlb->prot |= PAGE_EXEC;
3915 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
3922 tlb = &env->tlb[entry].tlbe;
3925 /* Just here to please gcc */
3928 size = booke_page_size_to_tlb(tlb->size);
3929 if (size < 0 || size > 0xF)
3932 if (tlb->attr & 0x1)
3934 if (tlb->prot & PAGE_VALID)
3936 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3937 env->spr[SPR_440_MMUCR] |= tlb->PID;
3943 ret = tlb->attr & ~0x1;
3944 if (tlb->prot & (PAGE_READ << 4))
3946 if (tlb->prot & (PAGE_WRITE << 4))
3948 if (tlb->prot & (PAGE_EXEC << 4))
3950 if (tlb->prot & PAGE_READ)
3952 if (tlb->prot & PAGE_WRITE)
3954 if (tlb->prot & PAGE_EXEC)
3961 target_ulong helper_440_tlbsx (target_ulong address)
3963 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
3966 #endif /* !CONFIG_USER_ONLY */