/* Raise a CPU trap: record the trap type in the CPU state.
   NOTE(review): fragment — the rest of the body (presumably a longjmp
   back to the main loop, as in other QEMU targets) is not visible here;
   confirm against the full file. */
7 void raise_exception(int tt)
9 env->exception_index = tt;
13 #ifdef USE_INT_TO_FLOAT_HELPERS
/* Integer-to-float conversion helpers (fragment): reinterpret the bit
   pattern held in FT1 as an int32_t, then convert that integer value to
   float (FT0) or double (DT0).
   NOTE(review): the pointer cast type-puns FT1 through an incompatible
   pointer type (strict-aliasing violation in ISO C); the original build
   presumably relied on compiler flags or union-backed registers to make
   this safe — confirm. */
16 FT0 = (float) *((int32_t *)&FT1);
21 DT0 = (double) *((int32_t *)&FT1);
/* Single-precision FP compare (fragment, likely do_fcmps — confirm):
   sets the FSR floating-point condition code (FCC) field from the
   ordering of FT0 vs FT1. */
/* Unordered: either operand is a NaN.  FCC is set to 3 (both bits). */
42 if (isnan(FT0) || isnan(FT1)) {
43 T0 = FSR_FCC1 | FSR_FCC0;
44 env->fsr &= ~(FSR_FCC1 | FSR_FCC0);
/* If the invalid-operation trap is unmasked (NVM set), raise an FP trap
   instead of just recording the condition. */
46 if (env->fsr & FSR_NVM) {
47 raise_exception(TT_FP_EXCP);
/* Ordered cases: less-than and greater-than branches (their bodies,
   which set the corresponding FCC encodings, are elided in this view). */
51 } else if (FT0 < FT1) {
53 } else if (FT0 > FT1) {
/* Double-precision FP compare (fragment, likely do_fcmpd — confirm):
   mirrors the single-precision compare above, operating on DT0/DT1. */
/* Unordered: either operand is a NaN.  FCC is set to 3 (both bits). */
63 if (isnan(DT0) || isnan(DT1)) {
64 T0 = FSR_FCC1 | FSR_FCC0;
65 env->fsr &= ~(FSR_FCC1 | FSR_FCC0);
/* Trap on invalid operation when the NV trap mask bit is set. */
67 if (env->fsr & FSR_NVM) {
68 raise_exception(TT_FP_EXCP);
/* Ordered less-than / greater-than branches (bodies elided here). */
72 } else if (DT0 < DT1) {
74 } else if (DT0 > DT1) {
/* Load from an alternate address space (SPARC "lda"/ASI load).
   Fragmentary view: address comes from T0, result goes to ret; the
   dispatch is a switch on `asi` whose frame lines are elided. */
82 void helper_ld_asi(int asi, int size, int sign)
/* ASI 3: MMU probe — bits 11:8 of the address select the probe level. */
87 case 3: /* MMU probe */
91 mmulev = (T0 >> 8) & 15;
95 ret = mmu_probe(T0, mmulev);
99 printf("mmu_probe: 0x%08x (lev %d) -> 0x%08x\n", T0, mmulev, ret);
/* ASI 4: read an MMU register — bits 11:8 of the address select the
   register index. */
103 case 4: /* read MMU regs */
105 int reg = (T0 >> 8) & 0xf;
107 ret = env->mmuregs[reg];
/* Register 3 (fault status) is clear-on-read per the MMU model. */
108 if (reg == 3) /* Fault status cleared on read */
109 env->mmuregs[reg] = 0;
111 printf("mmu_read: reg[%d] = 0x%08x\n", reg, ret);
/* ASIs 0x20..0x2f: bypass the MMU and read physical memory directly;
   the byte-swap below (16-bit case shown) fixes host/target endianness. */
115 case 0x20 ... 0x2f: /* MMU passthrough */
116 cpu_physical_memory_read(T0, (void *) &ret, size);
120 tswap16s((uint16_t *)&ret);
/* Store to an alternate address space (SPARC "sta"/ASI store).
   Fragmentary view: address in T0, data in T1 (and T2 for 64-bit
   stores); dispatch is a switch on `asi` whose frame lines are elided. */
129 void helper_st_asi(int asi, int size, int sign)
/* ASI 3: MMU flush — bits 11:8 of the address select the flush level. */
132 case 3: /* MMU flush */
136 mmulev = (T0 >> 8) & 15;
138 printf("mmu flush level %d\n", mmulev);
/* Level 0 flushes a single page; levels 1-4 (segment/region/context/
   entire) have their bodies elided — presumably they fall through to a
   full tlb_flush, confirm against the complete file. */
141 case 0: // flush page
142 tlb_flush_page(env, T0 & 0xfffff000);
144 case 1: // flush segment (256k)
145 case 2: // flush region (16M)
146 case 3: // flush context (4G)
147 case 4: // flush entire
/* ASI 4: write an MMU register — bits 11:8 select the register. */
158 case 4: /* write MMU regs */
160 int reg = (T0 >> 8) & 0xf, oldreg;
162 oldreg = env->mmuregs[reg];
/* Control register path (register index elided): only the Enable and
   No-Fault bits are writable here. */
165 env->mmuregs[reg] &= ~(MMU_E | MMU_NF);
166 env->mmuregs[reg] |= T1 & (MMU_E | MMU_NF);
167 // Mappings generated during no-fault mode or MMU
168 // disabled mode are invalid in normal mode
169 if (oldreg != env->mmuregs[reg])
/* Context register path: store the value and flush all translations,
   since QEMU's softmmu does not key TLB entries by MMU context. */
173 env->mmuregs[reg] = T1;
174 if (oldreg != env->mmuregs[reg]) {
175 /* we flush when the MMU context changes because
176 QEMU has no MMU context support */
/* Generic register path: plain store, with a debug trace on change. */
184 env->mmuregs[reg] = T1;
188 if (oldreg != env->mmuregs[reg]) {
189 printf("mmu change reg[%d]: 0x%08x -> 0x%08x\n", reg, oldreg, env->mmuregs[reg]);
/* ASI 0x17: block copy — 32 bytes from physical src (T1) to physical
   dst (T0), staged through a temporary buffer. */
195 case 0x17: /* Block copy, sta access */
198 // address (T0) = dst
200 int src = T1, dst = T0;
205 cpu_physical_memory_read(src, (void *) &temp, 32);
206 cpu_physical_memory_write(dst, (void *) &temp, 32);
/* ASI 0x1f: block fill — replicate the 64-bit value T1:T2 across a
   32-byte physical region starting at dst. */
209 case 0x1f: /* Block fill, stda access */
212 // address (T0) = dst
217 val = (((uint64_t)T1) << 32) | T2;
220 for (i = 0; i < 32; i += 8, dst += 8) {
221 cpu_physical_memory_write(dst, (void *) &val, 8);
/* ASIs 0x20..0x2f: MMU passthrough store — byte-swap to target order
   (16-bit case shown) then write physical memory directly. */
225 case 0x20 ... 0x2f: /* MMU passthrough */
231 tswap16s((uint16_t *)&temp);
232 cpu_physical_memory_write(T0, (void *) &temp, size);
/* Register-window restore fragment (enclosing function's signature is
   not visible — presumably helper_rett or similar, confirm): advance
   the current window pointer modulo NWINDOWS, trap on window underflow
   if the target window is invalid (WIM bit set), and restore the
   supervisor flag from the previous-supervisor bit. */
245 cwp = (env->cwp + 1) & (NWINDOWS - 1);
246 if (env->wim & (1 << cwp)) {
247 raise_exception(TT_WIN_UNF);
250 env->psrs = env->psrps;
/* Load FSR side effects (fragment): translate the rounding-direction
   field of the SPARC FSR into the host FPU rounding mode via
   fesetround().  The case labels selecting each mode are elided in
   this view. */
253 void helper_ldfsr(void)
255 switch (env->fsr & FSR_RD_MASK) {
257 fesetround(FE_TONEAREST);
260 fesetround(FE_TOWARDZERO);
263 fesetround(FE_UPWARD);
266 fesetround(FE_DOWNWARD);
/* Decompose a double into a 53-bit integer mantissa and an exponent:
   frexp() yields a fraction in [0.5, 1) plus the binary exponent, and
   scaling by 2^53 turns the fraction into an integer mantissa.
   NOTE(review): the line storing exptemp into *pexp is elided in this
   view; sign handling is also not visible — confirm in the full file. */
271 void cpu_get_fp64(uint64_t *pmant, uint16_t *pexp, double f)
275 *pmant = ldexp(frexp(f, &exptemp), 53);
/* Inverse of cpu_get_fp64: rebuild the double from the 53-bit integer
   mantissa and exponent by scaling back down by 2^53. */
279 double cpu_put_fp64(uint64_t mant, uint16_t exp)
281 return ldexp((double) mant, exp - 53);
/* Lone statement from a function whose definition is outside this view
   (presumably a breakpoint/debug handler — confirm): flag a debug
   exception for the main loop. */
286 env->exception_index = EXCP_DEBUG;