OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL,
};
+/* Multiplication variants of the vr54xx. */
+/* NOTE(review): like the existing MASK_REGIMM below, the expansion is
+   left unparenthesized to match the file's MASK_* style; wrap call
+   sites in parentheses if ever combined into a larger expression. */
+#define MASK_MUL_VR54XX(op) MASK_SPECIAL(op) | (op & (0x1F << 6))
+
+/* The variant is selected by bits 10..6 (the sa field) on top of the
+   standard MULT/MULTU function codes. */
+enum {
+ OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT,
+ OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU,
+ OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT,
+ OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU,
+ OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT,
+ OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU,
+ OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT,
+ OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU,
+ OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT,
+ OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU,
+ OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT,
+ OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU,
+ OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT,
+ OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU,
+};
+
/* REGIMM (rt field) opcodes */
#define MASK_REGIMM(op) MASK_OP_MAJOR(op) | (op & (0x1F << 16))
uint32_t hflags, saved_hflags;
int bstate;
target_ulong btarget;
+ void *last_T0_store;
+ int last_T0_gpr;
} DisasContext;
enum {
ctx->opcode & 0x3F, ((ctx->opcode >> 16) & 0x1F)); \
} while (0)
-#define GEN_LOAD_REG_TN(Tn, Rn)                                       \
+/* Load gpr Rn into T0.  The load is skipped when the most recently
+   generated op was a store of T0 to this same gpr (tracked via the
+   DisasContext last_T0_store / last_T0_gpr fields), since T0 then
+   already holds the value. */
+#define GEN_LOAD_REG_T0(Rn)                                           \
do { \
if (Rn == 0) { \
-        glue(gen_op_reset_, Tn)();                                    \
+        gen_op_reset_T0();                                            \
} else { \
-        glue(gen_op_load_gpr_, Tn)(Rn);                               \
+        if (ctx->glue(last_T0, _store) != gen_opc_ptr                 \
+            || ctx->glue(last_T0, _gpr) != Rn) {                      \
+            gen_op_load_gpr_T0(Rn);                                   \
+        }                                                             \
+    }                                                                 \
+} while (0)
+
+/* Load gpr Rn into T1 (no redundant-load tracking; only T0 stores are
+   recorded). */
+#define GEN_LOAD_REG_T1(Rn)                                           \
+do {                                                                  \
+    if (Rn == 0) {                                                    \
+        gen_op_reset_T1();                                            \
+    } else {                                                          \
+        gen_op_load_gpr_T1(Rn);                                       \
+    }                                                                 \
+} while (0)
+
+/* Load gpr Rn into T2 (no redundant-load tracking). */
+#define GEN_LOAD_REG_T2(Rn)                                           \
+do {                                                                  \
+    if (Rn == 0) {                                                    \
+        gen_op_reset_T2();                                            \
+    } else {                                                          \
+        gen_op_load_gpr_T2(Rn);                                       \
} \
} while (0)
} \
} while (0)
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
#define GEN_LOAD_IMM_TN(Tn, Imm) \
do { \
if (Imm == 0) { \
} while (0)
#endif
-#define GEN_STORE_TN_REG(Rn, Tn)                                      \
+/* Store T0 to gpr Rn (writes to $zero are dropped), and remember the
+   opcode-stream position and target register so GEN_LOAD_REG_T0 can
+   elide an immediately following reload of the same gpr. */
+#define GEN_STORE_T0_REG(Rn)                                          \
do { \
if (Rn != 0) { \
-        glue(glue(gen_op_store_, Tn),_gpr)(Rn);                       \
+        glue(gen_op_store_T0,_gpr)(Rn);                               \
+        ctx->glue(last_T0,_store) = gen_opc_ptr;                      \
+        ctx->glue(last_T0,_gpr) = Rn;                                 \
} \
} while (0)
+/* Store T1 to gpr Rn (writes to $zero dropped); no tracking. */
+#define GEN_STORE_T1_REG(Rn)                                          \
+do {                                                                  \
+    if (Rn != 0)                                                      \
+        glue(gen_op_store_T1,_gpr)(Rn);                               \
+} while (0)
+
#define GEN_STORE_TN_SRSREG(Rn, Tn) \
do { \
if (Rn != 0) { \
static always_inline void gen_save_pc(target_ulong pc)
{
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
if (pc == (int32_t)pc) {
gen_op_save_pc(pc);
} else {
static always_inline void gen_save_btarget(target_ulong btarget)
{
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
if (btarget == (int32_t)btarget) {
gen_op_save_btarget(btarget);
} else {
generate_exception_err(ctx, EXCP_CpU, 1);
}
-static always_inline void check_cp1_64bitmode(DisasContext *ctx)
+/* Verify that the processor is running with COP1X instructions enabled.
+   This is associated with the nabla symbol in the MIPS32 and MIPS64
+   opcode tables. */
+
+static always_inline void check_cop1x(DisasContext *ctx)
{
-    if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64)))
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X)))
generate_exception(ctx, EXCP_RI);
}
-static always_inline void check_cp1_3d(CPUState *env, DisasContext *ctx)
+/* Verify that the processor is running with 64-bit floating-point
+   operations enabled. */
+
+static always_inline void check_cp1_64bitmode(DisasContext *ctx)
{
+    /* ~hflags & mask is nonzero unless BOTH F64 and COP1X are set,
+       so this raises RI when either capability is missing. */
-    if (unlikely(!(env->fpu->fcr0 & (1 << FCR0_3D))))
+    if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X)))
generate_exception(ctx, EXCP_RI);
}
generate_exception(ctx, EXCP_RI);
}
-/* This code generates a "reserved instruction" exception if the
- CPU is not MIPS MT capable. */
-static always_inline void check_mips_mt(CPUState *env, DisasContext *ctx)
-{
- if (unlikely(!(env->CP0_Config3 & (1 << CP0C3_MT))))
- generate_exception(ctx, EXCP_RI);
-}
-
/* This code generates a "reserved instruction" exception if 64-bit
instructions are not enabled. */
static always_inline void check_mips_64(DisasContext *ctx)
#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_l##width[] = { \
- &gen_op_l##width##_user, \
&gen_op_l##width##_kernel, \
+ &gen_op_l##width##_super, \
+ &gen_op_l##width##_user, \
}
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_s##width[] = { \
- &gen_op_s##width##_user, \
&gen_op_s##width##_kernel, \
+ &gen_op_s##width##_super, \
+ &gen_op_s##width##_user, \
}
#endif
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
OP_LD_TABLE(d);
OP_LD_TABLE(dl);
OP_LD_TABLE(dr);
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_LWU:
op_ldst(lwu);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lwu";
break;
case OPC_LD:
op_ldst(ld);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "ld";
break;
case OPC_LLD:
op_ldst(lld);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lld";
break;
case OPC_SD:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sd);
opn = "sd";
break;
case OPC_SCD:
save_cpu_state(ctx, 1);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(scd);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "scd";
break;
case OPC_LDL:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(ldl);
- GEN_STORE_TN_REG(rt, T1);
+ GEN_STORE_T1_REG(rt);
opn = "ldl";
break;
case OPC_SDL:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sdl);
opn = "sdl";
break;
case OPC_LDR:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(ldr);
- GEN_STORE_TN_REG(rt, T1);
+ GEN_STORE_T1_REG(rt);
opn = "ldr";
break;
case OPC_SDR:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sdr);
opn = "sdr";
break;
#endif
case OPC_LW:
op_ldst(lw);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lw";
break;
case OPC_SW:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sw);
opn = "sw";
break;
case OPC_LH:
op_ldst(lh);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lh";
break;
case OPC_SH:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sh);
opn = "sh";
break;
case OPC_LHU:
op_ldst(lhu);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lhu";
break;
case OPC_LB:
op_ldst(lb);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lb";
break;
case OPC_SB:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sb);
opn = "sb";
break;
case OPC_LBU:
op_ldst(lbu);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "lbu";
break;
case OPC_LWL:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(lwl);
- GEN_STORE_TN_REG(rt, T1);
+ GEN_STORE_T1_REG(rt);
opn = "lwl";
break;
case OPC_SWL:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(swl);
- opn = "swr";
+ /* Fix copy/paste error: this is the SWL case, but the debug
+    mnemonic was set to "swr" (the SWR case below also uses that
+    string).  Converted to a -/+ pair so the hunk still matches the
+    upstream context line. */
+ opn = "swl";
break;
case OPC_LWR:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(lwr);
- GEN_STORE_TN_REG(rt, T1);
+ GEN_STORE_T1_REG(rt);
opn = "lwr";
break;
case OPC_SWR:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(swr);
opn = "swr";
break;
case OPC_LL:
op_ldst(ll);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "ll";
break;
case OPC_SC:
save_cpu_state(ctx, 1);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
op_ldst(sc);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "sc";
break;
default:
switch (opc) {
case OPC_ADDI:
case OPC_ADDIU:
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DADDI:
case OPC_DADDIU:
#endif
case OPC_ANDI:
case OPC_ORI:
case OPC_XORI:
- GEN_LOAD_REG_TN(T0, rs);
+ GEN_LOAD_REG_T0(rs);
GEN_LOAD_IMM_TN(T1, uimm);
break;
case OPC_LUI:
case OPC_SLL:
case OPC_SRA:
case OPC_SRL:
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DSLL:
case OPC_DSRA:
case OPC_DSRL:
case OPC_DSRL32:
#endif
uimm &= 0x1f;
- GEN_LOAD_REG_TN(T0, rs);
+ GEN_LOAD_REG_T0(rs);
GEN_LOAD_IMM_TN(T1, uimm);
break;
}
gen_op_add();
opn = "addiu";
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DADDI:
save_cpu_state(ctx, 1);
gen_op_daddo();
break;
}
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DSLL:
gen_op_dsll();
opn = "dsll";
generate_exception(ctx, EXCP_RI);
return;
}
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
}
MIPS_DEBUG("NOP");
return;
}
- GEN_LOAD_REG_TN(T0, rs);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T0(rs);
+ /* Specialcase the conventional move operation. */
+ if (rt == 0 && (opc == OPC_ADDU || opc == OPC_DADDU
+ || opc == OPC_SUBU || opc == OPC_DSUBU)) {
+ GEN_STORE_T0_REG(rd);
+ return;
+ }
+ GEN_LOAD_REG_T1(rt);
switch (opc) {
case OPC_ADD:
save_cpu_state(ctx, 1);
gen_op_sub();
opn = "subu";
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DADD:
save_cpu_state(ctx, 1);
gen_op_daddo();
break;
}
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DSLLV:
gen_op_dsllv();
opn = "dsllv";
generate_exception(ctx, EXCP_RI);
return;
}
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
print:
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
}
switch (opc) {
case OPC_MFHI:
gen_op_load_HI(0);
- GEN_STORE_TN_REG(reg, T0);
+ GEN_STORE_T0_REG(reg);
opn = "mfhi";
break;
case OPC_MFLO:
gen_op_load_LO(0);
- GEN_STORE_TN_REG(reg, T0);
+ GEN_STORE_T0_REG(reg);
opn = "mflo";
break;
case OPC_MTHI:
- GEN_LOAD_REG_TN(T0, reg);
+ GEN_LOAD_REG_T0(reg);
gen_op_store_HI(0);
opn = "mthi";
break;
case OPC_MTLO:
- GEN_LOAD_REG_TN(T0, reg);
+ GEN_LOAD_REG_T0(reg);
gen_op_store_LO(0);
opn = "mtlo";
break;
{
const char *opn = "mul/div";
- GEN_LOAD_REG_TN(T0, rs);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T0(rs);
+ GEN_LOAD_REG_T1(rt);
switch (opc) {
case OPC_DIV:
gen_op_div();
gen_op_multu();
opn = "multu";
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DDIV:
gen_op_ddiv();
opn = "ddiv";
MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]);
}
+/* Generate code for one of the NEC VR54xx integer multiply /
+   multiply-accumulate variants.  rs and rt are loaded into T0/T1, the
+   variant-specific generated op leaves its result in T0, and the
+   result is written back to rd (writes to $zero are suppressed by
+   GEN_STORE_T0_REG).  Unrecognized variants raise a reserved
+   instruction exception. */
+static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ const char *opn = "mul vr54xx";
+
+ GEN_LOAD_REG_T0(rs);
+ GEN_LOAD_REG_T1(rt);
+
+ switch (opc) {
+ case OPC_VR54XX_MULS:
+ gen_op_muls();
+ opn = "muls";
+ break;
+ case OPC_VR54XX_MULSU:
+ gen_op_mulsu();
+ opn = "mulsu";
+ break;
+ case OPC_VR54XX_MACC:
+ gen_op_macc();
+ opn = "macc";
+ break;
+ case OPC_VR54XX_MACCU:
+ gen_op_maccu();
+ opn = "maccu";
+ break;
+ case OPC_VR54XX_MSAC:
+ gen_op_msac();
+ opn = "msac";
+ break;
+ case OPC_VR54XX_MSACU:
+ gen_op_msacu();
+ opn = "msacu";
+ break;
+ case OPC_VR54XX_MULHI:
+ gen_op_mulhi();
+ opn = "mulhi";
+ break;
+ case OPC_VR54XX_MULHIU:
+ gen_op_mulhiu();
+ opn = "mulhiu";
+ break;
+ case OPC_VR54XX_MULSHI:
+ gen_op_mulshi();
+ opn = "mulshi";
+ break;
+ case OPC_VR54XX_MULSHIU:
+ gen_op_mulshiu();
+ opn = "mulshiu";
+ break;
+ case OPC_VR54XX_MACCHI:
+ gen_op_macchi();
+ opn = "macchi";
+ break;
+ case OPC_VR54XX_MACCHIU:
+ gen_op_macchiu();
+ opn = "macchiu";
+ break;
+ case OPC_VR54XX_MSACHI:
+ gen_op_msachi();
+ opn = "msachi";
+ break;
+ case OPC_VR54XX_MSACHIU:
+ gen_op_msachiu();
+ opn = "msachiu";
+ break;
+ default:
+ MIPS_INVAL("mul vr54xx");
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+ GEN_STORE_T0_REG(rd);
+ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+}
+
+
static void gen_cl (DisasContext *ctx, uint32_t opc,
int rd, int rs)
{
MIPS_DEBUG("NOP");
return;
}
- GEN_LOAD_REG_TN(T0, rs);
+ GEN_LOAD_REG_T0(rs);
switch (opc) {
case OPC_CLO:
gen_op_clo();
gen_op_clz();
opn = "clz";
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DCLO:
gen_op_dclo();
opn = "dclo";
case OPC_TNE:
/* Compare two registers */
if (rs != rt) {
- GEN_LOAD_REG_TN(T0, rs);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T0(rs);
+ GEN_LOAD_REG_T1(rt);
cond = 1;
}
break;
case OPC_TNEI:
/* Compare register to immediate */
if (rs != 0 || imm != 0) {
- GEN_LOAD_REG_TN(T0, rs);
+ GEN_LOAD_REG_T0(rs);
GEN_LOAD_IMM_TN(T1, (int32_t)imm);
cond = 1;
}
case OPC_BNEL:
/* Compare two registers */
if (rs != rt) {
- GEN_LOAD_REG_TN(T0, rs);
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T0(rs);
+ GEN_LOAD_REG_T1(rt);
bcond = 1;
}
btarget = ctx->pc + 4 + offset;
generate_exception(ctx, EXCP_RI);
return;
}
- GEN_LOAD_REG_TN(T2, rs);
+ GEN_LOAD_REG_T2(rs);
break;
default:
MIPS_INVAL("branch/jump");
static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
int rs, int lsb, int msb)
{
- GEN_LOAD_REG_TN(T1, rs);
+ GEN_LOAD_REG_T1(rs);
switch (opc) {
case OPC_EXT:
if (lsb + msb > 31)
goto fail;
gen_op_ext(lsb, msb + 1);
break;
+#if defined(TARGET_MIPS64)
case OPC_DEXTM:
if (lsb + msb > 63)
goto fail;
- gen_op_ext(lsb, msb + 1 + 32);
+ gen_op_dext(lsb, msb + 1 + 32);
break;
case OPC_DEXTU:
if (lsb + msb > 63)
goto fail;
- gen_op_ext(lsb + 32, msb + 1);
+ gen_op_dext(lsb + 32, msb + 1);
break;
case OPC_DEXT:
- gen_op_ext(lsb, msb + 1);
+ if (lsb + msb > 63)
+ goto fail;
+ gen_op_dext(lsb, msb + 1);
break;
+#endif
case OPC_INS:
if (lsb > msb)
goto fail;
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
gen_op_ins(lsb, msb - lsb + 1);
break;
+#if defined(TARGET_MIPS64)
case OPC_DINSM:
if (lsb > msb)
goto fail;
- GEN_LOAD_REG_TN(T0, rt);
- gen_op_ins(lsb, msb - lsb + 1 + 32);
+ GEN_LOAD_REG_T0(rt);
+ gen_op_dins(lsb, msb - lsb + 1 + 32);
break;
case OPC_DINSU:
if (lsb > msb)
goto fail;
- GEN_LOAD_REG_TN(T0, rt);
- gen_op_ins(lsb + 32, msb - lsb + 1);
+ GEN_LOAD_REG_T0(rt);
+ gen_op_dins(lsb + 32, msb - lsb + 1);
break;
case OPC_DINS:
if (lsb > msb)
goto fail;
- GEN_LOAD_REG_TN(T0, rt);
- gen_op_ins(lsb, msb - lsb + 1);
+ GEN_LOAD_REG_T0(rt);
+ gen_op_dins(lsb, msb - lsb + 1);
break;
+#endif
default:
fail:
MIPS_INVAL("bitops");
generate_exception(ctx, EXCP_RI);
return;
}
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
}
/* CP0 (MMU and control) */
rn = "Index";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpcontrol();
rn = "MVPControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpconf0();
rn = "MVPConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpconf1();
rn = "MVPConf1";
break;
rn = "Random";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpecontrol();
rn = "VPEControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeconf0();
rn = "VPEConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeconf1();
rn = "VPEConf1";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_yqmask();
rn = "YQMask";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeschedule();
rn = "VPESchedule";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeschefback();
rn = "VPEScheFBack";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeopt();
rn = "VPEOpt";
break;
rn = "EntryLo0";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcstatus();
rn = "TCStatus";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcbind();
rn = "TCBind";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcrestart();
rn = "TCRestart";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tchalt();
rn = "TCHalt";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tccontext();
rn = "TCContext";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcschedule();
rn = "TCSchedule";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcschefback();
rn = "TCScheFBack";
break;
case 20:
switch (sel) {
case 0:
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
gen_op_mfc0_xcontext();
rn = "XContext";
case 0:
switch (sel) {
case 0:
- gen_op_mtc0_index();
+ gen_op_mtc0_index();
rn = "Index";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_mvpcontrol();
rn = "MVPControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
/* ignored */
rn = "MVPConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
/* ignored */
rn = "MVPConf1";
break;
rn = "Random";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpecontrol();
rn = "VPEControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeconf0();
rn = "VPEConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeconf1();
rn = "VPEConf1";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_yqmask();
rn = "YQMask";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeschedule();
rn = "VPESchedule";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeschefback();
rn = "VPEScheFBack";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeopt();
rn = "VPEOpt";
break;
rn = "EntryLo0";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcstatus();
rn = "TCStatus";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcbind();
rn = "TCBind";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcrestart();
rn = "TCRestart";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tchalt();
rn = "TCHalt";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tccontext();
rn = "TCContext";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcschedule();
rn = "TCSchedule";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcschefback();
rn = "TCScheFBack";
break;
case 20:
switch (sel) {
case 0:
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
gen_op_mtc0_xcontext();
rn = "XContext";
generate_exception(ctx, EXCP_RI);
}
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
{
const char *rn = "invalid";
rn = "Index";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpcontrol();
rn = "MVPControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpconf0();
rn = "MVPConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_mvpconf1();
rn = "MVPConf1";
break;
rn = "Random";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpecontrol();
rn = "VPEControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeconf0();
rn = "VPEConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeconf1();
rn = "VPEConf1";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_yqmask();
rn = "YQMask";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_vpeschedule();
rn = "VPESchedule";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_vpeschefback();
rn = "VPEScheFBack";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_vpeopt();
rn = "VPEOpt";
break;
rn = "EntryLo0";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcstatus();
rn = "TCStatus";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mfc0_tcbind();
rn = "TCBind";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_tcrestart();
rn = "TCRestart";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_tchalt();
rn = "TCHalt";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_tccontext();
rn = "TCContext";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_tcschedule();
rn = "TCSchedule";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmfc0_tcschefback();
rn = "TCScheFBack";
break;
rn = "Index";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_mvpcontrol();
rn = "MVPControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
/* ignored */
rn = "MVPConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
/* ignored */
rn = "MVPConf1";
break;
rn = "Random";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpecontrol();
rn = "VPEControl";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeconf0();
rn = "VPEConf0";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeconf1();
rn = "VPEConf1";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_yqmask();
rn = "YQMask";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeschedule();
rn = "VPESchedule";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeschefback();
rn = "VPEScheFBack";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_vpeopt();
rn = "VPEOpt";
break;
rn = "EntryLo0";
break;
case 1:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcstatus();
rn = "TCStatus";
break;
case 2:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcbind();
rn = "TCBind";
break;
case 3:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcrestart();
rn = "TCRestart";
break;
case 4:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tchalt();
rn = "TCHalt";
break;
case 5:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tccontext();
rn = "TCContext";
break;
case 6:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcschedule();
rn = "TCSchedule";
break;
case 7:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_mtc0_tcschefback();
rn = "TCScheFBack";
break;
#endif
generate_exception(ctx, EXCP_RI);
}
-#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */
+#endif /* TARGET_MIPS64 */
static void gen_mftr(CPUState *env, DisasContext *ctx, int rt,
int u, int sel, int h)
opn = "mfc0";
break;
case OPC_MTC0:
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
save_cpu_state(ctx, 1);
gen_mtc0(env, ctx, rd, ctx->opcode & 0x7);
opn = "mtc0";
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DMFC0:
check_insn(env, ctx, ISA_MIPS3);
if (rt == 0) {
break;
case OPC_DMTC0:
check_insn(env, ctx, ISA_MIPS3);
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
save_cpu_state(ctx, 1);
gen_dmtc0(env, ctx, rd, ctx->opcode & 0x7);
opn = "dmtc0";
break;
#endif
case OPC_MFTR:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
if (rd == 0) {
/* Treat as NOP. */
return;
opn = "mftr";
break;
case OPC_MTTR:
- check_mips_mt(env, ctx);
- GEN_LOAD_REG_TN(T0, rt);
+ check_insn(env, ctx, ASE_MT);
+ GEN_LOAD_REG_T0(rt);
gen_mttr(env, ctx, rd, (ctx->opcode >> 5) & 1,
ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
opn = "mttr";
case OPC_MFC1:
GEN_LOAD_FREG_FTN(WT0, fs);
gen_op_mfc1();
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "mfc1";
break;
case OPC_MTC1:
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
gen_op_mtc1();
GEN_STORE_FTN_FREG(fs, WT0);
opn = "mtc1";
break;
case OPC_CFC1:
gen_op_cfc1(fs);
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "cfc1";
break;
case OPC_CTC1:
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
gen_op_ctc1(fs);
opn = "ctc1";
break;
case OPC_DMFC1:
GEN_LOAD_FREG_FTN(DT0, fs);
gen_op_dmfc1();
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "dmfc1";
break;
case OPC_DMTC1:
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
gen_op_dmtc1();
GEN_STORE_FTN_FREG(fs, DT0);
opn = "dmtc1";
case OPC_MFHC1:
GEN_LOAD_FREG_FTN(WTH0, fs);
gen_op_mfhc1();
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
opn = "mfhc1";
break;
case OPC_MTHC1:
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
gen_op_mthc1();
GEN_STORE_FTN_FREG(fs, WTH0);
opn = "mthc1";
{
uint32_t ccbit;
- GEN_LOAD_REG_TN(T0, rd);
- GEN_LOAD_REG_TN(T1, rs);
+ GEN_LOAD_REG_T0(rd);
+ GEN_LOAD_REG_T1(rs);
if (cc) {
ccbit = 1 << (24 + cc);
} else
gen_op_movf(ccbit);
else
gen_op_movt(ccbit);
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
}
#define GEN_MOVCF(fmt) \
opn = "floor.w.s";
break;
case FOP(17, 16):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
gen_movcf_s(ctx, (ft >> 2) & 0x7, ft & 0x1);
opn = "movcf.s";
break;
case FOP(18, 16):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
gen_op_float_movz_s();
opn = "movz.s";
break;
case FOP(19, 16):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
gen_op_float_movn_s();
opn = "movn.s";
break;
case FOP(21, 16):
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
gen_op_float_recip_s();
GEN_STORE_FTN_FREG(fd, WT2);
opn = "recip.s";
break;
case FOP(22, 16):
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
gen_op_float_rsqrt_s();
GEN_STORE_FTN_FREG(fd, WT2);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
if (ctx->opcode & (1 << 6)) {
- check_cp1_64bitmode(ctx);
+ check_cop1x(ctx);
gen_cmpabs_s(func-48, cc);
opn = condnames_abs[func-48];
} else {
opn = "floor.w.d";
break;
case FOP(17, 17):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT2, fd);
gen_movcf_d(ctx, (ft >> 2) & 0x7, ft & 0x1);
opn = "movcf.d";
break;
case FOP(18, 17):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT2, fd);
gen_op_float_movz_d();
opn = "movz.d";
break;
case FOP(19, 17):
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT2, fd);
gen_op_float_movn_d();
opn = "movn.d";
break;
case FOP(21, 17):
- check_cp1_registers(ctx, fs | fd);
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(DT0, fs);
gen_op_float_recip_d();
GEN_STORE_FTN_FREG(fd, DT2);
opn = "recip.d";
break;
case FOP(22, 17):
- check_cp1_registers(ctx, fs | fd);
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(DT0, fs);
gen_op_float_rsqrt_d();
GEN_STORE_FTN_FREG(fd, DT2);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
if (ctx->opcode & (1 << 6)) {
- check_cp1_64bitmode(ctx);
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fs | ft);
gen_cmpabs_d(func-48, cc);
opn = condnames_abs[func-48];
} else {
opn = "cvt.d.l";
break;
case FOP(38, 20):
- case FOP(38, 21):
check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
break;
case FOP(17, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
break;
case FOP(18, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
break;
case FOP(19, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_REG_TN(T0, ft);
+ GEN_LOAD_REG_T0(ft);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT2, fd);
const char *opn = "extended float load/store";
int store = 0;
- /* All of those work only on 64bit FPUs. */
- check_cp1_64bitmode(ctx);
if (base == 0) {
if (index == 0)
gen_op_reset_T0();
else
- GEN_LOAD_REG_TN(T0, index);
+ GEN_LOAD_REG_T0(index);
} else if (index == 0) {
- GEN_LOAD_REG_TN(T0, base);
+ GEN_LOAD_REG_T0(base);
} else {
- GEN_LOAD_REG_TN(T0, base);
- GEN_LOAD_REG_TN(T1, index);
+ GEN_LOAD_REG_T0(base);
+ GEN_LOAD_REG_T1(index);
gen_op_addr_add();
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
case OPC_LWXC1:
+ check_cop1x(ctx);
op_ldst(lwc1);
GEN_STORE_FTN_FREG(fd, WT0);
opn = "lwxc1";
break;
case OPC_LDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd);
op_ldst(ldc1);
GEN_STORE_FTN_FREG(fd, DT0);
opn = "ldxc1";
break;
case OPC_LUXC1:
+ check_cp1_64bitmode(ctx);
op_ldst(luxc1);
GEN_STORE_FTN_FREG(fd, DT0);
opn = "luxc1";
break;
case OPC_SWXC1:
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
op_ldst(swc1);
opn = "swxc1";
store = 1;
break;
case OPC_SDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fs);
GEN_LOAD_FREG_FTN(DT0, fs);
op_ldst(sdc1);
opn = "sdxc1";
store = 1;
break;
case OPC_SUXC1:
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(DT0, fs);
op_ldst(suxc1);
opn = "suxc1";
{
const char *opn = "flt3_arith";
- /* All of those work only on 64bit FPUs. */
- check_cp1_64bitmode(ctx);
switch (opc) {
case OPC_ALNV_PS:
- GEN_LOAD_REG_TN(T0, fr);
+ check_cp1_64bitmode(ctx);
+ GEN_LOAD_REG_T0(fr);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
gen_op_float_alnv_ps();
opn = "alnv.ps";
break;
case OPC_MADD_S:
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
GEN_LOAD_FREG_FTN(WT2, fr);
opn = "madd.s";
break;
case OPC_MADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
GEN_LOAD_FREG_FTN(DT2, fr);
opn = "madd.d";
break;
case OPC_MADD_PS:
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
opn = "madd.ps";
break;
case OPC_MSUB_S:
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
GEN_LOAD_FREG_FTN(WT2, fr);
opn = "msub.s";
break;
case OPC_MSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
GEN_LOAD_FREG_FTN(DT2, fr);
opn = "msub.d";
break;
case OPC_MSUB_PS:
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
opn = "msub.ps";
break;
case OPC_NMADD_S:
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
GEN_LOAD_FREG_FTN(WT2, fr);
opn = "nmadd.s";
break;
case OPC_NMADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
GEN_LOAD_FREG_FTN(DT2, fr);
opn = "nmadd.d";
break;
case OPC_NMADD_PS:
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
opn = "nmadd.ps";
break;
case OPC_NMSUB_S:
+ check_cop1x(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
GEN_LOAD_FREG_FTN(WT2, fr);
opn = "nmsub.s";
break;
case OPC_NMSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
GEN_LOAD_FREG_FTN(DT0, fs);
GEN_LOAD_FREG_FTN(DT1, ft);
GEN_LOAD_FREG_FTN(DT2, fr);
opn = "nmsub.d";
break;
case OPC_NMSUB_PS:
+ check_cp1_64bitmode(ctx);
GEN_LOAD_FREG_FTN(WT0, fs);
GEN_LOAD_FREG_FTN(WTH0, fs);
GEN_LOAD_FREG_FTN(WT1, ft);
/* MIPS16 extension to MIPS32 */
/* SmartMIPS extension to MIPS32 */
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
/* MDMX extension to MIPS64 */
-/* MIPS-3D extension to MIPS64 */
#endif
gen_arith(env, ctx, op1, rd, rs, rt);
break;
case OPC_MULT ... OPC_DIVU:
- gen_muldiv(ctx, op1, rs, rt);
+ if (sa) {
+ check_insn(env, ctx, INSN_VR54XX);
+ op1 = MASK_MUL_VR54XX(ctx->opcode);
+ gen_mul_vr54xx(ctx, op1, rd, rs, rt);
+ } else
+ gen_muldiv(ctx, op1, rs, rt);
break;
case OPC_JR ... OPC_JALR:
gen_compute_branch(ctx, op1, rs, rd, sa);
}
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
/* MIPS64 specific opcodes */
case OPC_DSLL:
case OPC_DSRL ... OPC_DSRA:
}
/* Treat as NOP. */
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DCLZ ... OPC_DCLO:
check_insn(env, ctx, ISA_MIPS64);
check_mips_64(ctx);
op2 = MASK_BSHFL(ctx->opcode);
switch (op2) {
case OPC_WSBH:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
gen_op_wsbh();
break;
case OPC_SEB:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
gen_op_seb();
break;
case OPC_SEH:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
gen_op_seh();
break;
default: /* Invalid */
generate_exception(ctx, EXCP_RI);
break;
}
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
break;
case OPC_RDHWR:
check_insn(env, ctx, ISA_MIPS32R2);
generate_exception(ctx, EXCP_RI);
break;
}
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
break;
case OPC_FORK:
- check_mips_mt(env, ctx);
- GEN_LOAD_REG_TN(T0, rt);
- GEN_LOAD_REG_TN(T1, rs);
+ check_insn(env, ctx, ASE_MT);
+ GEN_LOAD_REG_T0(rt);
+ GEN_LOAD_REG_T1(rs);
gen_op_fork();
break;
case OPC_YIELD:
- check_mips_mt(env, ctx);
- GEN_LOAD_REG_TN(T0, rs);
+ check_insn(env, ctx, ASE_MT);
+ GEN_LOAD_REG_T0(rs);
gen_op_yield();
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DEXTM ... OPC_DEXT:
case OPC_DINSM ... OPC_DINS:
check_insn(env, ctx, ISA_MIPS64R2);
op2 = MASK_DBSHFL(ctx->opcode);
switch (op2) {
case OPC_DSBH:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
gen_op_dsbh();
break;
case OPC_DSHD:
- GEN_LOAD_REG_TN(T1, rt);
+ GEN_LOAD_REG_T1(rt);
gen_op_dshd();
break;
default: /* Invalid */
generate_exception(ctx, EXCP_RI);
break;
}
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
+ break;
#endif
default: /* Invalid */
MIPS_INVAL("special3");
case OPC_MTC0:
case OPC_MFTR:
case OPC_MTTR:
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DMFC0:
case OPC_DMTC0:
#endif
op2 = MASK_MFMC0(ctx->opcode);
switch (op2) {
case OPC_DMT:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dmt();
break;
case OPC_EMT:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_emt();
break;
case OPC_DVPE:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_dvpe();
break;
case OPC_EVPE:
- check_mips_mt(env, ctx);
+ check_insn(env, ctx, ASE_MT);
gen_op_evpe();
break;
case OPC_DI:
generate_exception(ctx, EXCP_RI);
break;
}
- GEN_STORE_TN_REG(rt, T0);
+ GEN_STORE_T0_REG(rt);
break;
case OPC_RDPGPR:
check_insn(env, ctx, ISA_MIPS32R2);
GEN_LOAD_SRSREG_TN(T0, rt);
- GEN_STORE_TN_REG(rd, T0);
+ GEN_STORE_T0_REG(rd);
break;
case OPC_WRPGPR:
check_insn(env, ctx, ISA_MIPS32R2);
- GEN_LOAD_REG_TN(T0, rt);
+ GEN_LOAD_REG_T0(rt);
GEN_STORE_TN_SRSREG(rd, T0);
break;
default:
case OPC_CTC1:
gen_cp1(ctx, op1, rt, rd);
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
case OPC_DMFC1:
case OPC_DMTC1:
check_insn(env, ctx, ISA_MIPS3);
#endif
case OPC_BC1ANY2:
case OPC_BC1ANY4:
- check_cp1_3d(env, ctx);
+ check_cop1x(ctx);
+ check_insn(env, ctx, ASE_MIPS3D);
/* fall through */
case OPC_BC1:
gen_compute_branch1(env, ctx, MASK_BC1(ctx->opcode),
}
break;
-#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
+#if defined(TARGET_MIPS64)
/* MIPS64 opcodes */
case OPC_LWU:
case OPC_LDL ... OPC_LDR:
ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? */
restore_cpu_state(env, &ctx);
#if defined(CONFIG_USER_ONLY)
- ctx.mem_idx = 0;
+ ctx.mem_idx = MIPS_HFLAG_UM;
#else
- ctx.mem_idx = !((ctx.hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
+ ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU;
#endif
#ifdef DEBUG_DISAS
if (loglevel & CPU_LOG_TB_CPU) {
#endif
#ifdef MIPS_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM)
- fprintf(logfile, "\ntb %p super %d cond %04x\n",
+ fprintf(logfile, "\ntb %p idx %d hflags %04x\n",
tb, ctx.mem_idx, ctx.hflags);
#endif
while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
}
}
done_generating:
+ ctx.last_T0_store = NULL;
*gen_opc_ptr = INDEX_op_end;
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
}
}
-#if (defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)) && defined(MIPS_DEBUG_SIGN_EXTENSIONS)
+#if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS)
/* Debug help: The architecture requires 32bit code to maintain proper
   sign-extended values on 64bit machines. */
if (!SIGN_EXT_P(env->PC[env->current_tc]))
cpu_fprintf(f, "BROKEN: pc=0x" TARGET_FMT_lx "\n", env->PC[env->current_tc]);
- if (!SIGN_EXT_P(env->HI[env->current_tc]))
- cpu_fprintf(f, "BROKEN: HI=0x" TARGET_FMT_lx "\n", env->HI[env->current_tc]);
- if (!SIGN_EXT_P(env->LO[env->current_tc]))
- cpu_fprintf(f, "BROKEN: LO=0x" TARGET_FMT_lx "\n", env->LO[env->current_tc]);
+ if (!SIGN_EXT_P(env->HI[0][env->current_tc]))
+ cpu_fprintf(f, "BROKEN: HI=0x" TARGET_FMT_lx "\n", env->HI[0][env->current_tc]);
+ if (!SIGN_EXT_P(env->LO[0][env->current_tc]))
+ cpu_fprintf(f, "BROKEN: LO=0x" TARGET_FMT_lx "\n", env->LO[0][env->current_tc]);
if (!SIGN_EXT_P(env->btarget))
cpu_fprintf(f, "BROKEN: btarget=0x" TARGET_FMT_lx "\n", env->btarget);
env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr);
if (env->hflags & MIPS_HFLAG_FPU)
fpu_dump_state(env, f, cpu_fprintf, flags);
-#if (defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)) && defined(MIPS_DEBUG_SIGN_EXTENSIONS)
+#if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS)
cpu_mips_check_sign_extensions(env, f, cpu_fprintf, flags);
#endif
}
-CPUMIPSState *cpu_mips_init (void)
+#include "translate_init.c"
+
+/* Allocate and initialize a CPU state for the CPU model named by
+   cpu_model.  Returns NULL if the model name is unknown or if the
+   allocation fails; otherwise the state has been registered with the
+   executor (cpu_exec_init) and fully reset. */
+CPUMIPSState *cpu_mips_init (const char *cpu_model)
 {
     CPUMIPSState *env;
+    const mips_def_t *def;
+    /* Resolve the model name first so an unknown model fails before
+       any allocation takes place. */
+    def = cpu_mips_find_by_name(cpu_model);
+    if (!def)
+        return NULL;
     env = qemu_mallocz(sizeof(CPUMIPSState));
     if (!env)
         return NULL;
+    /* Record the resolved model; cpu_reset() presumably consumes it via
+       cpu_mips_register(env, env->cpu_model) — see the cpu_reset hunk. */
+    env->cpu_model = def;
+
     cpu_exec_init(env);
+    /* Keep the original model-name string for later reporting. */
+    env->cpu_model_str = cpu_model;
     cpu_reset(env);
     return env;
 }
#else
env->hflags = MIPS_HFLAG_CP0;
#endif
+ cpu_mips_register(env, env->cpu_model);
}
-
-#include "translate_init.c"