#include <inttypes.h>
#include <signal.h>
#include <assert.h>
+#include <sys/mman.h>
#include "cpu-i386.h"
#include "exec.h"
#include "disas.h"
+//#define DEBUG_MMU
+
/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;
static uint32_t *gen_opparam_ptr;
int iopl;
int tf; /* TF cpu flag */
struct TranslationBlock *tb;
+ int popl_esp_hack; /* for correct popl with esp base handling */
} DisasContext;
/* i386 arith/logic operations */
};
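+/* gen_op_arith_T0_T1_cc is indexed by the x86 ALU op number (ADD, OR, ADC,
+   SBB, AND, SUB, XOR, CMP); only the logic ops OR/AND/XOR keep a simple
+   flag-free op here, the other ops are handled explicitly in gen_op() */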
static GenOpFunc *gen_op_arith_T0_T1_cc[8] = {
- gen_op_addl_T0_T1_cc,
- gen_op_orl_T0_T1_cc,
NULL,
+ gen_op_orl_T0_T1,
+ NULL,
+ NULL,
+ gen_op_andl_T0_T1,
+ NULL,
+ gen_op_xorl_T0_T1,
NULL,
- gen_op_andl_T0_T1_cc,
- gen_op_subl_T0_T1_cc,
- gen_op_xorl_T0_T1_cc,
- gen_op_cmpl_T0_T1_cc,
};
static GenOpFunc *gen_op_arithc_T0_T1_cc[3][2] = {
gen_op_fdiv_STN_ST0,
};
-static void gen_op(DisasContext *s1, int op, int ot, int d, int s)
+/* if d == OR_TMP0, the operand is in memory (address in A0) */
+static void gen_op(DisasContext *s1, int op, int ot, int d)
{
- if (d != OR_TMP0)
+ GenOpFunc *gen_update_cc;
+
+ if (d != OR_TMP0) {
gen_op_mov_TN_reg[ot][0][d]();
- if (s != OR_TMP1)
- gen_op_mov_TN_reg[ot][1][s]();
- if (op == OP_ADCL || op == OP_SBBL) {
+ } else {
+ gen_op_ld_T0_A0[ot]();
+ }
+ switch(op) {
+ case OP_ADCL:
+ case OP_SBBL:
if (s1->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s1->cc_op);
gen_op_arithc_T0_T1_cc[ot][op - OP_ADCL]();
s1->cc_op = CC_OP_DYNAMIC;
- } else {
+ /* XXX: incorrect: CC_OP should also be updated AFTER the memory access */
+ gen_update_cc = gen_op_update2_cc;
+ break;
+ case OP_ADDL:
+ gen_op_addl_T0_T1();
+ s1->cc_op = CC_OP_ADDB + ot;
+ gen_update_cc = gen_op_update2_cc;
+ break;
+ case OP_SUBL:
+ gen_op_subl_T0_T1();
+ s1->cc_op = CC_OP_SUBB + ot;
+ gen_update_cc = gen_op_update2_cc;
+ break;
+ default:
+ case OP_ANDL:
+ case OP_ORL:
+ case OP_XORL:
gen_op_arith_T0_T1_cc[op]();
- s1->cc_op = cc_op_arithb[op] + ot;
+ s1->cc_op = CC_OP_LOGICB + ot;
+ gen_update_cc = gen_op_update1_cc;
+ break;
+ case OP_CMPL:
+ gen_op_cmpl_T0_T1_cc();
+ s1->cc_op = CC_OP_SUBB + ot;
+ gen_update_cc = NULL;
+ break;
}
- if (d != OR_TMP0 && op != OP_CMPL)
- gen_op_mov_reg_T0[ot][d]();
-}
-
-static void gen_opi(DisasContext *s1, int op, int ot, int d, int c)
-{
- gen_op_movl_T1_im(c);
- gen_op(s1, op, ot, d, OR_TMP1);
+ if (op != OP_CMPL) {
+ if (d != OR_TMP0)
+ gen_op_mov_reg_T0[ot][d]();
+ else
+ gen_op_st_T0_A0[ot]();
+ }
+ /* the flags update must happen after the memory write (precise
+ exception support) */
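+ /* e.g. for OP_ADDL with a memory destination the emitted sequence is:
+    ld_T0_A0, addl_T0_T1, st_T0_A0, update2_cc */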
+ if (gen_update_cc)
+ gen_update_cc();
}
+/* if d == OR_TMP0, the operand is in memory (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
if (d != OR_TMP0)
gen_op_mov_TN_reg[ot][0][d]();
+ else
+ gen_op_ld_T0_A0[ot]();
if (s1->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s1->cc_op);
if (c > 0) {
- gen_op_incl_T0_cc();
+ gen_op_incl_T0();
s1->cc_op = CC_OP_INCB + ot;
} else {
- gen_op_decl_T0_cc();
+ gen_op_decl_T0();
s1->cc_op = CC_OP_DECB + ot;
}
if (d != OR_TMP0)
gen_op_mov_reg_T0[ot][d]();
+ else
+ gen_op_st_T0_A0[ot]();
+ gen_op_update_inc_cc();
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
}
if (base >= 0) {
+ /* for correct popl handling with esp */
+ if (base == 4 && s->popl_esp_hack)
+ disp += 4;
gen_op_movl_A0_reg[base]();
if (disp != 0)
gen_op_addl_A0_im(disp);
} else {
gen_op_movl_A0_im(disp);
}
+ /* XXX: index == 4 is always invalid */
if (havesib && (index != 4 || scale != 0)) {
gen_op_addl_A0_reg_sN[scale][index]();
}
rm = modrm & 7;
if (mod != 3) {
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_op_ld_T0_A0[ot]();
opreg = OR_TMP0;
} else {
opreg = OR_EAX + rm;
}
- gen_op(s, op, ot, opreg, reg);
- if (mod != 3 && op != 7) {
- gen_op_st_T0_A0[ot]();
- }
+ gen_op_mov_TN_reg[ot][1][reg]();
+ gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
modrm = ldub(s->pc++);
if (mod != 3) {
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0[ot]();
- opreg = OR_TMP1;
} else {
- opreg = OR_EAX + rm;
+ gen_op_mov_TN_reg[ot][1][rm]();
}
- gen_op(s, op, ot, reg, opreg);
+ gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(s, ot);
- gen_opi(s, op, ot, OR_EAX, val);
+ gen_op_movl_T1_im(val);
+ gen_op(s, op, ot, OR_EAX);
break;
}
}
if (mod != 3) {
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_op_ld_T0_A0[ot]();
opreg = OR_TMP0;
} else {
opreg = rm + OR_EAX;
val = (int8_t)insn_get(s, OT_BYTE);
break;
}
-
- gen_opi(s, op, ot, opreg, val);
- if (op != 7 && mod != 3) {
- gen_op_st_T0_A0[ot]();
- }
+ gen_op_movl_T1_im(val);
+ gen_op(s, op, ot, opreg);
}
break;
}
break;
case 3: /* neg */
- gen_op_negl_T0_cc();
+ gen_op_negl_T0();
if (mod != 3) {
gen_op_st_T0_A0[ot]();
} else {
gen_op_mov_reg_T0[ot][rm]();
}
+ gen_op_update_neg_cc();
s->cc_op = CC_OP_SUBB + ot;
break;
case 4: /* mul */
}
if (mod != 3) {
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- if (op != 3 && op != 5)
+ if (op >= 2 && op != 3 && op != 5)
gen_op_ld_T0_A0[ot]();
} else {
gen_op_mov_TN_reg[ot][0][rm]();
switch(op) {
case 0: /* inc Ev */
- gen_inc(s, ot, OR_TMP0, 1);
if (mod != 3)
- gen_op_st_T0_A0[ot]();
+ opreg = OR_TMP0;
else
- gen_op_mov_reg_T0[ot][rm]();
+ opreg = rm;
+ gen_inc(s, ot, opreg, 1);
break;
case 1: /* dec Ev */
- gen_inc(s, ot, OR_TMP0, -1);
if (mod != 3)
- gen_op_st_T0_A0[ot]();
+ opreg = OR_TMP0;
else
- gen_op_mov_reg_T0[ot][rm]();
+ opreg = rm;
+ gen_inc(s, ot, opreg, -1);
break;
case 2: /* call Ev */
/* XXX: optimize the memory operand case (no 'and' is necessary) */
rm = modrm & 7;
gen_op_mov_TN_reg[ot][0][reg]();
gen_op_mov_TN_reg[ot][1][rm]();
- gen_op_addl_T0_T1_cc();
+ gen_op_addl_T0_T1();
gen_op_mov_reg_T0[ot][rm]();
gen_op_mov_reg_T1[ot][reg]();
} else {
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg[ot][0][reg]();
gen_op_ld_T1_A0[ot]();
- gen_op_addl_T0_T1_cc();
+ gen_op_addl_T0_T1();
gen_op_st_T0_A0[ot]();
gen_op_mov_reg_T1[ot][reg]();
}
+ gen_op_update2_cc();
s->cc_op = CC_OP_ADDB + ot;
break;
case 0x1b0:
ot = dflag ? OT_LONG : OT_WORD;
modrm = ldub(s->pc++);
gen_pop_T0(s);
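+ /* if ESP is used as the base of a memory destination, the effective
+    address must be computed with its value after the pop (hence
+    popl_esp_hack) */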
+ s->popl_esp_hack = 1;
gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ s->popl_esp_hack = 0;
gen_pop_update(s);
break;
case 0xc8: /* enter */
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- /* XXX: not restartable */
- gen_stack_A0(s);
- /* pop offset */
- gen_op_ld_T0_A0[1 + s->dflag]();
- if (s->dflag == 0)
- gen_op_andl_T0_ffff();
- /* NOTE: keeping EIP updated is not a problem in case of
- exception */
- gen_op_jmp_T0();
- /* pop selector */
- gen_op_addl_A0_im(2 << s->dflag);
- gen_op_ld_T0_A0[1 + s->dflag]();
- /* pop eflags */
- gen_op_addl_A0_im(2 << s->dflag);
- gen_op_ld_T1_A0[1 + s->dflag]();
- gen_movl_seg_T0(s, R_CS, pc_start - s->cs_base);
- gen_op_movl_T0_T1();
- if (s->dflag) {
- gen_op_movl_eflags_T0();
- } else {
- gen_op_movw_eflags_T0();
- }
- gen_stack_update(s, (6 << s->dflag));
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_iret_protected(s->dflag);
s->cc_op = CC_OP_EFLAGS;
}
s->is_jmp = 1;
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_pop_T0(s);
- if (s->dflag) {
- gen_op_movl_eflags_T0();
+ if (s->cpl == 0) {
+ if (s->dflag) {
+ gen_op_movl_eflags_T0_cpl0();
+ } else {
+ gen_op_movw_eflags_T0_cpl0();
+ }
} else {
- gen_op_movw_eflags_T0();
+ if (s->dflag) {
+ gen_op_movl_eflags_T0();
+ } else {
+ gen_op_movw_eflags_T0();
+ }
}
gen_pop_update(s);
s->cc_op = CC_OP_EFLAGS;
gen_op_cpuid();
break;
case 0xf4: /* hlt */
- /* XXX: if cpl == 0, then should do something else */
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_op_hlt();
+ s->is_jmp = 1;
+ }
break;
case 0x100:
modrm = ldub(s->pc++);
gen_op_lmsw_T0();
}
break;
+ case 7: /* invlpg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_invlpg_A0();
+ }
+ break;
default:
goto illegal_op;
}
[INDEX_op_sbbl_T0_T1_cc] = CC_C,
/* subtle: due to the incl/decl implementation, C is used */
- [INDEX_op_incl_T0_cc] = CC_C,
- [INDEX_op_decl_T0_cc] = CC_C,
+ [INDEX_op_update_inc_cc] = CC_C,
[INDEX_op_into] = CC_O,
/* flags written by an operation */
static uint16_t opc_write_flags[NB_OPS] = {
- [INDEX_op_addl_T0_T1_cc] = CC_OSZAPC,
- [INDEX_op_orl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_update2_cc] = CC_OSZAPC,
+ [INDEX_op_update1_cc] = CC_OSZAPC,
[INDEX_op_adcb_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_adcw_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_adcl_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_sbbb_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_sbbw_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_sbbl_T0_T1_cc] = CC_OSZAPC,
- [INDEX_op_andl_T0_T1_cc] = CC_OSZAPC,
- [INDEX_op_subl_T0_T1_cc] = CC_OSZAPC,
- [INDEX_op_xorl_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_cmpl_T0_T1_cc] = CC_OSZAPC,
- [INDEX_op_negl_T0_cc] = CC_OSZAPC,
+ [INDEX_op_update_neg_cc] = CC_OSZAPC,
/* subtle: due to the incl/decl implementation, C is used */
- [INDEX_op_incl_T0_cc] = CC_OSZAPC,
- [INDEX_op_decl_T0_cc] = CC_OSZAPC,
+ [INDEX_op_update_inc_cc] = CC_OSZAPC,
[INDEX_op_testl_T0_T1_cc] = CC_OSZAPC,
[INDEX_op_mulb_AL_T0] = CC_OSZAPC,
/* simpler form of an operation if no flags need to be generated */
static uint16_t opc_simpler[NB_OPS] = {
- [INDEX_op_addl_T0_T1_cc] = INDEX_op_addl_T0_T1,
- [INDEX_op_orl_T0_T1_cc] = INDEX_op_orl_T0_T1,
- [INDEX_op_andl_T0_T1_cc] = INDEX_op_andl_T0_T1,
- [INDEX_op_subl_T0_T1_cc] = INDEX_op_subl_T0_T1,
- [INDEX_op_xorl_T0_T1_cc] = INDEX_op_xorl_T0_T1,
- [INDEX_op_negl_T0_cc] = INDEX_op_negl_T0,
- [INDEX_op_incl_T0_cc] = INDEX_op_incl_T0,
- [INDEX_op_decl_T0_cc] = INDEX_op_decl_T0,
+ [INDEX_op_update2_cc] = INDEX_op_nop,
+ [INDEX_op_update1_cc] = INDEX_op_nop,
+ [INDEX_op_update_neg_cc] = INDEX_op_nop,
+ [INDEX_op_update_inc_cc] = INDEX_op_nop,
[INDEX_op_rolb_T0_T1_cc] = INDEX_op_rolb_T0_T1,
[INDEX_op_rolw_T0_T1_cc] = INDEX_op_rolw_T0_T1,
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-int gen_intermediate_code(TranslationBlock *tb, int search_pc)
+static inline int gen_intermediate_code_internal(TranslationBlock *tb, int search_pc)
{
DisasContext dc1, *dc = &dc1;
uint8_t *pc_ptr;
dc->cc_op = CC_OP_DYNAMIC;
dc->cs_base = cs_base;
dc->tb = tb;
-
+ dc->popl_esp_hack = 0;
+
gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
gen_opparam_ptr = gen_opparam_buf;
while (lj < j)
gen_opc_instr_start[lj++] = 0;
gen_opc_pc[lj] = (uint32_t)pc_ptr;
+ gen_opc_cc_op[lj] = dc->cc_op;
gen_opc_instr_start[lj] = 1;
}
}
return 0;
}
+int gen_intermediate_code(TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(tb, 0);
+}
+
+int gen_intermediate_code_pc(TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(tb, 1);
+}
+
CPUX86State *cpu_x86_init(void)
{
CPUX86State *env;
free(env);
}
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+/* called when CR3 or the PG bit is modified */
+static int last_pg_state = -1;
+int phys_ram_size;
+int phys_ram_fd;
+uint8_t *phys_ram_base;
+
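+/* When paging is disabled, the guest physical memory is kept identity
+   mapped at host virtual address 0; when PG is set, the mapping is
+   removed so that guest accesses fault and are resolved page by page
+   in cpu_x86_handle_mmu_fault(). */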
+void cpu_x86_update_cr0(CPUX86State *env)
+{
+ int pg_state;
+ void *map_addr;
+
+#ifdef DEBUG_MMU
+ printf("CR0 update: CR0=0x%08x\n", env->cr[0]);
+#endif
+ pg_state = env->cr[0] & CR0_PG_MASK;
+ if (pg_state != last_pg_state) {
+ if (!pg_state) {
+ /* we map the physical memory at address 0 */
+
+ map_addr = mmap((void *)0, phys_ram_size, PROT_WRITE | PROT_READ,
+ MAP_SHARED | MAP_FIXED, phys_ram_fd, 0);
+ if (map_addr == MAP_FAILED) {
+ fprintf(stderr,
+ "Could not map physical memory at host address 0x%08x\n",
+ 0);
+ exit(1);
+ }
+ page_set_flags(0, phys_ram_size,
+ PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
+ } else {
+ /* we unmap the physical memory */
+ munmap((void *)0, phys_ram_size);
+ page_set_flags(0, phys_ram_size, 0);
+ }
+ last_pg_state = pg_state;
+ }
+}
+
+void cpu_x86_update_cr3(CPUX86State *env)
+{
+ if (env->cr[0] & CR0_PG_MASK) {
+#ifdef DEBUG_MMU
+ printf("CR3 update: CR3=%08x\n", env->cr[3]);
+#endif
+ page_unmap();
+ }
+}
+
+void cpu_x86_init_mmu(CPUX86State *env)
+{
+ last_pg_state = -1;
+ cpu_x86_update_cr0(env);
+}
+
+/* XXX: also flush 4MB pages */
+void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
+{
+ int flags;
+ unsigned long virt_addr;
+
+ flags = page_get_flags(addr);
+ if (flags & PAGE_VALID) {
+ virt_addr = addr & ~0xfff;
+ munmap((void *)virt_addr, 4096);
+ page_set_flags(virt_addr, virt_addr + 4096, 0);
+ }
+}
+
+/* return value:
+ -1 = cannot handle fault
+ 0 = nothing more to do
+ 1 = generate PF fault
+*/
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+{
+ uint8_t *pde_ptr, *pte_ptr;
+ uint32_t pde, pte, virt_addr;
+ int cpl, error_code, is_dirty, is_user, prot, page_size;
+ void *map_addr;
+
+ cpl = env->segs[R_CS].selector & 3;
+ is_user = (cpl == 3);
+
+#ifdef DEBUG_MMU
+ printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
+ addr, is_write, is_user, env->eip);
+#endif
+
+ if (env->user_mode_only) {
+ /* user mode only emulation */
+ error_code = 0;
+ goto do_fault;
+ }
+
+ if (!(env->cr[0] & CR0_PG_MASK))
+ return -1;
+
+ /* page directory entry */
+ pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
+ pde = ldl(pde_ptr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ error_code = 0;
+ goto do_fault;
+ }
+ if (is_user) {
+ if (!(pde & PG_USER_MASK))
+ goto do_fault_protect;
+ if (is_write && !(pde & PG_RW_MASK))
+ goto do_fault_protect;
+ } else {
+ if ((env->cr[0] & CR0_WP_MASK) && (pde & PG_USER_MASK) &&
+ is_write && !(pde & PG_RW_MASK))
+ goto do_fault_protect;
+ }
+ /* if the PSE bit is set, we use a 4MB page */
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ is_dirty = is_write && !(pde & PG_DIRTY_MASK);
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ if (is_dirty)
+ pde |= PG_DIRTY_MASK;
+ stl(pde_ptr, pde);
+ }
+
+ pte = pde & ~0x003ff000; /* align to 4MB */
+ page_size = 4096 * 1024;
+ virt_addr = addr & ~0x003fffff;
+ } else {
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ stl(pde_ptr, pde);
+ }
+
+ /* page table entry */
+ pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
+ pte = ldl(pte_ptr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ error_code = 0;
+ goto do_fault;
+ }
+ if (is_user) {
+ if (!(pte & PG_USER_MASK))
+ goto do_fault_protect;
+ if (is_write && !(pte & PG_RW_MASK))
+ goto do_fault_protect;
+ } else {
+ if ((env->cr[0] & CR0_WP_MASK) && (pte & PG_USER_MASK) &&
+ is_write && !(pte & PG_RW_MASK))
+ goto do_fault_protect;
+ }
+ is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+ if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+ pte |= PG_ACCESSED_MASK;
+ if (is_dirty)
+ pte |= PG_DIRTY_MASK;
+ stl(pte_ptr, pte);
+ }
+ page_size = 4096;
+ virt_addr = addr & ~0xfff;
+ }
+ /* the page can be put in the TLB */
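+ /* without CR0.WP, supervisor writes are allowed even on read-only pages */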
+ prot = PROT_READ;
+ if (is_user) {
+ if (pte & PG_RW_MASK)
+ prot |= PROT_WRITE;
+ } else {
+ if (!(env->cr[0] & CR0_WP_MASK) || !(pte & PG_USER_MASK) ||
+ (pte & PG_RW_MASK))
+ prot |= PROT_WRITE;
+ }
+ map_addr = mmap((void *)virt_addr, page_size, prot,
+ MAP_SHARED | MAP_FIXED, phys_ram_fd, pte & ~0xfff);
+ if (map_addr == MAP_FAILED) {
+ fprintf(stderr,
+ "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
+ pte & ~0xfff, virt_addr);
+ exit(1);
+ }
+ page_set_flags(virt_addr, virt_addr + page_size,
+ PAGE_VALID | PAGE_EXEC | prot);
+#ifdef DEBUG_MMU
+ printf("mapping 0x%08x to virt 0x%08x pse=%d\n",
+ pte & ~0xfff, virt_addr, (page_size != 4096));
+#endif
+ return 0;
+ do_fault_protect:
+ error_code = PG_ERROR_P_MASK;
+ do_fault:
+ env->cr[2] = addr;
+ env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
+ if (is_user)
+ env->error_code |= PG_ERROR_U_MASK;
+ return 1;
+}
+
+/***********************************************************/
+/* x86 debug */
+
static const char *cc_op_str[] = {
"DYNAMIC",
"EFLAGS",