/* main execution loop */
-/* thread support */
-
-spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
-
-void cpu_lock(void)
-{
- spin_lock(&global_cpu_lock);
-}
-
-void cpu_unlock(void)
-{
- spin_unlock(&global_cpu_lock);
-}
-
-/* exception support */
-/* NOTE: not static to force relocation generation by GCC */
-void raise_exception_err(int exception_index, int error_code)
-{
- /* NOTE: the register at this point must be saved by hand because
- longjmp restore them */
-#ifdef __sparc__
- /* We have to stay in the same register window as our caller,
- * thus this trick.
- */
- __asm__ __volatile__("restore\n\t"
- "mov\t%o0, %i0");
-#endif
-#ifdef reg_EAX
- env->regs[R_EAX] = EAX;
-#endif
-#ifdef reg_ECX
- env->regs[R_ECX] = ECX;
-#endif
-#ifdef reg_EDX
- env->regs[R_EDX] = EDX;
-#endif
-#ifdef reg_EBX
- env->regs[R_EBX] = EBX;
-#endif
-#ifdef reg_ESP
- env->regs[R_ESP] = ESP;
-#endif
-#ifdef reg_EBP
- env->regs[R_EBP] = EBP;
-#endif
-#ifdef reg_ESI
- env->regs[R_ESI] = ESI;
-#endif
-#ifdef reg_EDI
- env->regs[R_EDI] = EDI;
-#endif
- env->exception_index = exception_index;
- env->error_code = error_code;
- longjmp(env->jmp_env, 1);
-}
-
-/* short cut if error_code is 0 or not present */
-void raise_exception(int exception_index)
-{
- raise_exception_err(exception_index, 0);
-}
-
int cpu_x86_exec(CPUX86State *env1)
{
int saved_T0, saved_T1, saved_A0;
#ifdef reg_EDI
int saved_EDI;
#endif
- int code_gen_size, ret, code_size;
+#ifdef __sparc__
+ int saved_i7, tmp_T0;
+#endif
+ int code_gen_size, ret;
void (*gen_func)(void);
TranslationBlock *tb, **ptb;
uint8_t *tc_ptr, *cs_base, *pc;
unsigned int flags;
-
+
/* first we save global registers */
saved_T0 = T0;
saved_T1 = T1;
saved_EDI = EDI;
EDI = env->regs[R_EDI];
#endif
+#ifdef __sparc__
+ /* we also save i7 because longjmp may not restore it */
+ asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
+#endif
/* put eflags in CPU temporary format */
CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
if (setjmp(env->jmp_env) == 0) {
T0 = 0; /* force lookup of first TB */
for(;;) {
+#ifdef __sparc__
+ /* %g1 (which holds T0) may be clobbered by some libc functions */
+ tmp_T0 = T0;
+#endif
if (env->interrupt_request) {
- raise_exception(EXCP_INTERRUPT);
+ env->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit();
}
#ifdef DEBUG_EXEC
if (loglevel) {
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
- ret = cpu_x86_gen_code(code_gen_ptr, CODE_GEN_MAX_SIZE,
- &code_gen_size, pc, cs_base, flags,
- &code_size, tb);
+ tb->cs_base = (unsigned long)cs_base;
+ tb->flags = flags;
+ ret = cpu_x86_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size);
/* if invalid instruction, signal it */
if (ret != 0) {
/* NOTE: the tb is allocated but not linked, so we
raise_exception(EXCP06_ILLOP);
}
*ptb = tb;
- tb->size = code_size;
- tb->cs_base = (unsigned long)cs_base;
- tb->flags = flags;
tb->hash_next = NULL;
tb_link(tb);
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
lookup_symbol((void *)tb->pc));
}
#endif
+#ifdef __sparc__
+ T0 = tmp_T0;
+#endif
/* see if we can patch the calling TB */
if (T0 != 0 && !(env->eflags & TF_MASK)) {
spin_lock(&tb_lock);
tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
spin_unlock(&tb_lock);
}
-
tc_ptr = tb->tc_ptr;
/* execute the generated code */
gen_func = (void *)tc_ptr;
-#ifdef __sparc__
+#if defined(__sparc__)
__asm__ __volatile__("call %0\n\t"
- " mov %%o7,%%i0"
+ "mov %%o7,%%i0"
: /* no outputs */
: "r" (gen_func)
: "i0", "i1", "i2", "i3", "i4", "i5");
+#elif defined(__arm__)
+ asm volatile ("mov pc, %0\n\t"
+ ".global exec_loop\n\t"
+ "exec_loop:\n\t"
+ : /* no outputs */
+ : "r" (gen_func)
+ : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#else
gen_func();
#endif
#ifdef reg_EDI
EDI = saved_EDI;
#endif
+#ifdef __sparc__
+ asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
+#endif
T0 = saved_T0;
T1 = saved_T1;
A0 = saved_A0;
saved_env = env;
env = s;
- load_seg(seg_reg, selector);
+ if (env->eflags & VM_MASK) {
+ SegmentCache *sc;
+ selector &= 0xffff;
+ sc = &env->seg_cache[seg_reg];
+ /* NOTE: in VM86 mode, limit and seg_32bit are never reloaded,
+ so we must load them here */
+ sc->base = (void *)(selector << 4);
+ sc->limit = 0xffff;
+ sc->seg_32bit = 0;
+ env->segs[seg_reg] = selector;
+ } else {
+ load_seg(seg_reg, selector, 0);
+ }
+ env = saved_env;
+}
+
+void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = s;
+
+ helper_fsave(ptr, data32);
+
+ env = saved_env;
+}
+
+void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = s;
+
+ helper_frstor(ptr, data32);
+
env = saved_env;
}
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set)
{
+ TranslationBlock *tb;
+ int ret;
+ uint32_t found_pc;
+
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx wr=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
if (is_write && page_unprotect(address)) {
return 1;
}
- if (pc >= (unsigned long)code_gen_buffer &&
- pc < (unsigned long)code_gen_buffer + CODE_GEN_BUFFER_SIZE) {
+ tb = tb_find_pc(pc);
+ if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
+ ret = cpu_x86_search_pc(tb, &found_pc, pc);
+ if (ret < 0)
+ return 0;
+ env->eip = found_pc - tb->cs_base;
+ env->cr2 = address;
/* we restore the process signal mask as the sigreturn should
- do it */
+ do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
- /* XXX: need to compute virtual pc position by retranslating
- code. The rest of the CPU state should be correct. */
- env->cr2 = address;
raise_exception_err(EXCP0E_PAGE, 4 | (is_write << 1));
/* never comes here */
return 1;
is_write, &uc->uc_sigmask);
}
+#elif defined(__alpha__)
+
+int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
+ void *puc)
+{
+ struct ucontext *uc = puc;
+ uint32_t *pc = uc->uc_mcontext.sc_pc;
+ uint32_t insn = *pc;
+ int is_write = 0;
+
+ /* XXX: need kernel patch to get write flag faster */
+ switch (insn >> 26) {
+ case 0x0d: // stw
+ case 0x0e: // stb
+ case 0x0f: // stq_u
+ case 0x24: // stf
+ case 0x25: // stg
+ case 0x26: // sts
+ case 0x27: // stt
+ case 0x2c: // stl
+ case 0x2d: // stq
+ case 0x2e: // stl_c
+ case 0x2f: // stq_c
+ is_write = 1;
+ }
+
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ is_write, &uc->uc_sigmask);
+}
+#elif defined(__sparc__)
+
+int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
+ void *puc)
+{
+ uint32_t *regs = (uint32_t *)(info + 1);
+ void *sigmask = (regs + 20);
+ unsigned long pc;
+ int is_write;
+ uint32_t insn;
+
+ /* XXX: is there a standard glibc define? */
+ pc = regs[1];
+ /* XXX: need kernel patch to get write flag faster */
+ is_write = 0;
+ insn = *(uint32_t *)pc;
+ if ((insn >> 30) == 3) {
+ switch((insn >> 19) & 0x3f) {
+ case 0x05: // stb
+ case 0x06: // sth
+ case 0x04: // st
+ case 0x07: // std
+ case 0x24: // stf
+ case 0x27: // stdf
+ case 0x25: // stfsr
+ is_write = 1;
+ break;
+ }
+ }
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ is_write, sigmask);
+}
+
+#elif defined(__arm__)
+
+int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
+ void *puc)
+{
+ struct ucontext *uc = puc;
+ unsigned long pc;
+ int is_write;
+
+ pc = uc->uc_mcontext.gregs[R15];
+ /* XXX: compute is_write */
+ is_write = 0;
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ is_write,
+ &uc->uc_sigmask);
+}
+
#else
#error CPU specific signal handler needed