X-Git-Url: http://vcs.maemo.org/git/?a=blobdiff_plain;f=cpu-exec.c;h=44a0f73a58c56f81f64c3783b0be2a8d143467e1;hb=e98a6e40a9d56e16e52a4a839eaa698b658b94e0;hp=1ffeb8e86cd8916c726d6d75b130b385ad27d9c0;hpb=a412ac572ffad45f663795ba7dfa8fa1603ef206;p=qemu diff --git a/cpu-exec.c b/cpu-exec.c index 1ffeb8e..44a0f73 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -18,19 +18,15 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" -#ifdef TARGET_I386 -#include "exec-i386.h" -#endif -#ifdef TARGET_ARM -#include "exec-arm.h" -#endif - +#include "exec.h" #include "disas.h" +int tb_invalidated_flag; + //#define DEBUG_EXEC //#define DEBUG_SIGNAL -#if defined(TARGET_ARM) +#if defined(TARGET_ARM) || defined(TARGET_SPARC) /* XXX: unify with i386 target */ void cpu_loop_exit(void) { @@ -136,6 +132,8 @@ int cpu_exec(CPUState *env1) env->VF = (psr << 3) & 0x80000000; env->cpsr = psr & ~0xf0000000; } +#elif defined(TARGET_SPARC) +#elif defined(TARGET_PPC) #else #error unsupported target CPU #endif @@ -144,6 +142,7 @@ int cpu_exec(CPUState *env1) /* prepare setjmp context for exception handling */ for(;;) { if (setjmp(env->jmp_env) == 0) { + env->current_tb = NULL; /* if an exception is pending, we execute it here */ if (env->exception_index >= 0) { if (env->exception_index >= EXCP_INTERRUPT) { @@ -170,7 +169,9 @@ int cpu_exec(CPUState *env1) do_interrupt(env->exception_index, env->exception_is_int, env->error_code, - env->exception_next_eip); + env->exception_next_eip, 0); +#elif defined(TARGET_PPC) + do_interrupt(env); #endif } env->exception_index = -1; @@ -182,17 +183,18 @@ int cpu_exec(CPUState *env1) tmp_T0 = T0; #endif interrupt_request = env->interrupt_request; - if (interrupt_request) { + if (__builtin_expect(interrupt_request, 0)) { #if defined(TARGET_I386) /* if hardware interrupt pending, we execute it */ if ((interrupt_request & CPU_INTERRUPT_HARD) && - (env->eflags & IF_MASK)) { + (env->eflags & IF_MASK) && + !(env->hflags & HF_INHIBIT_IRQ_MASK)) { int intno; intno = cpu_x86_get_pic_interrupt(env); if (loglevel) { fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno); } - do_interrupt(intno, 0, 0, 0); + do_interrupt(intno, 0, 0, 0, 1); env->interrupt_request &= ~CPU_INTERRUPT_HARD; /* ensure that no TB jump will be modified as the program flow was changed */ @@ -202,7 +204,24 @@ int cpu_exec(CPUState *env1) T0 = 0; #endif } +#elif defined(TARGET_PPC) + if ((interrupt_request & CPU_INTERRUPT_HARD)) { + do_queue_exception(EXCP_EXTERNAL); + if (check_exception_state(env)) + do_interrupt(env); + env->interrupt_request &= ~CPU_INTERRUPT_HARD; + } +#endif + if (interrupt_request & CPU_INTERRUPT_EXITTB) { + env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ +#ifdef __sparc__ + tmp_T0 = 0; +#else + T0 = 0; #endif + } if (interrupt_request & CPU_INTERRUPT_EXIT) { env->interrupt_request &= ~CPU_INTERRUPT_EXIT; env->exception_index = EXCP_INTERRUPT; @@ -228,48 +247,83 @@ int cpu_exec(CPUState *env1) env->cpsr = compute_cpsr(); cpu_arm_dump_state(env, logfile, 0); env->cpsr &= ~0xf0000000; +#elif defined(TARGET_SPARC) + cpu_sparc_dump_state (env, logfile, 0); +#elif defined(TARGET_PPC) + cpu_ppc_dump_state(env, logfile, 0); #else #error unsupported target CPU #endif } #endif - /* we compute the CPU state. We assume it will not - change during the whole generated block. */ + /* we record a subset of the CPU state. 
It will + always be the same before a given translated block + is executed. */ #if defined(TARGET_I386) - flags = (env->segs[R_CS].flags & DESC_B_MASK) - >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT); - flags |= (env->segs[R_SS].flags & DESC_B_MASK) - >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT); - flags |= (((unsigned long)env->segs[R_DS].base | - (unsigned long)env->segs[R_ES].base | - (unsigned long)env->segs[R_SS].base) != 0) << - GEN_FLAG_ADDSEG_SHIFT; - if (env->cr[0] & CR0_PE_MASK) { - if (!(env->eflags & VM_MASK)) - flags |= (env->segs[R_CS].selector & 3) << - GEN_FLAG_CPL_SHIFT; - else - flags |= (1 << GEN_FLAG_VM_SHIFT); - } - flags |= (env->eflags & (IOPL_MASK | TF_MASK)); + flags = env->hflags; + flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); cs_base = env->segs[R_CS].base; pc = cs_base + env->eip; #elif defined(TARGET_ARM) flags = 0; cs_base = 0; pc = (uint8_t *)env->regs[15]; +#elif defined(TARGET_SPARC) + flags = 0; + cs_base = (uint8_t *)env->npc; + pc = (uint8_t *) env->pc; +#elif defined(TARGET_PPC) + flags = 0; + cs_base = 0; + pc = (uint8_t *)env->nip; #else #error unsupported CPU #endif tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base, flags); if (!tb) { + TranslationBlock **ptb1; + unsigned int h; + target_ulong phys_pc, phys_page1, phys_page2, virt_page2; + + spin_lock(&tb_lock); + + tb_invalidated_flag = 0; + + /* find translated block using physical mappings */ + phys_pc = get_phys_addr_code(env, (unsigned long)pc); + phys_page1 = phys_pc & TARGET_PAGE_MASK; + phys_page2 = -1; + h = tb_phys_hash_func(phys_pc); + ptb1 = &tb_phys_hash[h]; + for(;;) { + tb = *ptb1; + if (!tb) + goto not_found; + if (tb->pc == (unsigned long)pc && + tb->page_addr[0] == phys_page1 && + tb->cs_base == (unsigned long)cs_base && + tb->flags == flags) { + /* check next page if needed */ + if (tb->page_addr[1] != -1) { + virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) + + TARGET_PAGE_SIZE; + phys_page2 = get_phys_addr_code(env, virt_page2); + if (tb->page_addr[1] == phys_page2) + goto found; + } else { + goto found; + } + } + ptb1 = &tb->phys_hash_next; + } + not_found: /* if no translated code available, then translate it now */ tb = tb_alloc((unsigned long)pc); if (!tb) { /* flush must be done */ - tb_flush(); + tb_flush(env); /* cannot fail at this point */ tb = tb_alloc((unsigned long)pc); /* don't forget to invalidate previous TB info */ @@ -280,21 +334,31 @@ int cpu_exec(CPUState *env1) tb->tc_ptr = tc_ptr; tb->cs_base = (unsigned long)cs_base; tb->flags = flags; - ret = cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); -#if defined(TARGET_I386) - /* XXX: suppress that, this is incorrect */ - /* if invalid instruction, signal it */ - if (ret != 0) { - /* NOTE: the tb is allocated but not linked, so we - can leave it */ - spin_unlock(&tb_lock); - raise_exception(EXCP06_ILLOP); + cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); + code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); + + /* check next page if needed */ + virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK; + phys_page2 = -1; + if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_phys_addr_code(env, virt_page2); } -#endif + tb_link_phys(tb, phys_pc, phys_page2); + + found: + if (tb_invalidated_flag) { + /* as some TB could have been invalidated because + of memory exceptions while generating the code, we + must recompute the hash index here */ + ptb = &tb_hash[tb_hash_func((unsigned 
long)pc)]; + while (*ptb != NULL) + ptb = &(*ptb)->hash_next; + T0 = 0; + } + /* we add the TB in the virtual pc hash table */ *ptb = tb; tb->hash_next = NULL; tb_link(tb); - code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); spin_unlock(&tb_lock); } #ifdef DEBUG_EXEC @@ -307,10 +371,11 @@ int cpu_exec(CPUState *env1) #ifdef __sparc__ T0 = tmp_T0; #endif - /* see if we can patch the calling TB. XXX: remove TF test */ + /* see if we can patch the calling TB. */ if (T0 != 0 -#if defined(TARGET_I386) - && !(env->eflags & TF_MASK) +#if defined(TARGET_I386) && defined(USE_CODE_COPY) + && (tb->cflags & CF_CODE_COPY) == + (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY) #endif ) { spin_lock(&tb_lock); @@ -334,10 +399,87 @@ int cpu_exec(CPUState *env1) : /* no outputs */ : "r" (gen_func) : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14"); +#elif defined(TARGET_I386) && defined(USE_CODE_COPY) +{ + if (!(tb->cflags & CF_CODE_COPY)) { + gen_func(); + } else { + /* we work with native eflags */ + CC_SRC = cc_table[CC_OP].compute_all(); + CC_OP = CC_OP_EFLAGS; + asm(".globl exec_loop\n" + "\n" + "debug1:\n" + " pushl %%ebp\n" + " fs movl %10, %9\n" + " fs movl %11, %%eax\n" + " andl $0x400, %%eax\n" + " fs orl %8, %%eax\n" + " pushl %%eax\n" + " popf\n" + " fs movl %%esp, %12\n" + " fs movl %0, %%eax\n" + " fs movl %1, %%ecx\n" + " fs movl %2, %%edx\n" + " fs movl %3, %%ebx\n" + " fs movl %4, %%esp\n" + " fs movl %5, %%ebp\n" + " fs movl %6, %%esi\n" + " fs movl %7, %%edi\n" + " fs jmp *%9\n" + "exec_loop:\n" + " fs movl %%esp, %4\n" + " fs movl %12, %%esp\n" + " fs movl %%eax, %0\n" + " fs movl %%ecx, %1\n" + " fs movl %%edx, %2\n" + " fs movl %%ebx, %3\n" + " fs movl %%ebp, %5\n" + " fs movl %%esi, %6\n" + " fs movl %%edi, %7\n" + " pushf\n" + " popl %%eax\n" + " movl %%eax, %%ecx\n" + " andl $0x400, %%ecx\n" + " shrl $9, %%ecx\n" + " andl $0x8d5, %%eax\n" + " fs movl %%eax, %8\n" + " movl $1, %%eax\n" + " subl %%ecx, %%eax\n" + " fs movl %%eax, %11\n" + " fs movl %9, %%ebx\n" /* get T0 value */ + " popl %%ebp\n" + : + : "m" (*(uint8_t *)offsetof(CPUState, regs[0])), + "m" (*(uint8_t *)offsetof(CPUState, regs[1])), + "m" (*(uint8_t *)offsetof(CPUState, regs[2])), + "m" (*(uint8_t *)offsetof(CPUState, regs[3])), + "m" (*(uint8_t *)offsetof(CPUState, regs[4])), + "m" (*(uint8_t *)offsetof(CPUState, regs[5])), + "m" (*(uint8_t *)offsetof(CPUState, regs[6])), + "m" (*(uint8_t *)offsetof(CPUState, regs[7])), + "m" (*(uint8_t *)offsetof(CPUState, cc_src)), + "m" (*(uint8_t *)offsetof(CPUState, tmp0)), + "a" (gen_func), + "m" (*(uint8_t *)offsetof(CPUState, df)), + "m" (*(uint8_t *)offsetof(CPUState, saved_esp)) + : "%ecx", "%edx" + ); + } +} #else gen_func(); #endif env->current_tb = NULL; + /* reset soft MMU for next block (it can currently + only be set by a memory fault) */ +#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU) + if (env->hflags & HF_SOFTMMU_MASK) { + env->hflags &= ~HF_SOFTMMU_MASK; + /* do not allow linking to another block */ + T0 = 0; + } +#endif } } else { } @@ -375,6 +517,8 @@ int cpu_exec(CPUState *env1) #endif #elif defined(TARGET_ARM) env->cpsr = compute_cpsr(); +#elif defined(TARGET_SPARC) +#elif defined(TARGET_PPC) #else #error unsupported target CPU #endif @@ -388,7 +532,7 @@ int cpu_exec(CPUState *env1) return ret; } -#if defined(TARGET_I386) +#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY) void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) { @@ -397,15 +541,11 @@ void 
cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) saved_env = env; env = s; if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { - SegmentCache *sc; selector &= 0xffff; - sc = &env->segs[seg_reg]; - sc->base = (void *)(selector << 4); - sc->limit = 0xffff; - sc->flags = 0; - sc->selector = selector; + cpu_x86_load_seg_cache(env, seg_reg, selector, + (uint8_t *)(selector << 4), 0xffff, 0); } else { - load_seg(seg_reg, selector, 0); + load_seg(seg_reg, selector); } env = saved_env; } @@ -455,7 +595,8 @@ void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32) write caused the exception and otherwise 0'. 'old_set' is the signal set which should be restored */ static inline int handle_cpu_signal(unsigned long pc, unsigned long address, - int is_write, sigset_t *old_set) + int is_write, sigset_t *old_set, + void *puc) { TranslationBlock *tb; int ret; @@ -463,15 +604,16 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address, if (cpu_single_env) env = cpu_single_env; /* XXX: find a correct solution for multithread */ #if defined(DEBUG_SIGNAL) - printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", - pc, address, is_write, *(unsigned long *)old_set); + qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", + pc, address, is_write, *(unsigned long *)old_set); #endif /* XXX: locking issue */ if (is_write && page_unprotect(address)) { return 1; } /* see if it is an MMU fault */ - ret = cpu_x86_handle_mmu_fault(env, address, is_write); + ret = cpu_x86_handle_mmu_fault(env, address, is_write, + ((env->hflags & HF_CPL_MASK) == 3), 0); if (ret < 0) return 0; /* not an MMU fault */ if (ret == 0) @@ -481,38 +623,130 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address, if (tb) { /* the PC is inside the translated code. 
It means that we have a virtual CPU fault */ - cpu_restore_state(tb, env, pc); + cpu_restore_state(tb, env, pc, puc); } + if (ret == 1) { #if 0 - printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n", - env->eip, env->cr[2], env->error_code); + printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n", + env->eip, env->cr[2], env->error_code); #endif - /* we restore the process signal mask as the sigreturn should - do it (XXX: use sigsetjmp) */ - sigprocmask(SIG_SETMASK, old_set, NULL); - raise_exception_err(EXCP0E_PAGE, env->error_code); + /* we restore the process signal mask as the sigreturn should + do it (XXX: use sigsetjmp) */ + sigprocmask(SIG_SETMASK, old_set, NULL); + raise_exception_err(EXCP0E_PAGE, env->error_code); + } else { + /* activate soft MMU for this block */ + env->hflags |= HF_SOFTMMU_MASK; + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit(); + } /* never comes here */ return 1; } #elif defined(TARGET_ARM) static inline int handle_cpu_signal(unsigned long pc, unsigned long address, - int is_write, sigset_t *old_set) + int is_write, sigset_t *old_set, + void *puc) { /* XXX: do more */ return 0; } +#elif defined(TARGET_SPARC) +static inline int handle_cpu_signal(unsigned long pc, unsigned long address, + int is_write, sigset_t *old_set, + void *puc) +{ + /* XXX: locking issue */ + if (is_write && page_unprotect(address)) { + return 1; + } + return 0; +} +#elif defined (TARGET_PPC) +static inline int handle_cpu_signal(unsigned long pc, unsigned long address, + int is_write, sigset_t *old_set, + void *puc) +{ + TranslationBlock *tb; + int ret; + +#if 1 + if (cpu_single_env) + env = cpu_single_env; /* XXX: find a correct solution for multithread */ +#endif +#if defined(DEBUG_SIGNAL) + printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", + pc, address, is_write, *(unsigned long *)old_set); +#endif + /* XXX: locking issue */ + if (is_write && page_unprotect(address)) { + return 1; + } + + /* see if it is an MMU fault */ + ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0); + if (ret < 0) + return 0; /* not an MMU fault */ + if (ret == 0) + return 1; /* the MMU fault was handled without causing real CPU fault */ + + /* now we have a real cpu fault */ + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc, puc); + } + if (ret == 1) { +#if 0 + printf("PF exception: NIP=0x%08x error=0x%x %p\n", + env->nip, env->error_code, tb); +#endif + /* we restore the process signal mask as the sigreturn should + do it (XXX: use sigsetjmp) */ + sigprocmask(SIG_SETMASK, old_set, NULL); + do_queue_exception_err(env->exception_index, env->error_code); + } else { + /* activate soft MMU for this block */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit(); + } + /* never comes here */ + return 1; +} #else #error unsupported target CPU #endif #if defined(__i386__) +#if defined(USE_CODE_COPY) +static void cpu_send_trap(unsigned long pc, int trap, + struct ucontext *uc) +{ + TranslationBlock *tb; + + if (cpu_single_env) + env = cpu_single_env; /* XXX: find a correct solution for multithread */ + /* now we have a real cpu fault */ + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. 
It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc, uc); + } + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); + raise_exception_err(trap, env->error_code); +} +#endif + int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc) { struct ucontext *uc = puc; unsigned long pc; + int trapno; #ifndef REG_EIP /* for glibc 2.1 */ @@ -521,10 +755,18 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, #define REG_TRAPNO TRAPNO #endif pc = uc->uc_mcontext.gregs[REG_EIP]; - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ? - (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0, - &uc->uc_sigmask); + trapno = uc->uc_mcontext.gregs[REG_TRAPNO]; +#if defined(TARGET_I386) && defined(USE_CODE_COPY) + if (trapno == 0x00 || trapno == 0x05) { + /* send division by zero or bound exception */ + cpu_send_trap(pc, trapno, uc); + return 1; + } else +#endif + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + trapno == 0xe ? + (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0, + &uc->uc_sigmask, puc); } #elif defined(__powerpc) @@ -548,7 +790,7 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, is_write = 1; #endif return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); + is_write, &uc->uc_sigmask, puc); } #elif defined(__alpha__) @@ -578,7 +820,7 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, } return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask); + is_write, &uc->uc_sigmask, puc); } #elif defined(__sparc__) @@ -610,7 +852,7 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, } } return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, sigmask); + is_write, sigmask, NULL); } #elif defined(__arm__) @@ -630,6 +872,23 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, &uc->uc_sigmask); } +#elif defined(__mc68000) + +int cpu_signal_handler(int host_signum, struct siginfo *info, + void *puc) +{ + struct ucontext *uc = puc; + unsigned long pc; + int is_write; + + pc = uc->uc_mcontext.gregs[16]; + /* XXX: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, + &uc->uc_sigmask, puc); +} + #else #error host CPU specific signal handler needed
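
For readers following the new lookup path this patch adds in cpu_exec() (the tb_phys_hash scan between the not_found and found labels), here is a minimal standalone sketch of the idea in C. The type TB, the helper lookup_phys_addr(), and the hash constants below are simplified stand-ins invented for illustration, not the actual QEMU definitions; only the matching rule (same pc, cs_base, flags and the same one or two physical code pages) mirrors the code in the patch.

/* Illustrative sketch only: simplified stand-ins for QEMU's real types. */
#include <stdint.h>
#include <stddef.h>

#define PAGE_MASK   (~0xfffUL)
#define PAGE_SIZE   0x1000UL
#define HASH_SIZE   4096

typedef struct TB {
    unsigned long pc;           /* virtual PC of the first instruction       */
    unsigned long cs_base;      /* code segment base the block was built for */
    unsigned int  flags;        /* CPU flags the block was built for         */
    unsigned long page_addr[2]; /* physical pages covered; [1] == -1 if only one */
    struct TB    *phys_hash_next;
} TB;

static TB *phys_hash[HASH_SIZE];

/* Hypothetical stand-in for get_phys_addr_code(); in this sketch the
   "physical" address is simply the virtual one. */
static unsigned long lookup_phys_addr(unsigned long virt_pc)
{
    return virt_pc;
}

static unsigned int phys_hash_func(unsigned long phys_pc)
{
    return (unsigned int)((phys_pc >> 2) & (HASH_SIZE - 1));
}

/* Find an already-translated block by the physical address of its code,
   so a cached block stays reusable (and correctly invalidated on writes)
   independently of the current virtual address mapping. */
TB *find_tb_phys(unsigned long pc, unsigned long cs_base, unsigned int flags)
{
    unsigned long phys_pc    = lookup_phys_addr(pc);
    unsigned long phys_page1 = phys_pc & PAGE_MASK;
    TB *tb;

    for (tb = phys_hash[phys_hash_func(phys_pc)]; tb != NULL;
         tb = tb->phys_hash_next) {
        if (tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags)
            continue;
        if (tb->page_addr[0] != phys_page1)
            continue;
        if (tb->page_addr[1] == (unsigned long)-1)
            return tb;          /* block fits entirely in one page */
        /* block spills into a second page: that page must match too */
        if (tb->page_addr[1] ==
            lookup_phys_addr((pc & PAGE_MASK) + PAGE_SIZE))
            return tb;
    }
    return NULL;                /* caller translates the code now */
}

The design point the sketch tries to make explicit: by hashing on the physical code address rather than the virtual PC, a translated block survives remappings of guest virtual memory, and a write to the underlying physical page can invalidate every block built from it regardless of which virtual alias was executing.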