X-Git-Url: https://vcs.maemo.org/git/?a=blobdiff_plain;f=exec-all.h;h=dc5a10d972ebb667af39838dd04030e9531d261a;hb=064034211a65bb602a32ccee18d92109eb2cd656;hp=045648c062c776d7c1d2b159c9ecd4a08d4a36e7;hpb=a332e112b73d641ca603066e46bb5b05d6c054fc;p=qemu

diff --git a/exec-all.h b/exec-all.h
index 045648c..dc5a10d 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -1,6 +1,6 @@
 /*
  * internal execution defines for qemu
- * 
+ *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
@@ -28,10 +28,23 @@
 #define tostring(s) #s
 #endif
 
+#ifndef likely
 #if __GNUC__ < 3
 #define __builtin_expect(x, n) (x)
 #endif
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#ifndef always_inline
+#if (__GNUC__ < 3) || defined(__APPLE__)
+#define always_inline inline
+#else
+#define always_inline __attribute__ (( always_inline )) inline
+#endif
+#endif
+
 #ifdef __i386__
 #define REGPARM(n) __attribute((regparm(n)))
 #else
@@ -62,12 +75,13 @@ extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
 extern target_ulong gen_opc_jump_pc[2];
+extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
 
 typedef void (GenOpFunc)(void);
 typedef void (GenOpFunc1)(long);
 typedef void (GenOpFunc2)(long, long);
 typedef void (GenOpFunc3)(long, long, long);
- 
+
 #if defined(TARGET_I386)
 
 void optimize_flags_init(void);
@@ -77,37 +91,45 @@ void optimize_flags_init(void);
 extern FILE *logfile;
 extern int loglevel;
 
+void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
+void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
+
 int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
 int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
 void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
 int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                  int max_code_size, int *gen_code_size_ptr);
-int cpu_restore_state(struct TranslationBlock *tb, 
+int cpu_restore_state(struct TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc,
                       void *puc);
 int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                       int max_code_size, int *gen_code_size_ptr);
-int cpu_restore_state_copy(struct TranslationBlock *tb, 
+int cpu_restore_state_copy(struct TranslationBlock *tb,
                            CPUState *env, unsigned long searched_pc,
                            void *puc);
 void cpu_resume_from_signal(CPUState *env1, void *puc);
-void cpu_exec_init(void);
-int page_unprotect(unsigned long address, unsigned long pc, void *puc);
-void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
+void cpu_exec_init(CPUState *env);
+int page_unprotect(target_ulong address, unsigned long pc, void *puc);
+void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                    int is_cpu_write_access);
 void tb_invalidate_page_range(target_ulong start, target_ulong end);
 void tlb_flush_page(CPUState *env, target_ulong addr);
 void tlb_flush(CPUState *env, int flush_global);
-int tlb_set_page(CPUState *env, target_ulong vaddr, 
-                 target_phys_addr_t paddr, int prot, 
-                 int is_user, int is_softmmu);
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu);
+static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
+                               target_phys_addr_t paddr, int prot,
+                               int is_user, int is_softmmu)
+{
+    if (prot & PAGE_READ)
+        prot |= PAGE_EXEC;
+    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
+}
 
 #define CODE_GEN_MAX_SIZE        65536
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
 
-#define CODE_GEN_HASH_BITS     15
-#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)
-
 #define CODE_GEN_PHYS_HASH_BITS     15
 #define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
 
@@ -147,7 +169,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
 
 #define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
 
-#if defined(__powerpc__) 
+#if defined(__powerpc__)
 #define USE_DIRECT_JUMP
 #endif
 #if defined(__i386__) && !defined(_WIN32)
@@ -157,7 +179,7 @@
 typedef struct TranslationBlock {
     target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
     target_ulong cs_base; /* CS base for this block */
-    unsigned int flags; /* flags defining in which context the code was generated */
+    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
@@ -167,13 +189,12 @@ typedef struct TranslationBlock {
 #define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */
 
     uint8_t *tc_ptr;    /* pointer to the translated code */
-    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
     /* next matching tb for physical address. */
-    struct TranslationBlock *phys_hash_next; 
+    struct TranslationBlock *phys_hash_next;
     /* first and second physical page containing code. The lower bit
        of the pointer tells the index in page_next[] */
-    struct TranslationBlock *page_next[2]; 
-    target_ulong page_addr[2]; 
+    struct TranslationBlock *page_next[2];
+    target_ulong page_addr[2];
 
     /* the following data are used to directly call another TB from
        the code of this one. */
@@ -187,13 +208,23 @@ typedef struct TranslationBlock {
        the two least significant bits of the pointers to tell what is
        the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
        jmp_first */
-    struct TranslationBlock *jmp_next[2]; 
+    struct TranslationBlock *jmp_next[2];
     struct TranslationBlock *jmp_first;
 } TranslationBlock;
 
-static inline unsigned int tb_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
 {
-    return pc & (CODE_GEN_HASH_SIZE - 1);
+    target_ulong tmp;
+    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
+}
+
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+    target_ulong tmp;
+    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
+            (tmp & TB_JMP_ADDR_MASK));
 }
 
 static inline unsigned int tb_phys_hash_func(unsigned long pc)
@@ -203,41 +234,14 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc)
 
 TranslationBlock *tb_alloc(target_ulong pc);
 void tb_flush(CPUState *env);
-void tb_link(TranslationBlock *tb);
-void tb_link_phys(TranslationBlock *tb, 
+void tb_link_phys(TranslationBlock *tb,
                   target_ulong phys_pc, target_ulong phys_page2);
 
-extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 
 extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
 extern uint8_t *code_gen_ptr;
 
-/* find a translation block in the translation cache. If not found, 
-   return NULL and the pointer to the last element of the list in pptb */
-static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
-                                        target_ulong pc,
-                                        target_ulong cs_base,
-                                        unsigned int flags)
-{
-    TranslationBlock **ptb, *tb;
-    unsigned int h;
-
-    h = tb_hash_func(pc);
-    ptb = &tb_hash[h];
-    for(;;) {
-        tb = *ptb;
-        if (!tb)
-            break;
-        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
-            return tb;
-        ptb = &tb->hash_next;
-    }
-    *pptb = ptb;
-    return NULL;
-}
-
-
 #if defined(USE_DIRECT_JUMP)
 
 #if defined(__powerpc__)
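
Note: the hunk above drops the old single-level `tb_hash_func()`/`tb_hash[]` lookup (and the inline `tb_find()` that walked it) in favour of two structures: the authoritative `tb_phys_hash[]` table keyed by physical PC, plus a small direct-mapped per-CPU cache indexed by `tb_jmp_cache_hash_func(pc)`. A minimal sketch of the resulting two-level lookup follows; `env->tb_jmp_cache[]` comes from the matching cpu-defs.h change and `tb_find_slow()` is a hypothetical stand-in for the slow-path walk of `tb_phys_hash[]`, so read it as an illustration rather than the exact cpu-exec.c code.

    /* Sketch: fast TB lookup through the per-CPU jump cache, falling
       back to the physical-address hash table on a miss. */
    static TranslationBlock *tb_find_fast(CPUState *env, target_ulong pc,
                                          target_ulong cs_base, uint64_t flags)
    {
        unsigned int h = tb_jmp_cache_hash_func(pc);
        TranslationBlock *tb = env->tb_jmp_cache[h];

        if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                     tb->flags != flags)) {
            tb = tb_find_slow(env, pc, cs_base, flags); /* walks tb_phys_hash[] */
            env->tb_jmp_cache[h] = tb;                  /* refill the cache slot */
        }
        return tb;
    }

Splitting the index into a page-derived part and an in-page part is deliberate: it lets `tb_jmp_cache_hash_page()` enumerate every cache slot belonging to one guest page, so all of them can be cleared when that page is invalidated.
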
@@ -266,7 +270,7 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr
 }
 #endif
 
-static inline void tb_set_jmp_target(TranslationBlock *tb, 
+static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, unsigned long addr)
 {
     unsigned long offset;
@@ -281,7 +285,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
 #else
 
 /* set the jump target */
-static inline void tb_set_jmp_target(TranslationBlock *tb, 
+static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, unsigned long addr)
 {
     tb->tb_next[n] = addr;
@@ -289,14 +293,14 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
 
 #endif
 
-static inline void tb_add_jump(TranslationBlock *tb, int n, 
+static inline void tb_add_jump(TranslationBlock *tb, int n,
                                TranslationBlock *tb_next)
 {
     /* NOTE: this test is only needed for thread safety */
     if (!tb->jmp_next[n]) {
         /* patch the native jump address */
         tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);
-        
+
         /* add in TB jmp circular list */
         tb->jmp_next[n] = tb_next->jmp_first;
         tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
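
Note: `tb_add_jump()` records the new back-link by storing `(long)(tb) | (n)` into `jmp_first`, i.e. it tags the low bits of an aligned `TranslationBlock` pointer with the jump-slot number, exactly as the struct comment describes (0 = `jmp_next[0]`, 1 = `jmp_next[1]`, 2 = `jmp_first`). A hedged sketch of the decoding side, with hypothetical helper names, just to make the convention explicit:

    /* Sketch: decoding one entry of the circular jmp list.  TBs are
       word-aligned, so the two low pointer bits are free for the tag. */
    static inline TranslationBlock *jmp_list_tb(TranslationBlock *entry)
    {
        return (TranslationBlock *)((long)entry & ~3);  /* strip tag bits */
    }

    static inline int jmp_list_slot(TranslationBlock *entry)
    {
        return (long)entry & 3;  /* 0 or 1: jmp_next[] index; 2: jmp_first */
    }

Walking the list with this decoding is what the unlink code in exec.c has to do when a block is invalidated and every jump patched into it must be reset.
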
@@ -349,14 +353,32 @@ do {\
                   "1:\n");\
 } while (0)
 
+#elif defined(__s390__)
+/* GCC spills R13, so we have to restore it before branching away */
+
+#define GOTO_TB(opname, tbparam, n)\
+do {\
+    static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\
+    static void __attribute__((used)) *__op_label ## n \
+        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
+    __asm__ __volatile__ ( \
+        "l %%r13,52(%%r15)\n" \
+        "br %0\n" \
+        : : "r" (((TranslationBlock*)tbparam)->tb_next[n]));\
+    \
+    for(;*((int*)0);); /* just to keep GCC busy */ \
+label ## n: ;\
+dummy_label ## n: ;\
+} while(0)
+
 #else
 
 /* jump to next block operations (more portable code, does not need
    cache flushing, but slower because of indirect jump) */
 #define GOTO_TB(opname, tbparam, n)\
 do {\
-    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
-    static void __attribute__((unused)) *__op_label ## n \
+    static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\
+    static void __attribute__((used)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
     goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
 label ## n: ;\
@@ -365,20 +387,11 @@ dummy_label ## n: ;\
 
 #endif
 
-/* XXX: will be suppressed */
-#define JUMP_TB(opname, tbparam, n, eip)\
-do {\
-    GOTO_TB(opname, tbparam, n);\
-    T0 = (long)(tbparam) + (n);\
-    EIP = (int32_t)eip;\
-    EXIT_TB();\
-} while (0)
-
 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 
-#ifdef __powerpc__
+#if defined(__powerpc__)
 static inline int testandset (int *p)
 {
     int ret;
@@ -394,35 +407,29 @@ static inline int testandset (int *p)
                          : "cr0", "memory");
     return ret;
 }
-#endif
-
-#ifdef __i386__
+#elif defined(__i386__)
 static inline int testandset (int *p)
 {
     long int readval = 0;
-    
+
     __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                           : "+m" (*p), "+a" (readval)
                           : "r" (1)
                           : "cc");
     return readval;
 }
-#endif
-
-#ifdef __x86_64__
+#elif defined(__x86_64__)
 static inline int testandset (int *p)
 {
     long int readval = 0;
-    
+
    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
 }
-#endif
-
-#ifdef __s390__
+#elif defined(__s390__)
 static inline int testandset (int *p)
 {
     int ret;
@@ -430,13 +437,11 @@ static inline int testandset (int *p)
     __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                           "   jl    0b"
                           : "=&d" (ret)
-                          : "r" (1), "a" (p), "0" (*p) 
+                          : "r" (1), "a" (p), "0" (*p)
                           : "cc", "memory" );
     return ret;
 }
-#endif
-
-#ifdef __alpha__
+#elif defined(__alpha__)
 static inline int testandset (int *p)
 {
     int ret;
@@ -453,9 +458,7 @@ static inline int testandset (int *p)
                          : "m" (*p));
     return ret;
 }
-#endif
-
-#ifdef __sparc__
+#elif defined(__sparc__)
 static inline int testandset (int *p)
 {
        int ret;
@@ -467,21 +470,17 @@ static inline int testandset (int *p)
 
        return (ret ? 1 : 0);
 }
-#endif
-
-#ifdef __arm__
+#elif defined(__arm__)
 static inline int testandset (int *spinlock)
 {
     register unsigned int ret;
     __asm__ __volatile__("swp %0, %1, [%2]"
                          : "=r"(ret)
                          : "0"(1), "r"(spinlock));
-    
+
     return ret;
 }
-#endif
-
-#ifdef __mc68000
+#elif defined(__mc68000)
 static inline int testandset (int *p)
 {
     char ret;
@@ -491,15 +490,36 @@ static inline int testandset (int *p)
             : "cc","memory");
     return ret;
 }
-#endif
+#elif defined(__ia64)
 
-#ifdef __ia64
 #include <ia64intrin.h>
 
 static inline int testandset (int *p)
 {
     return __sync_lock_test_and_set (p, 1);
 }
+#elif defined(__mips__)
+static inline int testandset (int *p)
+{
+    int ret;
+
+    __asm__ __volatile__ (
+        "       .set push               \n"
+        "       .set noat               \n"
+        "       .set mips2              \n"
+        "1:     li      $1, 1           \n"
+        "       ll      %0, %1          \n"
+        "       sc      $1, %1          \n"
+        "       beqz    $1, 1b          \n"
+        "       .set pop                "
+        : "=r" (ret), "+R" (*p)
+        :
+        : "memory");
+
+    return ret;
+}
+#else
+#error unimplemented CPU support
 #endif
 
 typedef int spinlock_t;
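
Note: every host branch of the `testandset()` ladder above returns the previous lock value, so the spinlock layer that follows `typedef int spinlock_t;` (elided from this diff's context) only has to spin until it reads back 0. A reconstructed sketch of that consumer, using the `spin_lock`/`spin_unlock` names qemu gives it; treat it as illustrative rather than a quote of the elided lines:

    /* Sketch: the minimal spinlock built on testandset().  testandset()
       returns the old value, so a 0 return means we took the lock. */
    static inline void spin_lock(spinlock_t *lock)
    {
        while (testandset(lock))
            ;              /* busy-wait; someone else holds the lock */
    }

    static inline void spin_unlock(spinlock_t *lock)
    {
        *lock = 0;         /* plain store releases the lock */
    }

This contract is also why the new MIPS variant loops on `sc` failure rather than on the loaded value: the ll/sc pair must retry until the store-conditional succeeds, and only then is the old value left in `%0` meaningful.
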
@@ -542,7 +562,7 @@ extern int tb_invalidated_flag;
 
 #if !defined(CONFIG_USER_ONLY)
 
-void tlb_fill(target_ulong addr, int is_write, int is_user, 
+void tlb_fill(target_ulong addr, int is_write, int is_user,
               void *retaddr);
 
 #define ACCESS_TYPE 3
@@ -589,42 +609,55 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
     is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
 #elif defined (TARGET_SPARC)
     is_user = (env->psrs == 0);
+#elif defined (TARGET_ARM)
+    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
+#elif defined (TARGET_SH4)
+    is_user = ((env->sr & SR_MD) == 0);
+#elif defined (TARGET_ALPHA)
+    is_user = ((env->ps >> 3) & 3);
+#elif defined (TARGET_M68K)
+    is_user = ((env->sr & SR_S) == 0);
 #else
-#error "Unimplemented !" 
+#error unimplemented CPU
 #endif
-    if (__builtin_expect(env->tlb_read[is_user][index].address != 
+    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                          (addr & TARGET_PAGE_MASK), 0)) {
         ldub_code(addr);
     }
-    pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
-    if (pd > IO_MEM_ROM) {
-        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
+    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
+    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
+#ifdef TARGET_SPARC
+        do_unassigned_access(addr, 0, 1, 0);
+#else
+        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
+#endif
     }
-    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
+    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
 }
 #endif
 
-
 #ifdef USE_KQEMU
+#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
+
 int kqemu_init(CPUState *env);
 int kqemu_cpu_exec(CPUState *env);
 void kqemu_flush_page(CPUState *env, target_ulong addr);
 void kqemu_flush(CPUState *env, int global);
 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
+void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
 void kqemu_cpu_interrupt(CPUState *env);
+void kqemu_record_dump(void);
 
 static inline int kqemu_is_ok(CPUState *env)
 {
     return(env->kqemu_enabled &&
-           (env->hflags & HF_CPL_MASK) == 3 &&
-           (env->eflags & IOPL_MASK) != IOPL_MASK &&
-           (env->cr[0] & CR0_PE_MASK) && 
+           (env->cr[0] & CR0_PE_MASK) &&
+           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
-           !(env->eflags & VM_MASK)
-#if 1
-           && (env->ldt.limit == 0 || env->ldt.limit == 0x27)
-#endif
-           );
+           !(env->eflags & VM_MASK) &&
+           (env->kqemu_enabled == 2 ||
+            ((env->hflags & HF_CPL_MASK) == 3 &&
+             (env->eflags & IOPL_MASK) != IOPL_MASK)));
 }
 
 #endif
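
Note: the rewritten `get_phys_addr_code()` is the consumer of the new `addr_code` TLB field (previously `tlb_read[].address`): an entry may satisfy instruction fetches only when its page-aligned tag matches the fetch address exactly, and any I/O bits set in the low part of `addr_code` make the comparison fail, pushing the fetch through `ldub_code()` and hence `tlb_fill()`. A small hedged restatement of that hit test, with a hypothetical helper name (`CPUTLBEntry` being the entry type from cpu-defs.h):

    /* Sketch: the addr_code fast-path test used above.  A mismatch in
       the page tag, or io bits folded into addr_code, forces a refill. */
    static inline int tlb_code_hit(CPUTLBEntry *e, target_ulong addr)
    {
        return e->addr_code == (addr & TARGET_PAGE_MASK);
    }

On a hit, the entry's `addend` converts the guest address, as the final `return` shows; the `IO_MEM_ROMD` exception added above lets ROM devices that are backed by real memory (flash, for instance) execute in place instead of triggering `cpu_abort()`.
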