X-Git-Url: https://vcs.maemo.org/git/?a=blobdiff_plain;f=exec-all.h;h=dc5a10d972ebb667af39838dd04030e9531d261a;hb=064034211a65bb602a32ccee18d92109eb2cd656;hp=82ef3acdca33a25035181e22b82a8ab6312d55e1;hpb=b362e5e067835d04ddde5fb1277272d4b498b970;p=qemu diff --git a/exec-all.h b/exec-all.h index 82ef3ac..dc5a10d 100644 --- a/exec-all.h +++ b/exec-all.h @@ -1,6 +1,6 @@ /* * internal execution defines for qemu - * + * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or @@ -28,10 +28,23 @@ #define tostring(s) #s #endif +#ifndef likely #if __GNUC__ < 3 #define __builtin_expect(x, n) (x) #endif +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef always_inline +#if (__GNUC__ < 3) || defined(__APPLE__) +#define always_inline inline +#else +#define always_inline __attribute__ (( always_inline )) inline +#endif +#endif + #ifdef __i386__ #define REGPARM(n) __attribute((regparm(n))) #else @@ -68,7 +81,7 @@ typedef void (GenOpFunc)(void); typedef void (GenOpFunc1)(long); typedef void (GenOpFunc2)(long, long); typedef void (GenOpFunc3)(long, long, long); - + #if defined(TARGET_I386) void optimize_flags_init(void); @@ -78,32 +91,35 @@ void optimize_flags_init(void); extern FILE *logfile; extern int loglevel; +void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b); +void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b); + int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb); int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb); void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf); int cpu_gen_code(CPUState *env, struct TranslationBlock *tb, int max_code_size, int *gen_code_size_ptr); -int cpu_restore_state(struct TranslationBlock *tb, +int cpu_restore_state(struct TranslationBlock *tb, CPUState *env, unsigned long searched_pc, void *puc); int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb, int max_code_size, int *gen_code_size_ptr); -int cpu_restore_state_copy(struct TranslationBlock *tb, +int cpu_restore_state_copy(struct TranslationBlock *tb, CPUState *env, unsigned long searched_pc, void *puc); void cpu_resume_from_signal(CPUState *env1, void *puc); void cpu_exec_init(CPUState *env); int page_unprotect(target_ulong address, unsigned long pc, void *puc); -void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, +void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, int is_cpu_write_access); void tb_invalidate_page_range(target_ulong start, target_ulong end); void tlb_flush_page(CPUState *env, target_ulong addr); void tlb_flush(CPUState *env, int flush_global); -int tlb_set_page_exec(CPUState *env, target_ulong vaddr, - target_phys_addr_t paddr, int prot, +int tlb_set_page_exec(CPUState *env, target_ulong vaddr, + target_phys_addr_t paddr, int prot, int is_user, int is_softmmu); -static inline int tlb_set_page(CPUState *env, target_ulong vaddr, - target_phys_addr_t paddr, int prot, +static inline int tlb_set_page(CPUState *env, target_ulong vaddr, + target_phys_addr_t paddr, int prot, int is_user, int is_softmmu) { if (prot & PAGE_READ) @@ -153,7 +169,7 @@ static inline int tlb_set_page(CPUState *env, target_ulong vaddr, #define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE) -#if defined(__powerpc__) +#if defined(__powerpc__) #define USE_DIRECT_JUMP #endif #if defined(__i386__) && !defined(_WIN32) @@ -163,7 +179,7 @@ static inline int 
tlb_set_page(CPUState *env, target_ulong vaddr, typedef struct TranslationBlock { target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ target_ulong cs_base; /* CS base for this block */ - unsigned int flags; /* flags defining in which context the code was generated */ + uint64_t flags; /* flags defining in which context the code was generated */ uint16_t size; /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */ uint16_t cflags; /* compile flags */ @@ -174,11 +190,11 @@ typedef struct TranslationBlock { uint8_t *tc_ptr; /* pointer to the translated code */ /* next matching tb for physical address. */ - struct TranslationBlock *phys_hash_next; + struct TranslationBlock *phys_hash_next; /* first and second physical page containing code. The lower bit of the pointer tells the index in page_next[] */ - struct TranslationBlock *page_next[2]; - target_ulong page_addr[2]; + struct TranslationBlock *page_next[2]; + target_ulong page_addr[2]; /* the following data are used to directly call another TB from the code of this one. */ @@ -192,7 +208,7 @@ typedef struct TranslationBlock { the two least significant bits of the pointers to tell what is the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 = jmp_first */ - struct TranslationBlock *jmp_next[2]; + struct TranslationBlock *jmp_next[2]; struct TranslationBlock *jmp_first; } TranslationBlock; @@ -218,7 +234,7 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc) TranslationBlock *tb_alloc(target_ulong pc); void tb_flush(CPUState *env); -void tb_link_phys(TranslationBlock *tb, +void tb_link_phys(TranslationBlock *tb, target_ulong phys_pc, target_ulong phys_page2); extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; @@ -254,7 +270,7 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr } #endif -static inline void tb_set_jmp_target(TranslationBlock *tb, +static inline void tb_set_jmp_target(TranslationBlock *tb, int n, unsigned long addr) { unsigned long offset; @@ -269,7 +285,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb, #else /* set the jump target */ -static inline void tb_set_jmp_target(TranslationBlock *tb, +static inline void tb_set_jmp_target(TranslationBlock *tb, int n, unsigned long addr) { tb->tb_next[n] = addr; @@ -277,14 +293,14 @@ static inline void tb_set_jmp_target(TranslationBlock *tb, #endif -static inline void tb_add_jump(TranslationBlock *tb, int n, +static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next) { /* NOTE: this test is only needed for thread safety */ if (!tb->jmp_next[n]) { /* patch the native jump address */ tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr); - + /* add in TB jmp circular list */ tb->jmp_next[n] = tb_next->jmp_first; tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n)); @@ -337,14 +353,32 @@ do {\ "1:\n");\ } while (0) +#elif defined(__s390__) +/* GCC spills R13, so we have to restore it before branching away */ + +#define GOTO_TB(opname, tbparam, n)\ +do {\ + static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\ + static void __attribute__((used)) *__op_label ## n \ + __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ + __asm__ __volatile__ ( \ + "l %%r13,52(%%r15)\n" \ + "br %0\n" \ + : : "r" (((TranslationBlock*)tbparam)->tb_next[n]));\ + \ + for(;*((int*)0);); /* just to keep GCC busy */ \ +label ## n: ;\ +dummy_label ## n: ;\ +} while(0) + #else /* jump to next block operations (more portable code, does not need 
cache flushing, but slower because of indirect jump) */ #define GOTO_TB(opname, tbparam, n)\ do {\ - static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\ - static void __attribute__((unused)) *__op_label ## n \ + static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\ + static void __attribute__((used)) *__op_label ## n \ __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\ label ## n: ;\ @@ -357,7 +391,7 @@ extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; -#ifdef __powerpc__ +#if defined(__powerpc__) static inline int testandset (int *p) { int ret; @@ -373,35 +407,29 @@ static inline int testandset (int *p) : "cr0", "memory"); return ret; } -#endif - -#ifdef __i386__ +#elif defined(__i386__) static inline int testandset (int *p) { long int readval = 0; - + __asm__ __volatile__ ("lock; cmpxchgl %2, %0" : "+m" (*p), "+a" (readval) : "r" (1) : "cc"); return readval; } -#endif - -#ifdef __x86_64__ +#elif defined(__x86_64__) static inline int testandset (int *p) { long int readval = 0; - + __asm__ __volatile__ ("lock; cmpxchgl %2, %0" : "+m" (*p), "+a" (readval) : "r" (1) : "cc"); return readval; } -#endif - -#ifdef __s390__ +#elif defined(__s390__) static inline int testandset (int *p) { int ret; @@ -409,13 +437,11 @@ static inline int testandset (int *p) __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n" " jl 0b" : "=&d" (ret) - : "r" (1), "a" (p), "0" (*p) + : "r" (1), "a" (p), "0" (*p) : "cc", "memory" ); return ret; } -#endif - -#ifdef __alpha__ +#elif defined(__alpha__) static inline int testandset (int *p) { int ret; @@ -432,9 +458,7 @@ static inline int testandset (int *p) : "m" (*p)); return ret; } -#endif - -#ifdef __sparc__ +#elif defined(__sparc__) static inline int testandset (int *p) { int ret; @@ -446,21 +470,17 @@ static inline int testandset (int *p) return (ret ? 
1 : 0);
 }
-#endif
-
-#ifdef __arm__
+#elif defined(__arm__)
 static inline int testandset (int *spinlock)
 {
     register unsigned int ret;
     __asm__ __volatile__("swp %0, %1, [%2]"
                          : "=r"(ret)
                          : "0"(1), "r"(spinlock));
-    
+
     return ret;
 }
-#endif
-
-#ifdef __mc68000
+#elif defined(__mc68000)
 static inline int testandset (int *p)
 {
     char ret;
@@ -470,15 +490,36 @@ static inline int testandset (int *p)
     : "cc","memory");
     return ret;
 }
-#endif
+#elif defined(__ia64)
 
-#ifdef __ia64
 #include <ia64intrin.h>
 
 static inline int testandset (int *p)
 {
     return __sync_lock_test_and_set (p, 1);
 }
+#elif defined(__mips__)
+static inline int testandset (int *p)
+{
+    int ret;
+
+    __asm__ __volatile__ (
+    "	.set push		\n"
+    "	.set noat		\n"
+    "	.set mips2		\n"
+    "1:	li	$1, 1	\n"
+    "	ll	%0, %1	\n"
+    "	sc	$1, %1	\n"
+    "	beqz	$1, 1b	\n"
+    "	.set pop		"
+    : "=r" (ret), "+R" (*p)
+    :
+    : "memory");
+
+    return ret;
+}
+#else
+#error unimplemented CPU support
 #endif
 
 typedef int spinlock_t;
@@ -521,7 +562,7 @@ extern int tb_invalidated_flag;
 
 #if !defined(CONFIG_USER_ONLY)
 
-void tlb_fill(target_ulong addr, int is_write, int is_user, 
+void tlb_fill(target_ulong addr, int is_write, int is_user,
               void *retaddr);
 
 #define ACCESS_TYPE 3
@@ -572,22 +613,29 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
     is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
 #elif defined (TARGET_SH4)
     is_user = ((env->sr & SR_MD) == 0);
+#elif defined (TARGET_ALPHA)
+    is_user = ((env->ps >> 3) & 3);
+#elif defined (TARGET_M68K)
+    is_user = ((env->sr & SR_S) == 0);
 #else
 #error unimplemented CPU
 #endif
-    if (__builtin_expect(env->tlb_table[is_user][index].addr_code != 
+    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                          (addr & TARGET_PAGE_MASK), 0)) {
         ldub_code(addr);
     }
     pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
     if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
-        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
+#ifdef TARGET_SPARC
+        do_unassigned_access(addr, 0, 1, 0);
+#else
+        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
+#endif
     }
     return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
 }
 #endif
 
-
 #ifdef USE_KQEMU
 
 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
@@ -603,11 +651,11 @@ void kqemu_record_dump(void);
 static inline int kqemu_is_ok(CPUState *env)
 {
     return(env->kqemu_enabled &&
-           (env->cr[0] & CR0_PE_MASK) && 
+           (env->cr[0] & CR0_PE_MASK) &&
            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK) &&
-           (env->kqemu_enabled == 2 || 
+           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
 }
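
Note on the testandset() chain in this diff: every per-architecture variant implements the same contract, atomically setting *p non-zero and returning its previous value, so a zero return means the caller now owns the word. The spinlock_t typedef builds on exactly that contract. Below is a minimal sketch of the spin_lock()/spin_unlock() helpers this header layers on top of testandset(); the bodies are a plausible reconstruction for the CONFIG_USER_ONLY build, not lines from this diff.

#define SPIN_LOCK_UNLOCKED 0

/* Busy-wait until testandset() reports the lock word was previously 0,
   i.e. until this caller is the one that flipped it. */
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

/* Releasing is a plain store; all acquirers serialize through the
   atomic testandset() above. */
static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

/* Non-blocking acquire: returns non-zero iff the lock was taken. */
static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}

In the system-emulation (non-CONFIG_USER_ONLY) build of this era the emulator runs single-threaded, so the corresponding helpers can compile to no-ops.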