/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* allows seeing translation results - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
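/* usage examples (illustrative): glue(foo, 1) expands to the single
   token foo1; stringify(TARGET_PAGE_BITS) expands to the macro's value
   as a string (e.g. "12"), because the argument is macro-expanded
   before the inner tostring() stringizes it */
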
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute__((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
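/* Hedged sketch (not code from this header): a target front end loops
   over guest instructions until is_jmp leaves DISAS_NEXT; "dc" and
   "disas_insn" stand in for the per-target translator state and
   decoder. */
#if 0
do {
    pc = disas_insn(dc, pc);        /* emit micro-ops for one insn */
} while (dc->is_jmp == DISAS_NEXT && pc < next_page_start);
#endif
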
struct TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)
void optimize_flags_init(void);
#endif

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(unsigned long address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    /* readable pages are implicitly executable here */
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
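/* Hedged usage sketch: a target's MMU fault handler typically ends by
   installing the translated mapping with this wrapper. The variable
   names below are illustrative of the cpu_*_handle_mmu_fault paths,
   not definitions from this header. */
#if 0
ret = tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                   paddr & TARGET_PAGE_MASK,
                   prot, is_user, is_softmmu);
#endif
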
#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;

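/* Illustrative helper (added for exposition, not part of the original
   API): advance one step along the jmp_first circular list described
   above. The low two bits of a list pointer select which jmp_next[]
   slot continues the chain; the tag value 2 marks the owning TB
   itself, i.e. the end of the list when starting from tb->jmp_first. */
static inline TranslationBlock *tb_jmp_list_step(TranslationBlock *ptb)
{
    long n = (long)ptb & 3;
    TranslationBlock *tb1 = (TranslationBlock *)((long)ptb & ~3);
    if (n == 2)
        return NULL;            /* back at the owning TB: end of list */
    return tb1->jmp_next[n];    /* tag 0/1 selects the continuing slot */
}
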
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

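/* Hedged usage sketch: TB lookup normally tries the per-CPU jump cache
   first and falls back to the physical hash chain, in the style of
   tb_find_fast/tb_find_slow in cpu-exec.c. Variable names here are
   illustrative, not declared in this header. */
#if 0
tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags) {
    phys_pc = get_phys_addr_code(env, pc);
    for (tb = tb_phys_hash[tb_phys_hash_func(phys_pc)]; tb != NULL;
         tb = tb->phys_hash_next) {
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            break;
    }
}
#endif
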
TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination (the 24-bit LI field of the b insn) */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination (rel32 operand of the jmp insn) */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush the icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}

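/* Hedged sketch of the caller side (after the style of the main loop
   in cpu-exec.c): when a TB exits, the previous TB and its jump slot
   are recovered from the low bits of T0, then the two blocks are
   chained under tb_lock. T0 and tb are names from that context, not
   from this header. */
#if 0
if (T0 != 0 && tb->page_addr[1] == (target_ulong)-1) {
    spin_lock(&tb_lock);
    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
    spin_unlock(&tb_lock);
}
#endif
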
TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#define ASM_NAME(x) "_" #x
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#define ASM_NAME(x) #x
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
                __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
/* the system emulator is single-threaded here, so the locks can be
   no-ops */
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

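/* Hedged usage sketch: tb_lock (declared below) guards updates to the
   translation structures; a caller in the style of tb_gen_code would
   bracket its work like this (illustrative only): */
#if 0
spin_lock(&tb_lock);
tb = tb_alloc(pc);
/* ... generate code, then tb_link_phys(tb, phys_pc, phys_page2) ... */
spin_unlock(&tb_lock);
#endif
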
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        /* the page is not in the TLB: fill it (this can raise a guest
           exception) */
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif

#ifdef USE_KQEMU
int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);

static inline int kqemu_is_ok(CPUState *env)
{
    return (env->kqemu_enabled &&
            (env->hflags & HF_CPL_MASK) == 3 &&
            (env->eflags & IOPL_MASK) != IOPL_MASK &&
            (env->cr[0] & CR0_PE_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK));
}
#endif /* USE_KQEMU */