* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+/* allow seeing translation results - the slowdown should be negligible, so we leave it */
+#define DEBUG_DISAS
+
+/* is_jmp field values */
+#define DISAS_NEXT 0 /* next instruction can be analyzed */
+#define DISAS_JUMP 1 /* only pc was modified dynamically */
+#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
+#define DISAS_TB_JUMP 3 /* only pc was modified statically */
+
+struct TranslationBlock;
+
+/* XXX: make a safe guess about sizes */
+#define MAX_OP_PER_INSTR 32
+#define OPC_BUF_SIZE 512
+#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
+
+extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
+extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
+extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
+extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
+extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
+
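/* Editorial sketch, not part of the patch: one way a target's
   gen_intermediate_code() loop could use the DISAS_* values together with
   OPC_MAX_SIZE and the gen_opc_* tables above.  'DisasContext',
   'gen_opc_ptr' and 'disas_insn()' are hypothetical names used only to
   make the sketch read as real code. */
typedef struct DisasContext { unsigned long pc; int is_jmp; } DisasContext;
extern uint16_t *gen_opc_ptr;         /* current write position in gen_opc_buf */
extern void disas_insn(DisasContext *dc);

static void translate_block_sketch(DisasContext *dc)
{
    dc->is_jmp = DISAS_NEXT;
    do {
        /* record per-insn metadata so cpu_restore_state() can later map a
           host pc back to the guest pc */
        gen_opc_instr_start[gen_opc_ptr - gen_opc_buf] = 1;
        gen_opc_pc[gen_opc_ptr - gen_opc_buf] = dc->pc;
        disas_insn(dc);               /* may set dc->is_jmp on a branch */
    } while (dc->is_jmp == DISAS_NEXT &&
             gen_opc_ptr < gen_opc_buf + OPC_MAX_SIZE);
}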
+#if defined(TARGET_I386)
+
#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT 2
#define GEN_FLAG_CPL_SHIFT 9
#define GEN_FLAG_IOPL_SHIFT 12 /* same position as eflags */
-struct TranslationBlock;
-int cpu_x86_gen_code(struct TranslationBlock *tb,
- int max_code_size, int *gen_code_size_ptr);
-int cpu_x86_search_pc(struct TranslationBlock *tb,
- uint32_t *found_pc, unsigned long searched_pc);
-void cpu_x86_tblocks_init(void);
-void page_init(void);
+#endif
+
+extern FILE *logfile;
+extern int loglevel;
+
+int gen_intermediate_code(struct TranslationBlock *tb);
+int gen_intermediate_code_pc(struct TranslationBlock *tb);
+void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
+int cpu_gen_code(struct TranslationBlock *tb,
+ int max_code_size, int *gen_code_size_ptr);
+int cpu_restore_state(struct TranslationBlock *tb,
+ CPUState *env, unsigned long searched_pc);
+void cpu_exec_init(void);
int page_unprotect(unsigned long address);
+void page_unmap(void);
#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
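/* Editorial sketch, not part of the patch: the expected call sequence for
   cpu_gen_code() and cpu_restore_state() declared above.  'tb', 'env' and
   'host_pc' are assumed to be set up by the caller; error handling is
   omitted. */
static void gen_and_restore_sketch(struct TranslationBlock *tb, CPUState *env,
                                   unsigned long host_pc)
{
    int gen_code_size;

    /* translate the guest code of 'tb' into host code */
    cpu_gen_code(tb, CODE_GEN_MAX_SIZE, &gen_code_size);

    /* later, if a fault occurs at host address 'host_pc' while executing
       the generated code, rebuild the precise guest CPU state */
    cpu_restore_state(tb, env, host_pc);
}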
#ifdef USE_DIRECT_JUMP
uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
- uint8_t *tb_next[2]; /* address of jump generated code */
+ uint32_t tb_next[2]; /* address of jump generated code */
#endif
/* list of TBs jumping to this one. This is a circular list using
the two least significant bits of the pointers to tell what is
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, unsigned long addr)
{
- tb->tb_next[n] = (void *)addr;
+ tb->tb_next[n] = addr;
}
#endif
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
+#if defined(__powerpc__)
+
+/* on PowerPC we patch the jump instruction directly */
+#define JUMP_TB(tbparam, n, eip)\
+do {\
+ static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
+ asm volatile ("b %0" : : "i" (&__op_jmp ## n));\
+label ## n:\
+ T0 = (long)(tbparam) + (n);\
+ EIP = eip;\
+} while (0)
+
+#else
+
+/* operations to jump to the next block (more portable code: no cache
+   flushing needed, but slower because of the indirect jump) */
+#define JUMP_TB(tbparam, n, eip)\
+do {\
+ static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
+ goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
+label ## n:\
+ T0 = (long)(tbparam) + (n);\
+ EIP = eip;\
+} while (0)
+
+#endif
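/* Editorial sketch, not part of the patch: what the main execution loop can
   do with T0 after the generated code returns through an unchained JUMP_TB
   slot.  It undoes the "T0 = (long)(tbparam) + (n)" encoding above and
   patches the slot so the two TBs are chained directly.  'next_tb' and the
   'tc_ptr' field (the TB's generated-code pointer) are assumed to exist in
   the surrounding code. */
static void chain_tb_sketch(unsigned long T0, TranslationBlock *next_tb)
{
    TranslationBlock *prev_tb = (TranslationBlock *)(T0 & ~3);
    int n = T0 & 3;                /* which of the two jump slots was taken */

    if (prev_tb != NULL) {
        /* make jump slot 'n' of the previous TB go straight to next_tb */
        tb_set_jmp_target(prev_tb, n, (unsigned long)next_tb->tc_ptr);
    }
}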
+
#ifdef __powerpc__
static inline int testandset (int *p)
{
#endif
#ifdef __alpha__
-int testandset (int *p)
+static inline int testandset (int *p)
{
int ret;
unsigned long one;
}
#endif
+#ifdef __arm__
+static inline int testandset (int *spinlock)
+{
+ register unsigned int ret;
+ __asm__ __volatile__("swp %0, %1, [%2]"
+ : "=r"(ret)
+ : "0"(1), "r"(spinlock));
+
+ return ret;
+}
+#endif
+
typedef int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0
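/* Editorial sketch, not part of the patch: spin_lock()/spin_unlock() as
   they can be built on the per-architecture testandset() above and the
   spinlock_t/SPIN_LOCK_UNLOCKED definitions. */
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock) != 0) {
        /* busy-wait: another CPU/thread holds the lock */
    }
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = SPIN_LOCK_UNLOCKED;
}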