* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "config.h"
-#ifdef TARGET_I386
-#include "exec-i386.h"
-#endif
-#ifdef TARGET_ARM
-#include "exec-arm.h"
-#endif
-
+#include "exec.h"
#include "disas.h"
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
-#if defined(TARGET_ARM)
+#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
env->VF = (psr << 3) & 0x80000000;
env->cpsr = psr & ~0xf0000000;
}
+#elif defined(TARGET_SPARC)
#else
#error unsupported target CPU
#endif
do_interrupt(env->exception_index,
env->exception_is_int,
env->error_code,
- env->exception_next_eip);
+ env->exception_next_eip, 0);
#endif
}
env->exception_index = -1;
tmp_T0 = T0;
#endif
interrupt_request = env->interrupt_request;
- if (interrupt_request) {
+ if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
/* if hardware interrupt pending, we execute it */
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->eflags & IF_MASK)) {
+ (env->eflags & IF_MASK) &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
intno = cpu_x86_get_pic_interrupt(env);
if (loglevel) {
fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
}
- do_interrupt(intno, 0, 0, 0);
+ do_interrupt(intno, 0, 0, 0, 1);
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
/* ensure that no TB jump will be modified as
the program flow was changed */
env->cpsr = compute_cpsr();
cpu_arm_dump_state(env, logfile, 0);
env->cpsr &= ~0xf0000000;
+#elif defined(TARGET_SPARC)
+ cpu_sparc_dump_state (env, logfile, 0);
#else
#error unsupported target CPU
#endif
}
#endif
- /* we compute the CPU state. We assume it will not
- change during the whole generated block. */
+ /* we record a subset of the CPU state. It will
+ always be the same before a given translated block
+ is executed. */
#if defined(TARGET_I386)
- flags = (env->segs[R_CS].flags & DESC_B_MASK)
- >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
- flags |= (env->segs[R_SS].flags & DESC_B_MASK)
- >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
- flags |= (((unsigned long)env->segs[R_DS].base |
- (unsigned long)env->segs[R_ES].base |
- (unsigned long)env->segs[R_SS].base) != 0) <<
- GEN_FLAG_ADDSEG_SHIFT;
- flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
- flags |= env->soft_mmu << GEN_FLAG_SOFT_MMU_SHIFT;
- flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
- flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+ flags = env->hflags;
+ flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
cs_base = env->segs[R_CS].base;
pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
flags = 0;
cs_base = 0;
pc = (uint8_t *)env->regs[15];
+#elif defined(TARGET_SPARC)
+ flags = 0;
+ cs_base = 0;
+ if (env->npc) {
+ env->pc = env->npc;
+ env->npc = 0;
+ }
+ pc = (uint8_t *) env->pc;
#else
#error unsupported CPU
#endif
tb->tc_ptr = tc_ptr;
tb->cs_base = (unsigned long)cs_base;
tb->flags = flags;
- ret = cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
-#if defined(TARGET_I386)
- /* XXX: suppress that, this is incorrect */
- /* if invalid instruction, signal it */
- if (ret != 0) {
- /* NOTE: the tb is allocated but not linked, so we
- can leave it */
- spin_unlock(&tb_lock);
- raise_exception(EXCP06_ILLOP);
- }
-#endif
+ /* XXX: an MMU exception can occur here */
+ cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
*ptb = tb;
tb->hash_next = NULL;
tb_link(tb);
#ifdef __sparc__
T0 = tmp_T0;
#endif
- /* see if we can patch the calling TB. XXX: remove TF test */
- if (T0 != 0
-#if defined(TARGET_I386)
- && !(env->eflags & TF_MASK)
-#endif
- ) {
+ /* see if we can patch the calling TB. */
+ if (T0 != 0) {
spin_lock(&tb_lock);
tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
spin_unlock(&tb_lock);
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
- if (env->soft_mmu) {
- env->soft_mmu = 0;
+ if (env->hflags & HF_SOFTMMU_MASK) {
+ env->hflags &= ~HF_SOFTMMU_MASK;
/* do not allow linking to another block */
T0 = 0;
}
#endif
#elif defined(TARGET_ARM)
env->cpsr = compute_cpsr();
+#elif defined(TARGET_SPARC)
#else
#error unsupported target CPU
#endif
saved_env = env;
env = s;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
- SegmentCache *sc;
selector &= 0xffff;
- sc = &env->segs[seg_reg];
- sc->base = (void *)(selector << 4);
- sc->limit = 0xffff;
- sc->flags = 0;
- sc->selector = selector;
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ (uint8_t *)(selector << 4), 0xffff, 0);
} else {
load_seg(seg_reg, selector, 0);
}
return 1;
}
/* see if it is an MMU fault */
- ret = cpu_x86_handle_mmu_fault(env, address, is_write);
+ ret = cpu_x86_handle_mmu_fault(env, address, is_write,
+ ((env->hflags & HF_CPL_MASK) == 3), 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
raise_exception_err(EXCP0E_PAGE, env->error_code);
} else {
/* activate soft MMU for this block */
- env->soft_mmu = 1;
+ env->hflags |= HF_SOFTMMU_MASK;
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit();
}
/* XXX: do more */
return 0;
}
+#elif defined(TARGET_SPARC)
+/* Stub: SPARC MMU/signal fault recovery not implemented yet; returning 0
+   reports "not handled by the CPU core" to the host signal handler.
+   NOTE(review): presumably to be filled in like the i386 version above —
+   confirm intended semantics of the 0 return against the caller. */
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+                                    int is_write, sigset_t *old_set)
+{
+    return 0;
+}
#else
#error unsupported target CPU
#endif