/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
    longjmp(env->jmp_env, 1);
}
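
/* Illustrative sketch (not QEMU code; the toy_ names are hypothetical): the
   exception-exit mechanism above is plain setjmp()/longjmp(). The main loop
   arms a jmp_buf, and any code that needs to abandon the current translated
   block (cpu_loop_exit(), cpu_resume_from_signal()) jumps back to it. A
   stripped-down model of that control flow: */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf toy_jmp_env;

static void toy_cpu_loop_exit(int exception_index)
{
    /* abandon whatever was executing and return to the setjmp() point */
    longjmp(toy_jmp_env, exception_index);
}

static int toy_cpu_exec(void)
{
    int exception = setjmp(toy_jmp_env);
    if (exception != 0) {
        /* a translated block or a signal handler bailed out */
        printf("caught exception %d\n", exception);
        return exception;
    }
    /* ... run translated code, which may call toy_cpu_loop_exit() ... */
    return 0;
}
#endif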
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */

    /* flush must be done */

    /* cannot fail at this point */

    /* don't forget to invalidate previous TB info */

    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
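    /* note: the expression above rounds code_gen_ptr up to the next
       CODE_GEN_ALIGN boundary after the freshly generated code, e.g. with a
       16-byte alignment a pointer ending in ...0x2c becomes ...0x30, so the
       next translated block starts on an aligned address */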
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    if (tb_invalidated_flag) {
        /* as some TB could have been invalidated because
           of memory exceptions while generating the code, we
           must recompute the hash index here */
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
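
/* Illustrative sketch (not QEMU code; the toy_ names are hypothetical):
   tb_find_slow() above looks the block up in a chained hash table keyed by
   the *physical* PC, so a translation is shared even when several virtual
   mappings alias the same physical page. A minimal model of that lookup: */
#if 0
#define TOY_PHYS_HASH_SIZE 4096

struct toy_tb {
    unsigned long pc;              /* guest virtual PC of the first insn */
    unsigned long phys_page1;      /* physical page of the first insn */
    unsigned long cs_base;         /* extra state the translation depends on */
    unsigned int flags;
    struct toy_tb *phys_hash_next; /* collision chain */
};

static struct toy_tb *toy_phys_hash[TOY_PHYS_HASH_SIZE];

static struct toy_tb *toy_tb_find(unsigned long pc, unsigned long phys_pc,
                                  unsigned long cs_base, unsigned int flags)
{
    struct toy_tb *tb = toy_phys_hash[phys_pc & (TOY_PHYS_HASH_SIZE - 1)];

    while (tb != NULL) {
        if (tb->pc == pc &&
            tb->phys_page1 == (phys_pc & ~4095UL) &&
            tb->cs_base == cs_base &&
            tb->flags == flags)
            return tb;             /* cached translation found */
        tb = tb->phys_hash_next;
    }
    return NULL;                   /* caller must generate the code */
}
#endif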
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
#endif
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
            (msr_se << MSR_SE) | (msr_le << MSR_LE);
#elif defined(TARGET_MIPS)
    flags = env->hflags & MIPS_HFLAG_TMASK;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
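
/* Illustrative sketch (not QEMU code; reuses the hypothetical struct toy_tb
   from the sketch after tb_find_slow above): the fast path in tb_find_fast()
   is a direct-mapped cache indexed by a hash of the virtual PC; only on a
   miss, or when the cached block's pc/cs_base/flags tag does not match, does
   it fall back to the slow physical-hash lookup. A toy version of that
   cache: */
#if 0
#define TOY_JMP_CACHE_BITS 12
#define TOY_JMP_CACHE_SIZE (1 << TOY_JMP_CACHE_BITS)

static struct toy_tb *toy_jmp_cache[TOY_JMP_CACHE_SIZE];

static unsigned int toy_jmp_cache_hash(unsigned long pc)
{
    /* fold high PC bits into the index so nearby blocks spread out */
    return (unsigned int)((pc ^ (pc >> TOY_JMP_CACHE_BITS)) &
                          (TOY_JMP_CACHE_SIZE - 1));
}

static struct toy_tb *toy_tb_find_fast(unsigned long pc, unsigned long phys_pc,
                                       unsigned long cs_base, unsigned int flags)
{
    struct toy_tb *tb = toy_jmp_cache[toy_jmp_cache_hash(pc)];

    if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags) {
        /* miss: do the slow lookup and refill the direct-mapped slot */
        tb = toy_tb_find(pc, phys_pc, cs_base, flags);
        toy_jmp_cache[toy_jmp_cache_hash(pc)] = tb;
    }
    return tb;
}
#endif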
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(TARGET_I386)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
    int saved_i7, tmp_T0;
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;

    cpu_single_env = env1;

    /* first we save global registers */

    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
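    /* Illustrative sketch (not QEMU code; the df_* names are hypothetical):
       the conversion above keeps the x86 direction flag in a host-friendly
       form. EFLAGS bit 10 (DF) is turned into the signed step +1 (bit clear)
       or -1 (bit set), so string operations can simply advance their index
       registers by DF * element_size instead of re-testing the flag: */
#if 0
static int df_from_eflags(unsigned int eflags)
{
    /* bit 10 of EFLAGS is DF: 0 -> increment (+1), 1 -> decrement (-1) */
    return 1 - (2 * ((eflags >> 10) & 1));
}

static void df_example(void)
{
    int df_clear = df_from_eflags(0x00000002); /* -> +1 (DF bit clear) */
    int df_set   = df_from_eflags(0x00000402); /* -> -1 (DF bit 0x400 set) */
    (void)df_clear;
    (void)df_set;
}
#endif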
#elif defined(TARGET_ARM)
    env->CF = (psr >> 29) & 1;
    env->NZF = (psr & 0xc0000000) ^ 0x40000000;
    env->VF = (psr << 3) & 0x80000000;
    env->QF = (psr >> 27) & 1;
    env->cpsr = psr & ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        env->current_tb = NULL;
        /* if an exception is pending, we execute it here */
        if (env->exception_index >= 0) {
            if (env->exception_index >= EXCP_INTERRUPT) {
                /* exit request from the cpu execution loop */
                ret = env->exception_index;
            } else if (env->user_mode_only) {
                /* if user mode only, we simulate a fake exception
                   which will be handled outside the cpu execution
                   loop */
#if defined(TARGET_I386)
                do_interrupt_user(env->exception_index,
                                  env->exception_is_int,
                                  env->exception_next_eip);
#endif
                ret = env->exception_index;
#if defined(TARGET_I386)
                /* simulate a real cpu exception. On i386, it can
                   trigger new exceptions, but we do not handle
                   double or triple faults yet. */
                do_interrupt(env->exception_index,
                             env->exception_is_int,
                             env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
                do_interrupt(env->exception_index);
#endif
                env->exception_index = -1;

        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
            ret = kqemu_cpu_exec(env);
            /* put eflags in CPU temporary format */
            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            DF = 1 - (2 * ((env->eflags >> 10) & 1));
            CC_OP = CC_OP_EFLAGS;
            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);

                longjmp(env->jmp_env, 1);
            } else if (ret == 2) {
                /* softmmu execution needed */
                if (env->interrupt_request != 0) {
                    /* hardware interrupt will be executed just after */
                } else {
                    /* otherwise, we restart */
                    longjmp(env->jmp_env, 1);
                }

        T0 = 0; /* force lookup of first TB */
        /* g1 can be modified by some libc? functions */

            interrupt_request = env->interrupt_request;
            if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                /* if hardware interrupt pending, we execute it */
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->eflags & IF_MASK) &&
                    !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    intno = cpu_get_pic_interrupt(env);
                    if (loglevel & CPU_LOG_TB_IN_ASM) {
                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, 0, 1);
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
#elif defined(TARGET_PPC)
                if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                    env->exception_index = EXCP_EXTERNAL;
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                    env->exception_index = EXCP_DECR;
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#elif defined(TARGET_MIPS)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->CP0_Status & (1 << CP0St_IE)) &&
                    (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                    !(env->hflags & MIPS_HFLAG_EXL) &&
                    !(env->hflags & MIPS_HFLAG_ERL) &&
                    !(env->hflags & MIPS_HFLAG_DM)) {
                    env->exception_index = EXCP_EXT_INTERRUPT;
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#elif defined(TARGET_SPARC)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    int pil = env->interrupt_index & 15;
                    int type = env->interrupt_index & 0xf0;
                    if (((type == TT_EXTINT) &&
                         (pil == 15 || pil > env->psrpil)) ||
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        do_interrupt(env->interrupt_index);
                        env->interrupt_index = 0;
                } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    //do_interrupt(0, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
                if (interrupt_request & CPU_INTERRUPT_EXIT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
            if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                /* restore flags in standard format */
                env->regs[R_EAX] = EAX;
                env->regs[R_EBX] = EBX;
                env->regs[R_ECX] = ECX;
                env->regs[R_EDX] = EDX;
                env->regs[R_ESI] = ESI;
                env->regs[R_EDI] = EDI;
                env->regs[R_EBP] = EBP;
                env->regs[R_ESP] = ESP;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                env->cpsr = compute_cpsr();
                cpu_dump_state(env, logfile, fprintf, 0);
                env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
                REGWPTR = env->regbase + (env->cwp * 16);
                env->regwptr = REGWPTR;
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
            }

            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));
            }
            /* see if we can patch the calling TB. When the TB
               spans two pages, we cannot safely do a direct
               jump. */
                    tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
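
            /* Illustrative sketch (assumption about the encoding, inferred
               from the masks used above; the toy_ names are hypothetical):
               T0 carries the address of the block that just exited, with a
               2-bit jump-slot index packed into its low bits, which is why
               the code masks with ~3 to recover the TranslationBlock pointer
               and with 3 to select the slot. Conceptually the chaining step
               patches that exit so it falls straight into the new block: */
#if 0
struct toy_chained_tb {
    void *jmp_target[2];                /* where each exit jump currently goes */
    struct toy_chained_tb *jmp_next[2]; /* successor blocks, once patched */
    void *tc_ptr;                       /* entry point of the generated code */
};

static void toy_tb_add_jump(unsigned long t0, struct toy_chained_tb *next_tb)
{
    struct toy_chained_tb *caller = (struct toy_chained_tb *)(t0 & ~3UL);
    int slot = (int)(t0 & 3);

    if (caller != NULL && slot < 2) {
        /* redirect the caller's exit so the next time that exit is taken,
           execution continues in next_tb without returning to cpu_exec() */
        caller->jmp_target[slot] = next_tb->tc_ptr;
        caller->jmp_next[slot] = next_tb;
    }
}
#endif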
            env->current_tb = tb;
            /* execute the generated code */
            gen_func = (void *)tc_ptr;
#if defined(__sparc__)
            __asm__ __volatile__("call %0\n\t"
                                 : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
            asm volatile ("mov pc, %0\n\t"
                          ".global exec_loop\n\t"
                          : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
            if (!(tb->cflags & CF_CODE_COPY)) {
                if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                    save_native_fp_state(env);
                if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                    restore_native_fp_state(env);
                /* we work with native eflags */
                CC_SRC = cc_table[CC_OP].compute_all();
                CC_OP = CC_OP_EFLAGS;
                asm(".globl exec_loop\n"
                    " fs movl %11, %%eax\n"
                    " andl $0x400, %%eax\n"
                    " fs orl %8, %%eax\n"
                    " fs movl %%esp, %12\n"
                    " fs movl %0, %%eax\n"
                    " fs movl %1, %%ecx\n"
                    " fs movl %2, %%edx\n"
                    " fs movl %3, %%ebx\n"
                    " fs movl %4, %%esp\n"
                    " fs movl %5, %%ebp\n"
                    " fs movl %6, %%esi\n"
                    " fs movl %7, %%edi\n"
                    " fs movl %%esp, %4\n"
                    " fs movl %12, %%esp\n"
                    " fs movl %%eax, %0\n"
                    " fs movl %%ecx, %1\n"
                    " fs movl %%edx, %2\n"
                    " fs movl %%ebx, %3\n"
                    " fs movl %%ebp, %5\n"
                    " fs movl %%esi, %6\n"
                    " fs movl %%edi, %7\n"
                    " movl %%eax, %%ecx\n"
                    " andl $0x400, %%ecx\n"
                    " andl $0x8d5, %%eax\n"
                    " fs movl %%eax, %8\n"
                    " subl %%ecx, %%eax\n"
                    " fs movl %%eax, %11\n"
                    " fs movl %9, %%ebx\n" /* get T0 value */
                    : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                      "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                      "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                      "m" (*(uint8_t *)offsetof(CPUState, df)),
                      "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
#elif defined(__ia64)
            fp.gp = code_gen_buffer + 2 * (1 << 20);
            (*(void (*)(void)) &fp)();
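            /* Illustrative sketch (not QEMU code; the toy_ names are
               hypothetical): on hosts without the special cases above,
               dispatch amounts to casting the pointer to the generated code
               to a function pointer and calling it; the sparc, arm and ia64
               variants only exist because those ABIs need extra register or
               function-descriptor handling around that call: */
#if 0
typedef void (*toy_gen_func_t)(void);

static void toy_dispatch(void *tc_ptr)
{
    toy_gen_func_t fn = (toy_gen_func_t)tc_ptr;
    fn(); /* jump into the code emitted by cpu_gen_code() */
}
#endif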
            env->current_tb = NULL;
            /* reset soft MMU for next block (it can currently
               only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
            if (env->hflags & HF_SOFTMMU_MASK) {
                env->hflags &= ~HF_SOFTMMU_MASK;
                /* do not allow linking to another block */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
    /* restore global registers */
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    helper_fsave((target_ulong)ptr, data32);

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    helper_frstor((target_ulong)ptr, data32);

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */

        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);

        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);

        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);

        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);

    /* never comes here */
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */

        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */

        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */

        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);

        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);

        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);

    /* never comes here */
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);

        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);

        printf("PF exception: PC=0x%08x error=0x%x %p\n",
               env->PC, env->error_code, tb);

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);

        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);

    /* never comes here */
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

#define REG_TRAPNO TRAPNO
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);

    /* XXX: is there a standard glibc define ? */

    /* XXX: need kernel patch to get write flag faster */
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
# define si_flags _sifields._sigfault._si_pad0

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {

    if (info->si_code && (info->si_flags & __ISR_VALID))
        /* ISR.W (write-access) is bit 33: */
        is_write = (info->si_isr >> 33) & 1;

    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
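
/* Illustrative sketch (assumption: the actual registration lives elsewhere in
   QEMU): the cpu_signal_handler() variants above all take the
   (signum, siginfo, ucontext) triple, i.e. they are meant to be installed
   with sigaction() and SA_SIGINFO so that the third argument really is the
   host ucontext they cast. A minimal, self-contained registration sketch
   (the toy_ names are hypothetical): */
#if 0
#include <signal.h>
#include <string.h>

static void toy_segv_handler(int sig, siginfo_t *info, void *puc)
{
    /* a real build would forward to cpu_signal_handler(sig, info, puc) */
    (void)sig;
    (void)info;
    (void)puc;
}

static int toy_install_segv_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = toy_segv_handler;
    act.sa_flags = SA_SIGINFO; /* deliver siginfo_t and a ucontext pointer */
    sigemptyset(&act.sa_mask);
    return sigaction(SIGSEGV, &act, NULL);
}
#endif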