/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif
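/* Set when a TB may have been invalidated while generating code (the
   TB cache had to be flushed): any pending chaining target in T0 could
   then point to freed code, so chaining is suppressed (see
   tb_find_slow() below). */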
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
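/* Translated block lookup works on two levels: tb_find_fast() probes
   the per-CPU virtual-PC hash table env->tb_jmp_cache first; on a miss,
   tb_find_slow() walks the physically indexed chain tb_phys_hash (keyed
   on the physical PC so that aliased virtual mappings share the same
   translation) and, if the block does not exist yet, translates it. */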
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    if (tb_invalidated_flag) {
        /* as some TB could have been invalidated because
           of memory exceptions while generating the code, we
           must recompute the hash index here */
        T0 = 0;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
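/* Fast path: the (pc, cs_base, flags) triple recorded below fully
   determines which translation may be reused, so a single probe of
   env->tb_jmp_cache usually avoids the locked hash-chain walk in
   tb_find_slow(). */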
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & MIPS_HFLAGS_TMASK;
    cs_base = 0;
    pc = env->PC;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
/* main execution loop */
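/* The loop below has two levels: an outer for(;;) whose body runs under
   setjmp(env->jmp_env) and dispatches any pending exception, and an
   inner for(;;) that services interrupt requests, finds the next
   TranslationBlock, optionally chains it to the previous block and
   jumps into the generated code. Helpers raising exceptions longjmp()
   back to the outer loop. */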
int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(reg_T2)
    int saved_T2;
#endif
    CPUState *saved_env;
#if defined(TARGET_I386)
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;
#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set.  */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif
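/* T0, T1 (and T2 where defined) are not ordinary variables: exec.h maps
   them, like the per-target reg_EAX... copies, onto fixed host registers
   chosen by dyngen, so their previous contents must be saved and
   restored by hand around the execution loop. */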
    cpu_single_env = env1;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#if defined(reg_T2)
    saved_T2 = T2;
#endif
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
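/* Exception delivery: helpers called from generated code do not return
   an error code; they store the exception number in env->exception_index
   and longjmp() to env->jmp_env, which lands in the setjmp() below at
   the top of the outer loop. */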
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
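            /* Chaining convention: generated code leaves in T0 a
               pointer to the TranslationBlock it exited from, with the
               index of the taken jump slot in the two low bits (decoded
               below as (TranslationBlock *)(T0 & ~3) and T0 & 3, see
               tb_add_jump()). Storing 0 in T0 suppresses patching for
               the next executed block. */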
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        env->exception_index = EXCP_EXTERNAL;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                        env->exception_index = EXCP_DECR;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#ifdef __sparc__
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#endif
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushf\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            :
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            );
    }
}
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
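                /* On the remaining hosts the generated code is entered
                   with a plain indirect call; the variants above exist
                   only where the host needs help (register windows on
                   SPARC, a fixed return label on ARM, function
                   descriptors on ia64). */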
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
#if defined(reg_T2)
    T2 = saved_T2;
#endif
    env = saved_env;
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
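/* Return convention for all handle_cpu_signal() variants below: 1 means
   the fault was resolved (write-protected page unprotected, or soft TLB
   filled) and execution can simply resume; 0 means it was not a guest
   MMU fault and the caller must treat the signal as a real host error.
   A fault that must be reflected to the guest CPU never returns: it
   longjmp()s back into cpu_exec(). */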
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x%08x error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
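/* For page faults (trapno 0xe) bit 1 of the hardware error code
   distinguishes writes from reads, hence (REG_ERR >> 1) & 1 above;
   other traps are never write faults. */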
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
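/* TRAP 0x400 is the instruction storage interrupt (an instruction
   fetch, never a write); for data storage interrupts the hardware sets
   DSISR bit 6 (mask 0x02000000) when the faulting access was a store. */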
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
# define si_flags _sifields._sigfault._si_pad0
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_flags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */