/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
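/* modify_ldt() is not declared by the C library headers, so the _syscall3()
   macro from <linux/unistd.h> generates the syscall stub directly here. */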
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
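/* Linux 2.5.66 renamed struct modify_ldt_ldt_s to struct user_desc; this
   define keeps the older name building against newer kernel headers. */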
#endif
#endif /* USE_CODE_COPY */
CPUX86State *cpu_x86_init(void)
    env = malloc(sizeof(CPUX86State));
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    optimize_flags_init();

    /* testing code for code copy case */
    struct modify_ldt_ldt_s ldt;

    ldt.base_addr = (unsigned long)env;
    ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
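    /* limit_in_pages is set below, so the limit is expressed in 4KB pages:
       sizeof(CPUState) is rounded up to a whole number of pages here. */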
    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
    ldt.read_exec_only = 0;
    ldt.limit_in_pages = 1;
    ldt.seg_not_present = 0;
    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

    asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
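    /* selector (1 << 3) | 7: LDT entry 1, table indicator bit set (TI=1),
       requested privilege level 3 -- %fs then points at the CPU state. */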
    int family, model, stepping;

    env->cpuid_vendor1 = 0x68747541; /* "Auth" */
    env->cpuid_vendor2 = 0x69746e65; /* "enti" */
    env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */

    env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
    env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
    env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */

    env->cpuid_version = (family << 8) | (model << 4) | stepping;
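    /* CPUID version/signature layout: stepping in bits 3-0, model in
       bits 7-4, family in bits 11-8. */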
    env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                           CPUID_TSC | CPUID_MSR | CPUID_MCE |
                           CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
    /* currently not enabled for std i386 because not fully tested */
    env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE |
        CPUID_SSE | CPUID_SSE2;

    cpu_single_env = env;
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    /* init to reset state */
#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
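    /* CS selector 0xf000 with base 0xffff0000: together with the
       architectural reset value of EIP (0xfff0) this places the first
       instruction fetch at the reset vector 0xfffffff0. */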
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;

void cpu_x86_close(CPUX86State *env)
/***********************************************************/

static const char *cc_op_str[] = {

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    eflags = env->eflags;
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
273 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
274 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
275 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
276 (uint32_t)env->regs[R_EAX],
277 (uint32_t)env->regs[R_EBX],
278 (uint32_t)env->regs[R_ECX],
279 (uint32_t)env->regs[R_EDX],
280 (uint32_t)env->regs[R_ESI],
281 (uint32_t)env->regs[R_EDI],
282 (uint32_t)env->regs[R_EBP],
283 (uint32_t)env->regs[R_ESP],
284 (uint32_t)env->eip, eflags,
285 eflags & DF_MASK ? 'D' : '-',
286 eflags & CC_O ? 'O' : '-',
287 eflags & CC_S ? 'S' : '-',
288 eflags & CC_Z ? 'Z' : '-',
289 eflags & CC_A ? 'A' : '-',
290 eflags & CC_P ? 'P' : '-',
291 eflags & CC_C ? 'C' : '-',
292 env->hflags & HF_CPL_MASK,
293 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
294 (env->a20_mask >> 20) & 1);
    if (env->hflags & HF_LMA_MASK) {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
        cpu_fprintf(f, "GDT= %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[4]);
    } else {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    (uint32_t)env->ldt.base,
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    (uint32_t)env->tr.base,
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0],
                    (double)env->fpregs[1],
                    (double)env->fpregs[2],
                    (double)env->fpregs[3]);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4],
                    (double)env->fpregs[5],
                    (double)env->fpregs[6],
                    (double)env->fpregs[7]);
/***********************************************************/

/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
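        /* bit 20 of the mask follows the A20 gate: with A20 disabled the
           bit is forced to 0, so physical addresses wrap at the 1MB
           boundary as on a real 8086-compatible bus. */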
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }

    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
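    /* CR0.MP, CR0.EM and CR0.TS occupy bits 1-3; shifting new_cr0 left by
       (HF_MP_SHIFT - 1) lines them up with the HF_MP/HF_EM/HF_TS bits in
       hflags, so all three are copied with a single masked or. */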
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[4] = new_cr4;

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
    tlb_flush_page(env, addr);
static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
    return phys_ram_base + addr;

/* WARNING: addr must be aligned */
uint32_t ldl_phys_aligned(target_phys_addr_t addr)
    ptr = get_phys_mem_ptr(addr);

void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
    ptr = get_phys_mem_ptr(addr);

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif

    if (env->user_mode_only) {
        /* user mode only emulation */

    if (!(env->cr[0] & CR0_PG_MASK)) {
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
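            /* in 64-bit mode the address must be canonical: bits 63..47
               must all equal bit 47, so the arithmetic shift by 47 yields
               either 0 or -1 for a valid address. */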
            if (sext != 0 && sext != -1) {

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys_aligned(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {

            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_aligned(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {

            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_aligned(pdpe_addr, pdpe);
            }
        } else {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys_aligned(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {

        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_aligned(pde_addr, pde);
        }
        pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys_aligned(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_aligned(pde_addr, pde);
            }
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
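            /* keep the low 12 flag bits and the bits above the page size:
               the result is the physical base of the 2MB/4MB page plus the
               original PDE flags */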
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_aligned(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys_aligned(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {

            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_aligned(pte_addr, pte);
            }
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    !is_write || (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }

    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
#if defined(CONFIG_USER_ONLY)
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
    return addr;
#else
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {

    /* page directory entry */
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK))
        return -1;
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        pte = pde & ~0x003ff000; /* align to 4MB */
        page_size = 4096 * 1024;
    } else {
        /* page table entry */
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
        page_size = 4096;
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
#if defined(USE_CODE_COPY)

    uint8_t fpregs1[8 * 10];

void restore_native_fp_state(CPUState *env)
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
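    /* bits 11-13 of the x87 status word hold TOP, the index of the current
       stack top; the saved fpstt is shifted into that field here. */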
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i])
            fptag |= 3;
        /* else the FPU automatically computes the tag */
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
void save_native_fp_state(CPUState *env)
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
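    /* 0x037f is the x87 power-on control word (all exceptions masked,
       64-bit precision); bits 10-11 are the rounding control, which is
       kept from the guest control word below. */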
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;