4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec-i386.h"
22 const uint8_t parity_table[256] = {
23 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
24 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
25 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
26 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
30 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
31 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
32 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
33 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* Modulo-17 shift-count table for 16-bit RCL/RCR (count mod 17). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* Modulo-9 shift-count table for 8-bit RCL/RCR (count mod 9). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
73 const CPU86_LDouble f15rk[7] =
75 0.00000000000000000000L,
76 1.00000000000000000000L,
77 3.14159265358979323851L, /*pi*/
78 0.30102999566398119523L, /*lg2*/
79 0.69314718055994530943L, /*ln2*/
80 1.44269504088896340739L, /*l2e*/
81 3.32192809488736234781L, /*l2t*/
86 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
90 spin_lock(&global_cpu_lock);
95 spin_unlock(&global_cpu_lock);
98 void cpu_loop_exit(void)
100 /* NOTE: the register at this point must be saved by hand because
101 longjmp restore them */
103 env->regs[R_EAX] = EAX;
106 env->regs[R_ECX] = ECX;
109 env->regs[R_EDX] = EDX;
112 env->regs[R_EBX] = EBX;
115 env->regs[R_ESP] = ESP;
118 env->regs[R_EBP] = EBP;
121 env->regs[R_ESI] = ESI;
124 env->regs[R_EDI] = EDI;
126 longjmp(env->jmp_env, 1);
129 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
130 uint32_t *esp_ptr, int dpl)
132 int type, index, shift;
137 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
138 for(i=0;i<env->tr.limit;i++) {
139 printf("%02x ", env->tr.base[i]);
140 if ((i & 7) == 7) printf("\n");
146 if (!(env->tr.flags & DESC_P_MASK))
147 cpu_abort(env, "invalid tss");
148 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
150 cpu_abort(env, "invalid tss type");
152 index = (dpl * 4 + 2) << shift;
153 if (index + (4 << shift) - 1 > env->tr.limit)
154 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
156 *esp_ptr = lduw(env->tr.base + index);
157 *ss_ptr = lduw(env->tr.base + index + 2);
159 *esp_ptr = ldl(env->tr.base + index);
160 *ss_ptr = lduw(env->tr.base + index + 4);
164 /* return non zero if error */
165 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
176 index = selector & ~7;
177 if ((index + 7) > dt->limit)
179 ptr = dt->base + index;
181 *e2_ptr = ldl(ptr + 4);
186 /* protected mode interrupt */
187 static void do_interrupt_protected(int intno, int is_int, int error_code,
188 unsigned int next_eip)
192 int type, dpl, selector, ss_dpl;
193 int has_error_code, new_stack, shift;
194 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
195 uint32_t old_cs, old_ss, old_esp, old_eip;
198 if (intno * 8 + 7 > dt->limit)
199 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
200 ptr = dt->base + intno * 8;
203 /* check gate type */
204 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
206 case 5: /* task gate */
207 cpu_abort(env, "task gate not supported");
209 case 6: /* 286 interrupt gate */
210 case 7: /* 286 trap gate */
211 case 14: /* 386 interrupt gate */
212 case 15: /* 386 trap gate */
215 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
218 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
219 /* check privledge if software int */
220 if (is_int && dpl < env->cpl)
221 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
222 /* check valid bit */
223 if (!(e2 & DESC_P_MASK))
224 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
226 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
227 if ((selector & 0xfffc) == 0)
228 raise_exception_err(EXCP0D_GPF, 0);
230 if (load_segment(&e1, &e2, selector) != 0)
231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
232 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
234 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
237 if (!(e2 & DESC_P_MASK))
238 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
239 if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
240 /* to inner priviledge */
241 get_ss_esp_from_tss(&ss, &esp, dpl);
242 if ((ss & 0xfffc) == 0)
243 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
245 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
246 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
247 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
248 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
250 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
251 if (!(ss_e2 & DESC_S_MASK) ||
252 (ss_e2 & DESC_CS_MASK) ||
253 !(ss_e2 & DESC_W_MASK))
254 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
255 if (!(ss_e2 & DESC_P_MASK))
256 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
258 } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
259 /* to same priviledge */
262 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
263 new_stack = 0; /* avoid warning */
281 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
282 if (env->eflags & VM_MASK)
286 /* XXX: check that enough room is available */
288 old_esp = env->regs[R_ESP];
289 old_ss = env->segs[R_SS].selector;
290 load_seg(R_SS, ss, env->eip);
294 esp = env->regs[R_ESP];
300 old_cs = env->segs[R_CS].selector;
301 load_seg(R_CS, selector, env->eip);
303 env->regs[R_ESP] = esp - push_size;
304 ssp = env->segs[R_SS].base + esp;
307 if (env->eflags & VM_MASK) {
309 stl(ssp, env->segs[R_GS].selector);
311 stl(ssp, env->segs[R_FS].selector);
313 stl(ssp, env->segs[R_DS].selector);
315 stl(ssp, env->segs[R_ES].selector);
324 old_eflags = compute_eflags();
325 stl(ssp, old_eflags);
330 if (has_error_code) {
332 stl(ssp, error_code);
342 stw(ssp, compute_eflags());
347 if (has_error_code) {
349 stw(ssp, error_code);
353 /* interrupt gate clear IF mask */
354 if ((type & 1) == 0) {
355 env->eflags &= ~IF_MASK;
357 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
360 /* real mode interrupt */
361 static void do_interrupt_real(int intno, int is_int, int error_code,
362 unsigned int next_eip)
367 uint32_t offset, esp;
368 uint32_t old_cs, old_eip;
370 /* real mode (simpler !) */
372 if (intno * 4 + 3 > dt->limit)
373 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
374 ptr = dt->base + intno * 4;
376 selector = lduw(ptr + 2);
377 esp = env->regs[R_ESP];
378 ssp = env->segs[R_SS].base;
383 old_cs = env->segs[R_CS].selector;
385 stw(ssp + (esp & 0xffff), compute_eflags());
387 stw(ssp + (esp & 0xffff), old_cs);
389 stw(ssp + (esp & 0xffff), old_eip);
391 /* update processor state */
392 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
394 env->segs[R_CS].selector = selector;
395 env->segs[R_CS].base = (uint8_t *)(selector << 4);
396 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
399 /* fake user mode interrupt */
400 void do_interrupt_user(int intno, int is_int, int error_code,
401 unsigned int next_eip)
409 ptr = dt->base + (intno * 8);
412 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
413 /* check privledge if software int */
414 if (is_int && dpl < env->cpl)
415 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
417 /* Since we emulate only user space, we cannot do more than
418 exiting the emulation with the suitable exception and error
425 * Begin excution of an interruption. is_int is TRUE if coming from
426 * the int instruction. next_eip is the EIP value AFTER the interrupt
427 * instruction. It is only relevant if is_int is TRUE.
429 void do_interrupt(int intno, int is_int, int error_code,
430 unsigned int next_eip)
432 if (env->cr[0] & CR0_PE_MASK) {
433 do_interrupt_protected(intno, is_int, error_code, next_eip);
435 do_interrupt_real(intno, is_int, error_code, next_eip);
440 * Signal an interruption. It is executed in the main CPU loop.
441 * is_int is TRUE if coming from the int instruction. next_eip is the
442 * EIP value AFTER the interrupt instruction. It is only relevant if
445 void raise_interrupt(int intno, int is_int, int error_code,
446 unsigned int next_eip)
448 env->exception_index = intno;
449 env->error_code = error_code;
450 env->exception_is_int = is_int;
451 env->exception_next_eip = next_eip;
/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* raise an exception that carries no error code */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
482 void helper_divl_EAX_T0(uint32_t eip)
484 unsigned int den, q, r;
487 num = EAX | ((uint64_t)EDX << 32);
491 raise_exception(EXCP00_DIVZ);
493 #ifdef BUGGY_GCC_DIV64
494 r = div64(&q, num, den);
503 void helper_idivl_EAX_T0(uint32_t eip)
508 num = EAX | ((uint64_t)EDX << 32);
512 raise_exception(EXCP00_DIVZ);
514 #ifdef BUGGY_GCC_DIV64
515 r = idiv64(&q, num, den);
524 void helper_cmpxchg8b(void)
529 eflags = cc_table[CC_OP].compute_all();
530 d = ldq((uint8_t *)A0);
531 if (d == (((uint64_t)EDX << 32) | EAX)) {
532 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID.1:EDX feature flag bit positions */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* bits 16-22 not emulated */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
564 void helper_cpuid(void)
567 EAX = 1; /* max EAX index supported */
571 } else if (EAX == 1) {
572 int family, model, stepping;
585 EAX = (family << 8) | (model << 4) | stepping;
588 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
589 CPUID_TSC | CPUID_MSR | CPUID_MCE |
590 CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
594 static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
596 sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
597 sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
598 if (e2 & DESC_G_MASK)
599 sc->limit = (sc->limit << 12) | 0xfff;
603 void helper_lldt_T0(void)
611 selector = T0 & 0xffff;
612 if ((selector & 0xfffc) == 0) {
613 /* XXX: NULL selector case: invalid LDT */
614 env->ldt.base = NULL;
618 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
620 index = selector & ~7;
621 if ((index + 7) > dt->limit)
622 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
623 ptr = dt->base + index;
626 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
627 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
628 if (!(e2 & DESC_P_MASK))
629 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
630 load_seg_cache(&env->ldt, e1, e2);
632 env->ldt.selector = selector;
635 void helper_ltr_T0(void)
643 selector = T0 & 0xffff;
644 if ((selector & 0xfffc) == 0) {
645 /* NULL selector case: invalid LDT */
651 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
653 index = selector & ~7;
654 if ((index + 7) > dt->limit)
655 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
656 ptr = dt->base + index;
659 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
660 if ((e2 & DESC_S_MASK) ||
661 (type != 2 && type != 9))
662 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
663 if (!(e2 & DESC_P_MASK))
664 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
665 load_seg_cache(&env->tr, e1, e2);
666 e2 |= 0x00000200; /* set the busy bit */
669 env->tr.selector = selector;
672 /* only works if protected mode and not VM86 */
673 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
678 sc = &env->segs[seg_reg];
679 if ((selector & 0xfffc) == 0) {
680 /* null selector case */
681 if (seg_reg == R_SS) {
683 raise_exception_err(EXCP0D_GPF, 0);
685 /* XXX: each access should trigger an exception */
691 if (load_segment(&e1, &e2, selector) != 0) {
693 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
695 if (!(e2 & DESC_S_MASK) ||
696 (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
698 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
701 if (seg_reg == R_SS) {
702 if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
707 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
709 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
713 if (!(e2 & DESC_P_MASK)) {
716 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
718 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
720 load_seg_cache(sc, e1, e2);
722 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
723 selector, (unsigned long)sc->base, sc->limit, sc->flags);
726 if (seg_reg == R_CS) {
727 cpu_x86_set_cpl(env, selector & 3);
729 sc->selector = selector;
732 /* protected mode jump */
733 void helper_ljmp_protected_T0_T1(void)
737 uint32_t e1, e2, cpl, dpl, rpl;
741 if ((new_cs & 0xfffc) == 0)
742 raise_exception_err(EXCP0D_GPF, 0);
743 if (load_segment(&e1, &e2, new_cs) != 0)
744 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
746 if (e2 & DESC_S_MASK) {
747 if (!(e2 & DESC_CS_MASK))
748 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
749 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
750 if (e2 & DESC_CS_MASK) {
751 /* conforming code segment */
753 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
755 /* non conforming code segment */
758 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
760 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
762 if (!(e2 & DESC_P_MASK))
763 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
764 load_seg_cache(&sc1, e1, e2);
765 if (new_eip > sc1.limit)
766 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
767 env->segs[R_CS].base = sc1.base;
768 env->segs[R_CS].limit = sc1.limit;
769 env->segs[R_CS].flags = sc1.flags;
770 env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
773 cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
779 void helper_lcall_real_T0_T1(int shift, int next_eip)
782 uint32_t esp, esp_mask;
787 esp = env->regs[R_ESP];
788 esp_mask = 0xffffffff;
789 if (!(env->segs[R_SS].flags & DESC_B_MASK))
791 ssp = env->segs[R_SS].base;
794 stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
796 stl(ssp + (esp & esp_mask), next_eip);
799 stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
801 stw(ssp + (esp & esp_mask), next_eip);
804 if (!(env->segs[R_SS].flags & DESC_B_MASK))
805 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
807 env->regs[R_ESP] = esp;
809 env->segs[R_CS].selector = new_cs;
810 env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
813 /* protected mode call */
814 void helper_lcall_protected_T0_T1(int shift, int next_eip)
818 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
819 uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
820 uint32_t old_ss, old_esp, val, i;
821 uint8_t *ssp, *old_ssp;
825 if ((new_cs & 0xfffc) == 0)
826 raise_exception_err(EXCP0D_GPF, 0);
827 if (load_segment(&e1, &e2, new_cs) != 0)
828 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
830 if (e2 & DESC_S_MASK) {
831 if (!(e2 & DESC_CS_MASK))
832 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
833 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
834 if (e2 & DESC_CS_MASK) {
835 /* conforming code segment */
837 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
839 /* non conforming code segment */
842 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
849 sp = env->regs[R_ESP];
850 if (!(env->segs[R_SS].flags & DESC_B_MASK))
852 ssp = env->segs[R_SS].base + sp;
855 stl(ssp, env->segs[R_CS].selector);
860 stw(ssp, env->segs[R_CS].selector);
866 load_seg_cache(&sc1, e1, e2);
867 if (new_eip > sc1.limit)
868 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
869 /* from this point, not restartable */
870 if (!(env->segs[R_SS].flags & DESC_B_MASK))
871 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
873 env->regs[R_ESP] = sp;
874 env->segs[R_CS].base = sc1.base;
875 env->segs[R_CS].limit = sc1.limit;
876 env->segs[R_CS].flags = sc1.flags;
877 env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
880 /* check gate type */
881 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
883 case 1: /* available 286 TSS */
884 case 9: /* available 386 TSS */
885 case 5: /* task gate */
886 cpu_abort(env, "task gate not supported");
888 case 4: /* 286 call gate */
889 case 12: /* 386 call gate */
892 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
897 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
899 if (dpl < cpl || dpl < rpl)
900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
901 /* check valid bit */
902 if (!(e2 & DESC_P_MASK))
903 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
905 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
906 if ((selector & 0xfffc) == 0)
907 raise_exception_err(EXCP0D_GPF, 0);
909 if (load_segment(&e1, &e2, selector) != 0)
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
915 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_P_MASK))
917 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
919 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
920 /* to inner priviledge */
921 get_ss_esp_from_tss(&ss, &sp, dpl);
922 if ((ss & 0xfffc) == 0)
923 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
926 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
930 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
931 if (!(ss_e2 & DESC_S_MASK) ||
932 (ss_e2 & DESC_CS_MASK) ||
933 !(ss_e2 & DESC_W_MASK))
934 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935 if (!(ss_e2 & DESC_P_MASK))
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
938 param_count = e2 & 0x1f;
939 push_size = ((param_count * 2) + 8) << shift;
941 old_esp = env->regs[R_ESP];
942 old_ss = env->segs[R_SS].selector;
943 if (!(env->segs[R_SS].flags & DESC_B_MASK))
945 old_ssp = env->segs[R_SS].base + old_esp;
947 /* XXX: from this point not restartable */
948 load_seg(R_SS, ss, env->eip);
950 if (!(env->segs[R_SS].flags & DESC_B_MASK))
952 ssp = env->segs[R_SS].base + sp;
958 ssp -= 4 * param_count;
959 for(i = 0; i < param_count; i++) {
960 val = ldl(old_ssp + i * 4);
961 stl(ssp + i * 4, val);
968 ssp -= 2 * param_count;
969 for(i = 0; i < param_count; i++) {
970 val = lduw(old_ssp + i * 2);
971 stw(ssp + i * 2, val);
975 /* to same priviledge */
976 if (!(env->segs[R_SS].flags & DESC_B_MASK))
978 ssp = env->segs[R_SS].base + sp;
979 push_size = (4 << shift);
984 stl(ssp, env->segs[R_CS].selector);
989 stw(ssp, env->segs[R_CS].selector);
995 load_seg(R_CS, selector, env->eip);
996 /* from this point, not restartable if same priviledge */
997 if (!(env->segs[R_SS].flags & DESC_B_MASK))
998 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
1000 env->regs[R_ESP] = sp;
1005 /* init the segment cache in vm86 mode */
1006 static inline void load_seg_vm(int seg, int selector)
1008 SegmentCache *sc = &env->segs[seg];
1010 sc->base = (uint8_t *)(selector << 4);
1011 sc->selector = selector;
1016 /* real mode iret */
1017 void helper_iret_real(int shift)
1019 uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
1023 sp = env->regs[R_ESP] & 0xffff;
1024 ssp = env->segs[R_SS].base + sp;
1027 new_eflags = ldl(ssp + 8);
1028 new_cs = ldl(ssp + 4) & 0xffff;
1029 new_eip = ldl(ssp) & 0xffff;
1032 new_eflags = lduw(ssp + 4);
1033 new_cs = lduw(ssp + 2);
1034 new_eip = lduw(ssp);
1036 new_esp = sp + (6 << shift);
1037 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
1039 load_seg_vm(R_CS, new_cs);
1041 eflags_mask = FL_UPDATE_CPL0_MASK;
1043 eflags_mask &= 0xffff;
1044 load_eflags(new_eflags, eflags_mask);
1047 /* protected mode iret */
1048 static inline void helper_ret_protected(int shift, int is_iret, int addend)
1050 uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
1051 uint32_t new_es, new_ds, new_fs, new_gs;
1053 int cpl, dpl, rpl, eflags_mask;
1056 sp = env->regs[R_ESP];
1057 if (!(env->segs[R_SS].flags & DESC_B_MASK))
1059 ssp = env->segs[R_SS].base + sp;
1063 new_eflags = ldl(ssp + 8);
1064 new_cs = ldl(ssp + 4) & 0xffff;
1066 if (is_iret && (new_eflags & VM_MASK))
1067 goto return_to_vm86;
1071 new_eflags = lduw(ssp + 4);
1072 new_cs = lduw(ssp + 2);
1073 new_eip = lduw(ssp);
1075 if ((new_cs & 0xfffc) == 0)
1076 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1077 if (load_segment(&e1, &e2, new_cs) != 0)
1078 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1079 if (!(e2 & DESC_S_MASK) ||
1080 !(e2 & DESC_CS_MASK))
1081 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1085 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1086 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1087 if (e2 & DESC_CS_MASK) {
1089 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1092 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1094 if (!(e2 & DESC_P_MASK))
1095 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1098 /* return to same priledge level */
1099 load_seg(R_CS, new_cs, env->eip);
1100 new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
1102 /* return to different priviledge level */
1103 ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
1107 new_ss = ldl(ssp + 4) & 0xffff;
1110 new_esp = lduw(ssp);
1111 new_ss = lduw(ssp + 2);
1114 if ((new_ss & 3) != rpl)
1115 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1116 if (load_segment(&e1, &e2, new_ss) != 0)
1117 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1118 if (!(e2 & DESC_S_MASK) ||
1119 (e2 & DESC_CS_MASK) ||
1120 !(e2 & DESC_W_MASK))
1121 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1122 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1124 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1125 if (!(e2 & DESC_P_MASK))
1126 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
1128 load_seg(R_CS, new_cs, env->eip);
1129 load_seg(R_SS, new_ss, env->eip);
1131 if (env->segs[R_SS].flags & DESC_B_MASK)
1132 env->regs[R_ESP] = new_esp;
1134 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
1139 eflags_mask = FL_UPDATE_CPL0_MASK;
1141 eflags_mask = FL_UPDATE_MASK32;
1143 eflags_mask &= 0xffff;
1144 load_eflags(new_eflags, eflags_mask);
1149 new_esp = ldl(ssp + 12);
1150 new_ss = ldl(ssp + 16);
1151 new_es = ldl(ssp + 20);
1152 new_ds = ldl(ssp + 24);
1153 new_fs = ldl(ssp + 28);
1154 new_gs = ldl(ssp + 32);
1156 /* modify processor state */
1157 load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
1158 load_seg_vm(R_CS, new_cs);
1159 cpu_x86_set_cpl(env, 3);
1160 load_seg_vm(R_SS, new_ss);
1161 load_seg_vm(R_ES, new_es);
1162 load_seg_vm(R_DS, new_ds);
1163 load_seg_vm(R_FS, new_fs);
1164 load_seg_vm(R_GS, new_gs);
1167 env->regs[R_ESP] = new_esp;
/* IRET in protected mode */
void helper_iret_protected(int shift)
{
    helper_ret_protected(shift, 1, 0);
}
/* far RET in protected mode, popping 'addend' extra bytes */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1180 void helper_movl_crN_T0(int reg)
1185 cpu_x86_update_cr0(env);
1188 cpu_x86_update_cr3(env);
1194 void helper_movl_drN_T0(int reg)
1199 void helper_invlpg(unsigned int addr)
1201 cpu_x86_flush_tlb(env, addr);
1209 void helper_rdtsc(void)
1213 asm("rdtsc" : "=A" (val));
1215 /* better than nothing: the time increases */
1222 void helper_wrmsr(void)
1225 case MSR_IA32_SYSENTER_CS:
1226 env->sysenter_cs = EAX & 0xffff;
1228 case MSR_IA32_SYSENTER_ESP:
1229 env->sysenter_esp = EAX;
1231 case MSR_IA32_SYSENTER_EIP:
1232 env->sysenter_eip = EAX;
1235 /* XXX: exception ? */
1240 void helper_rdmsr(void)
1243 case MSR_IA32_SYSENTER_CS:
1244 EAX = env->sysenter_cs;
1247 case MSR_IA32_SYSENTER_ESP:
1248 EAX = env->sysenter_esp;
1251 case MSR_IA32_SYSENTER_EIP:
1252 EAX = env->sysenter_eip;
1256 /* XXX: exception ? */
1261 void helper_lsl(void)
1263 unsigned int selector, limit;
1266 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1267 selector = T0 & 0xffff;
1268 if (load_segment(&e1, &e2, selector) != 0)
1270 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1272 limit = (limit << 12) | 0xfff;
1277 void helper_lar(void)
1279 unsigned int selector;
1282 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1283 selector = T0 & 0xffff;
1284 if (load_segment(&e1, &e2, selector) != 0)
1286 T1 = e2 & 0x00f0ff00;
1292 #ifndef USE_X86LDOUBLE
1293 void helper_fldt_ST0_A0(void)
1296 new_fpstt = (env->fpstt - 1) & 7;
1297 env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
1298 env->fpstt = new_fpstt;
1299 env->fptags[new_fpstt] = 0; /* validate stack entry */
1302 void helper_fstt_ST0_A0(void)
1304 helper_fstt(ST0, (uint8_t *)A0);
1310 #define MUL10(iv) ( iv + iv + (iv << 3) )
1312 void helper_fbld_ST0_A0(void)
1320 for(i = 8; i >= 0; i--) {
1321 v = ldub((uint8_t *)A0 + i);
1322 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
1325 if (ldub((uint8_t *)A0 + 9) & 0x80)
1331 void helper_fbst_ST0_A0(void)
1335 uint8_t *mem_ref, *mem_end;
1340 mem_ref = (uint8_t *)A0;
1341 mem_end = mem_ref + 9;
1348 while (mem_ref < mem_end) {
1353 v = ((v / 10) << 4) | (v % 10);
1356 while (mem_ref < mem_end) {
1361 void helper_f2xm1(void)
1363 ST0 = pow(2.0,ST0) - 1.0;
1366 void helper_fyl2x(void)
1368 CPU86_LDouble fptemp;
1372 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
1376 env->fpus &= (~0x4700);
1381 void helper_fptan(void)
1383 CPU86_LDouble fptemp;
1386 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1392 env->fpus &= (~0x400); /* C2 <-- 0 */
1393 /* the above code is for |arg| < 2**52 only */
1397 void helper_fpatan(void)
1399 CPU86_LDouble fptemp, fpsrcop;
1403 ST1 = atan2(fpsrcop,fptemp);
1407 void helper_fxtract(void)
1409 CPU86_LDoubleU temp;
1410 unsigned int expdif;
1413 expdif = EXPD(temp) - EXPBIAS;
1414 /*DP exponent bias*/
1421 void helper_fprem1(void)
1423 CPU86_LDouble dblq, fpsrcop, fptemp;
1424 CPU86_LDoubleU fpsrcop1, fptemp1;
1430 fpsrcop1.d = fpsrcop;
1432 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1434 dblq = fpsrcop / fptemp;
1435 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1436 ST0 = fpsrcop - fptemp*dblq;
1437 q = (int)dblq; /* cutting off top bits is assumed here */
1438 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1439 /* (C0,C1,C3) <-- (q2,q1,q0) */
1440 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1441 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1442 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1444 env->fpus |= 0x400; /* C2 <-- 1 */
1445 fptemp = pow(2.0, expdif-50);
1446 fpsrcop = (ST0 / ST1) / fptemp;
1447 /* fpsrcop = integer obtained by rounding to the nearest */
1448 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
1449 floor(fpsrcop): ceil(fpsrcop);
1450 ST0 -= (ST1 * fpsrcop * fptemp);
1454 void helper_fprem(void)
1456 CPU86_LDouble dblq, fpsrcop, fptemp;
1457 CPU86_LDoubleU fpsrcop1, fptemp1;
1463 fpsrcop1.d = fpsrcop;
1465 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1466 if ( expdif < 53 ) {
1467 dblq = fpsrcop / fptemp;
1468 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1469 ST0 = fpsrcop - fptemp*dblq;
1470 q = (int)dblq; /* cutting off top bits is assumed here */
1471 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1472 /* (C0,C1,C3) <-- (q2,q1,q0) */
1473 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1474 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1475 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1477 env->fpus |= 0x400; /* C2 <-- 1 */
1478 fptemp = pow(2.0, expdif-50);
1479 fpsrcop = (ST0 / ST1) / fptemp;
1480 /* fpsrcop = integer obtained by chopping */
1481 fpsrcop = (fpsrcop < 0.0)?
1482 -(floor(fabs(fpsrcop))): floor(fpsrcop);
1483 ST0 -= (ST1 * fpsrcop * fptemp);
1487 void helper_fyl2xp1(void)
1489 CPU86_LDouble fptemp;
1492 if ((fptemp+1.0)>0.0) {
1493 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1497 env->fpus &= (~0x4700);
1502 void helper_fsqrt(void)
1504 CPU86_LDouble fptemp;
1508 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1514 void helper_fsincos(void)
1516 CPU86_LDouble fptemp;
1519 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1525 env->fpus &= (~0x400); /* C2 <-- 0 */
1526 /* the above code is for |arg| < 2**63 only */
1530 void helper_frndint(void)
1536 switch(env->fpuc & RC_MASK) {
1539 asm("rndd %0, %1" : "=f" (a) : "f"(a));
1542 asm("rnddm %0, %1" : "=f" (a) : "f"(a));
1545 asm("rnddp %0, %1" : "=f" (a) : "f"(a));
1548 asm("rnddz %0, %1" : "=f" (a) : "f"(a));
1557 void helper_fscale(void)
1559 CPU86_LDouble fpsrcop, fptemp;
1562 fptemp = pow(fpsrcop,ST1);
1566 void helper_fsin(void)
1568 CPU86_LDouble fptemp;
1571 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1575 env->fpus &= (~0x400); /* C2 <-- 0 */
1576 /* the above code is for |arg| < 2**53 only */
1580 void helper_fcos(void)
1582 CPU86_LDouble fptemp;
1585 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1589 env->fpus &= (~0x400); /* C2 <-- 0 */
1590 /* the above code is for |arg5 < 2**63 only */
1594 void helper_fxam_ST0(void)
1596 CPU86_LDoubleU temp;
1601 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1603 env->fpus |= 0x200; /* C1 <-- 1 */
1605 expdif = EXPD(temp);
1606 if (expdif == MAXEXPD) {
1607 if (MANTD(temp) == 0)
1608 env->fpus |= 0x500 /*Infinity*/;
1610 env->fpus |= 0x100 /*NaN*/;
1611 } else if (expdif == 0) {
1612 if (MANTD(temp) == 0)
1613 env->fpus |= 0x4000 /*Zero*/;
1615 env->fpus |= 0x4400 /*Denormal*/;
1621 void helper_fstenv(uint8_t *ptr, int data32)
1623 int fpus, fptag, exp, i;
1627 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1629 for (i=7; i>=0; i--) {
1631 if (env->fptags[i]) {
1634 tmp.d = env->fpregs[i];
1637 if (exp == 0 && mant == 0) {
1640 } else if (exp == 0 || exp == MAXEXPD
1641 #ifdef USE_X86LDOUBLE
1642 || (mant & (1LL << 63)) == 0
1645 /* NaNs, infinity, denormal */
1652 stl(ptr, env->fpuc);
1654 stl(ptr + 8, fptag);
1661 stw(ptr, env->fpuc);
1663 stw(ptr + 4, fptag);
1671 void helper_fldenv(uint8_t *ptr, int data32)
1676 env->fpuc = lduw(ptr);
1677 fpus = lduw(ptr + 4);
1678 fptag = lduw(ptr + 8);
1681 env->fpuc = lduw(ptr);
1682 fpus = lduw(ptr + 2);
1683 fptag = lduw(ptr + 4);
1685 env->fpstt = (fpus >> 11) & 7;
1686 env->fpus = fpus & ~0x3800;
1687 for(i = 0;i < 7; i++) {
1688 env->fptags[i] = ((fptag & 3) == 3);
1693 void helper_fsave(uint8_t *ptr, int data32)
1698 helper_fstenv(ptr, data32);
1700 ptr += (14 << data32);
1701 for(i = 0;i < 8; i++) {
1703 #ifdef USE_X86LDOUBLE
1704 *(long double *)ptr = tmp;
1706 helper_fstt(tmp, ptr);
1725 void helper_frstor(uint8_t *ptr, int data32)
1730 helper_fldenv(ptr, data32);
1731 ptr += (14 << data32);
1733 for(i = 0;i < 8; i++) {
1734 #ifdef USE_X86LDOUBLE
1735 tmp = *(long double *)ptr;
1737 tmp = helper_fldt(ptr);