4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec-i386.h"
/* x86 parity-flag lookup table: parity_table[i] is CC_P when byte
   value i contains an even number of set bits (PF semantics), else 0.
   NOTE(review): the closing "};" is not visible in this extract. */
22 const uint8_t parity_table[256] = {
23     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
24     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
25     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
26     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
27     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
30     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
31     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
32     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
33     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
46     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53     CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54     0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* Shift-count remap table for 16-bit rotate-through-carry (RCL/RCR):
   the effective count wraps modulo 17 (16 data bits + carry bit).
   NOTE(review): the closing "};" is not visible in this extract. */
58 const uint8_t rclw_table[32] = {
59     0, 1, 2, 3, 4, 5, 6, 7,
60     8, 9,10,11,12,13,14,15,
61    16, 0, 1, 2, 3, 4, 5, 6,
62     7, 8, 9,10,11,12,13,14,
/* 8-bit variant: count wraps modulo 9 (8 data bits + carry bit).
   NOTE(review): the closing "};" is not visible in this extract. */
66 const uint8_t rclb_table[32] = {
67     0, 1, 2, 3, 4, 5, 6, 7,
68     8, 0, 1, 2, 3, 4, 5, 6,
69     7, 8, 0, 1, 2, 3, 4, 5,
70     6, 7, 8, 0, 1, 2, 3, 4,
/* FPU constant table: 0.0, 1.0, pi, log10(2), ln(2), log2(e), log2(10) --
   presumably indexed by the x87 FLD-constant instructions; confirm against
   the translator.  NOTE(review): the opening "{" and closing "};" are not
   visible in this extract. */
73 const CPU86_LDouble f15rk[7] =
75     0.00000000000000000000L,
76     1.00000000000000000000L,
77     3.14159265358979323851L,  /*pi*/
78     0.30102999566398119523L,  /*lg2*/
79     0.69314718055994530943L,  /*ln2*/
80     1.44269504088896340739L,  /*l2e*/
81     3.32192809488736234781L,  /*l2t*/
/* Global spinlock protecting the CPU emulation.  The two spin_lock /
   spin_unlock calls below are bodies of lock/unlock helper functions whose
   signatures are missing from this extract. */
86 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
90     spin_lock(&global_cpu_lock);
95     spin_unlock(&global_cpu_lock);
/* Abort execution of the current translated code and return to the main
   CPU loop via longjmp.  The guest registers cached in host registers are
   written back to env first, because longjmp restores host registers.
   NOTE(review): intervening lines (apparently conditional-compilation
   guards around each store) are missing from this extract. */
98 void cpu_loop_exit(void)
100     /* NOTE: the register at this point must be saved by hand because
101        longjmp restore them */
103     env->regs[R_EAX] = EAX;
106     env->regs[R_ECX] = ECX;
109     env->regs[R_EDX] = EDX;
112     env->regs[R_EBX] = EBX;
115     env->regs[R_ESP] = ESP;
118     env->regs[R_EBP] = EBP;
121     env->regs[R_ESI] = ESI;
124     env->regs[R_EDI] = EDI;
126     longjmp(env->jmp_env, 1);
/* Read the ring-'dpl' stack pointer and stack segment selector from the
   current TSS (env->tr).  Raises #TS if the TSS is too small.  The lduw
   pair appears to be the 16-bit-TSS branch and the ldl/lduw pair the
   32-bit-TSS branch; the branch itself is missing from this extract,
   as are several declarations and debug-print guards. */
129 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
130                                        uint32_t *esp_ptr, int dpl)
132     int type, index, shift;
137     printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
138     for(i=0;i<env->tr.limit;i++) {
139         printf("%02x ", env->tr.base[i]);
140         if ((i & 7) == 7) printf("\n");
146     if (!(env->tr.flags & DESC_P_MASK))
147         cpu_abort(env, "invalid tss");
148     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
150         cpu_abort(env, "invalid tss type");
152     index = (dpl * 4 + 2) << shift;
153     if (index + (4 << shift) - 1 > env->tr.limit)
154         raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
156         *esp_ptr = lduw(env->tr.base + index);
157         *ss_ptr = lduw(env->tr.base + index + 2);
159         *esp_ptr = ldl(env->tr.base + index);
160         *ss_ptr = lduw(env->tr.base + index + 4);
/* Load the two 32-bit words (e1 = low, e2 = high) of the descriptor named
   by 'selector' from the GDT/LDT into *e1_ptr / *e2_ptr.
   NOTE(review): the descriptor-table selection, limit-failure return and
   the e1 load are missing from this extract. */
164 /* return non zero if error */
165 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
176     index = selector & ~7;
177     if ((index + 7) > dt->limit)
179     ptr = dt->base + index;
181     *e2_ptr = ldl(ptr + 4);
/* Deliver interrupt/exception 'intno' in protected mode: read the IDT
   gate, validate gate and target code-segment descriptors, optionally
   switch to the inner-privilege stack from the TSS, push the return
   frame (and error code), then load CS:EIP from the gate.
   'is_int' is true for the INT instruction; 'next_eip' is the EIP after
   that instruction.  NOTE(review): many lines (declarations, stack
   pushes, VM86 handling details) are missing from this extract. */
186 /* protected mode interrupt */
187 static void do_interrupt_protected(int intno, int is_int, int error_code,
188                                    unsigned int next_eip)
192     int type, dpl, cpl, selector, ss_dpl;
193     int has_error_code, new_stack, shift;
194     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
195     uint32_t old_cs, old_ss, old_esp, old_eip;
198     if (intno * 8 + 7 > dt->limit)
199         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
200     ptr = dt->base + intno * 8;
203     /* check gate type */
204     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
206     case 5: /* task gate */
207         cpu_abort(env, "task gate not supported");
209     case 6: /* 286 interrupt gate */
210     case 7: /* 286 trap gate */
211     case 14: /* 386 interrupt gate */
212     case 15: /* 386 trap gate */
215         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
218     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
219     cpl = env->segs[R_CS].selector & 3;
220     /* check privilege if software int */
221     if (is_int && dpl < cpl)
222         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
223     /* check valid bit */
224     if (!(e2 & DESC_P_MASK))
225         raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
227     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
228     if ((selector & 0xfffc) == 0)
229         raise_exception_err(EXCP0D_GPF, 0);
231     if (load_segment(&e1, &e2, selector) != 0)
232         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
233     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
234         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
235     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
237         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
238     if (!(e2 & DESC_P_MASK))
239         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
240     if (!(e2 & DESC_C_MASK) && dpl < cpl) {
241         /* to inner privilege */
242         get_ss_esp_from_tss(&ss, &esp, dpl);
243         if ((ss & 0xfffc) == 0)
244             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
246             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
247         if (load_segment(&ss_e1, &ss_e2, ss) != 0)
248             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
249         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
251             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
252         if (!(ss_e2 & DESC_S_MASK) ||
253             (ss_e2 & DESC_CS_MASK) ||
254             !(ss_e2 & DESC_W_MASK))
255             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
256         if (!(ss_e2 & DESC_P_MASK))
257             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
259     } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
260         /* to same privilege */
263         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
264         new_stack = 0; /* avoid warning */
282     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
283     if (env->eflags & VM_MASK)
287     /* XXX: check that enough room is available */
289         old_esp = env->regs[R_ESP];
290         old_ss = env->segs[R_SS].selector;
291         load_seg(R_SS, ss, env->eip);
295         esp = env->regs[R_ESP];
301     old_cs = env->segs[R_CS].selector;
302     load_seg(R_CS, selector, env->eip);
304     env->regs[R_ESP] = esp - push_size;
305     ssp = env->segs[R_SS].base + esp;
308         if (env->eflags & VM_MASK) {
310             stl(ssp, env->segs[R_GS].selector);
312             stl(ssp, env->segs[R_FS].selector);
314             stl(ssp, env->segs[R_DS].selector);
316             stl(ssp, env->segs[R_ES].selector);
325         old_eflags = compute_eflags();
326         stl(ssp, old_eflags);
331         if (has_error_code) {
333             stl(ssp, error_code);
343         stw(ssp, compute_eflags());
348         if (has_error_code) {
350             stw(ssp, error_code);
354     /* interrupt gate clear IF mask */
355     if ((type & 1) == 0) {
356         env->eflags &= ~IF_MASK;
358     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* Deliver interrupt 'intno' in real mode: read the 4-byte IVT entry,
   push FLAGS/CS/IP (pushes partly missing from this extract), then load
   CS:IP and clear IF/TF/AC/RF.  NOTE(review): the #GP error code uses
   "intno * 8 + 2" although the real-mode IVT stride is 4 -- looks copied
   from the protected-mode path; verify against upstream. */
361 /* real mode interrupt */
362 static void do_interrupt_real(int intno, int is_int, int error_code,
363                               unsigned int next_eip)
368     uint32_t offset, esp;
369     uint32_t old_cs, old_eip;
371     /* real mode (simpler !) */
373     if (intno * 4 + 3 > dt->limit)
374         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
375     ptr = dt->base + intno * 4;
377     selector = lduw(ptr + 2);
378     esp = env->regs[R_ESP] & 0xffff;
379     ssp = env->segs[R_SS].base + esp;
384     old_cs = env->segs[R_CS].selector;
386     stw(ssp, compute_eflags());
393     /* update processor state */
394     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
396     env->segs[R_CS].selector = selector;
397     env->segs[R_CS].base = (uint8_t *)(selector << 4);
398     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
/* User-mode-emulation variant: only performs the software-interrupt
   privilege check against the IDT gate DPL, then (in code missing from
   this extract) exits emulation with the appropriate exception. */
401 /* fake user mode interrupt */
402 void do_interrupt_user(int intno, int is_int, int error_code,
403                        unsigned int next_eip)
411     ptr = dt->base + (intno * 8);
414     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
416     /* check privilege if software int */
417     if (is_int && dpl < cpl)
418         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
420     /* Since we emulate only user space, we cannot do more than
421        exiting the emulation with the suitable exception and error
/* Dispatcher: protected-mode delivery when CR0.PE is set, real-mode
   delivery otherwise. */
428  * Begin execution of an interruption. is_int is TRUE if coming from
429  * the int instruction. next_eip is the EIP value AFTER the interrupt
430  * instruction. It is only relevant if is_int is TRUE.
432 void do_interrupt(int intno, int is_int, int error_code,
433                   unsigned int next_eip)
435     if (env->cr[0] & CR0_PE_MASK) {
436         do_interrupt_protected(intno, is_int, error_code, next_eip);
438         do_interrupt_real(intno, is_int, error_code, next_eip);
/* Record a pending exception/interrupt in env and (in code missing from
   this extract, presumably cpu_loop_exit) unwind to the main CPU loop.
   raise_exception_err / raise_exception are shortcut wrappers. */
443  * Signal an interruption. It is executed in the main CPU loop.
444  * is_int is TRUE if coming from the int instruction. next_eip is the
445  * EIP value AFTER the interrupt instruction. It is only relevant if
448 void raise_interrupt(int intno, int is_int, int error_code,
449                      unsigned int next_eip)
451     env->exception_index = intno;
452     env->error_code = error_code;
453     env->exception_is_int = is_int;
454     env->exception_next_eip = next_eip;
458 /* shortcuts to generate exceptions */
459 void raise_exception_err(int exception_index, int error_code)
461     raise_interrupt(exception_index, 0, error_code, 0);
464 void raise_exception(int exception_index)
466     raise_interrupt(exception_index, 0, 0, 0);
/* 64/32 division helpers for the DIV/IDIV instructions.  div64/idiv64
   exist only to work around a gcc 2.95.4 PowerPC __udivdi3 issue.
   helper_divl_EAX_T0 / helper_idivl_EAX_T0 divide EDX:EAX by T0 and
   raise #DE on divide-by-zero.  NOTE(review): quotient/remainder
   writeback and the non-buggy-gcc branch are missing from this extract. */
469 #ifdef BUGGY_GCC_DIV64
470 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
471    call it from another function */
472 uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
478 int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
485 void helper_divl_EAX_T0(uint32_t eip)
487     unsigned int den, q, r;
490     num = EAX | ((uint64_t)EDX << 32);
494         raise_exception(EXCP00_DIVZ);
496 #ifdef BUGGY_GCC_DIV64
497     r = div64(&q, num, den);
506 void helper_idivl_EAX_T0(uint32_t eip)
511     num = EAX | ((uint64_t)EDX << 32);
515         raise_exception(EXCP00_DIVZ);
517 #ifdef BUGGY_GCC_DIV64
518     r = idiv64(&q, num, den);
/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; if equal,
   store ECX:EBX there.  The ZF update and not-equal path are missing
   from this extract. */
527 void helper_cmpxchg8b(void)
532     eflags = cc_table[CC_OP].compute_all();
533     d = ldq((uint8_t *)A0);
534     if (d == (((uint64_t)EDX << 32) | EAX)) {
535         stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
/* CPUID feature-flag bits (EDX of leaf 1) and the CPUID helper.
   Leaf 0 returns the max supported leaf (and, in lines missing here,
   the vendor string); leaf 1 returns the feature mask of a pre-MMX
   Pentium as used by valgrind. */
545 /* We simulate a pre-MMX pentium as in valgrind */
546 #define CPUID_FP87 (1 << 0)
547 #define CPUID_VME  (1 << 1)
548 #define CPUID_DE   (1 << 2)
549 #define CPUID_PSE  (1 << 3)
550 #define CPUID_TSC  (1 << 4)
551 #define CPUID_MSR  (1 << 5)
552 #define CPUID_PAE  (1 << 6)
553 #define CPUID_MCE  (1 << 7)
554 #define CPUID_CX8  (1 << 8)
555 #define CPUID_APIC (1 << 9)
556 #define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
557 #define CPUID_MTRR (1 << 12)
558 #define CPUID_PGE  (1 << 13)
559 #define CPUID_MCA  (1 << 14)
560 #define CPUID_CMOV (1 << 15)
562 #define CPUID_MMX  (1 << 23)
563 #define CPUID_FXSR (1 << 24)
564 #define CPUID_SSE  (1 << 25)
565 #define CPUID_SSE2 (1 << 26)
567 void helper_cpuid(void)
570         EAX = 1; /* max EAX index supported */
574     } else if (EAX == 1) {
579         EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
580             CPUID_TSC | CPUID_MSR | CPUID_MCE |
/* Decode a descriptor (e1 = low word, e2 = high word) into a segment
   cache: reassemble the 32-bit base and 20-bit limit, scaling the limit
   by 4K when the granularity bit is set. */
585 static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
587     sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
588     sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
589     if (e2 & DESC_G_MASK)
590         sc->limit = (sc->limit << 12) | 0xfff;
/* LLDT: load the LDT register from selector T0.  A null selector leaves
   an invalid LDT; otherwise the GDT descriptor must be a present LDT
   descriptor (type 2) or #GP/#NP is raised.
   NOTE(review): several lines (TI-bit check, e1 load) are missing. */
594 void helper_lldt_T0(void)
602     selector = T0 & 0xffff;
603     if ((selector & 0xfffc) == 0) {
604         /* XXX: NULL selector case: invalid LDT */
605         env->ldt.base = NULL;
609         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
611     index = selector & ~7;
612     if ((index + 7) > dt->limit)
613         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
614     ptr = dt->base + index;
617     if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
618         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
619     if (!(e2 & DESC_P_MASK))
620         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
621     load_seg_cache(&env->ldt, e1, e2);
623     env->ldt.selector = selector;
/* LTR: load the task register from selector T0.  The GDT descriptor
   must be a present, available 16- or 32-bit TSS (type 2 or 9); the
   busy bit is then set in the descriptor.
   NOTE(review): the busy-bit writeback store and some checks are
   missing from this extract. */
626 void helper_ltr_T0(void)
634     selector = T0 & 0xffff;
635     if ((selector & 0xfffc) == 0) {
636         /* NULL selector case: invalid LDT */
642         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
644     index = selector & ~7;
645     if ((index + 7) > dt->limit)
646         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
647     ptr = dt->base + index;
650     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
651     if ((e2 & DESC_S_MASK) ||
652         (type != 2 && type != 9))
653         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
654     if (!(e2 & DESC_P_MASK))
655         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
656     load_seg_cache(&env->tr, e1, e2);
657     e2 |= 0x00000200; /* set the busy bit */
660     env->tr.selector = selector;
/* Load segment register 'seg_reg' with 'selector', performing the full
   protected-mode descriptor checks: null selector (illegal for SS),
   data/readable-code segment rules, writability for SS, and presence
   (#SS for stack, #NP otherwise).  cur_eip is presumably the faulting
   EIP for exception reporting -- confirm against callers. */
663 /* only works if protected mode and not VM86 */
664 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
669     sc = &env->segs[seg_reg];
670     if ((selector & 0xfffc) == 0) {
671         /* null selector case */
672         if (seg_reg == R_SS) {
674             raise_exception_err(EXCP0D_GPF, 0);
676             /* XXX: each access should trigger an exception */
682         if (load_segment(&e1, &e2, selector) != 0) {
684             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686         if (!(e2 & DESC_S_MASK) ||
687             (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
689             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
692         if (seg_reg == R_SS) {
693             if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
695                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
698             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
700                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
704         if (!(e2 & DESC_P_MASK)) {
707                 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
709                 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
711         load_seg_cache(sc, e1, e2);
713             fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
714                     selector, (unsigned long)sc->base, sc->limit, sc->flags);
717     sc->selector = selector;
/* Far JMP in protected mode: validate the target code-segment
   descriptor (conforming vs non-conforming privilege rules), check the
   new EIP against the segment limit, then load CS with RPL forced to
   CPL.  Call/task gates are not supported and abort emulation. */
720 /* protected mode jump */
721 void jmp_seg(int selector, unsigned int new_eip)
724     uint32_t e1, e2, cpl, dpl, rpl;
726     if ((selector & 0xfffc) == 0) {
727         raise_exception_err(EXCP0D_GPF, 0);
730     if (load_segment(&e1, &e2, selector) != 0)
731         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
732     cpl = env->segs[R_CS].selector & 3;
733     if (e2 & DESC_S_MASK) {
734         if (!(e2 & DESC_CS_MASK))
735             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
736         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
737         if (e2 & DESC_CS_MASK) {
738             /* conforming code segment */
740                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
742             /* non conforming code segment */
745                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
747                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
749         if (!(e2 & DESC_P_MASK))
750             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
751         load_seg_cache(&sc1, e1, e2);
752         if (new_eip > sc1.limit)
753             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
754         env->segs[R_CS] = sc1;
755         env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
758         cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
/* VM86 segment load: in virtual-8086 mode the base is simply
   selector << 4; no descriptor checks apply. */
763 /* init the segment cache in vm86 mode */
764 static inline void load_seg_vm(int seg, int selector)
766     SegmentCache *sc = &env->segs[seg];
768     sc->base = (uint8_t *)(selector << 4);
769     sc->selector = selector;
/* IRET in protected mode.  Pops EIP/CS/EFLAGS (size per 'shift': 1 =
   32-bit, 0 = 16-bit), validates the return CS descriptor, and handles
   three cases: return to the same privilege level, return to an outer
   level (also pops ESP/SS and validates SS), and return to VM86 (also
   pops ES/DS/FS/GS and reloads all segments VM86-style).  EFLAGS update
   is masked by CPL.  NOTE(review): numerous lines (rpl computation,
   branch headers, VM86 entry point) are missing from this extract. */
774 /* protected mode iret */
775 void helper_iret_protected(int shift)
777     uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
778     uint32_t new_es, new_ds, new_fs, new_gs;
780     int cpl, dpl, rpl, eflags_mask;
783     sp = env->regs[R_ESP];
784     if (!(env->segs[R_SS].flags & DESC_B_MASK))
786     ssp = env->segs[R_SS].base + sp;
789         new_eflags = ldl(ssp + 8);
790         new_cs = ldl(ssp + 4) & 0xffff;
792         if (new_eflags & VM_MASK)
796         new_eflags = lduw(ssp + 4);
797         new_cs = lduw(ssp + 2);
800     if ((new_cs & 0xfffc) == 0)
801         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
802     if (load_segment(&e1, &e2, new_cs) != 0)
803         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
804     if (!(e2 & DESC_S_MASK) ||
805         !(e2 & DESC_CS_MASK))
806         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
807     cpl = env->segs[R_CS].selector & 3;
810         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
811     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
812     if (e2 & DESC_CS_MASK) {
814             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
817             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
819     if (!(e2 & DESC_P_MASK))
820         raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
823         /* return to same privilege level */
824         load_seg(R_CS, new_cs, env->eip);
825         new_esp = sp + (6 << shift);
827         /* return to different privilege level */
830             new_esp = ldl(ssp + 12);
831             new_ss = ldl(ssp + 16) & 0xffff;
834             new_esp = lduw(ssp + 6);
835             new_ss = lduw(ssp + 8);
838         if ((new_ss & 3) != rpl)
839             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
840         if (load_segment(&e1, &e2, new_ss) != 0)
841             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
842         if (!(e2 & DESC_S_MASK) ||
843             (e2 & DESC_CS_MASK) ||
845             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
846         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
848             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
849         if (!(e2 & DESC_P_MASK))
850             raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
852         load_seg(R_CS, new_cs, env->eip);
853         load_seg(R_SS, new_ss, env->eip);
855     if (env->segs[R_SS].flags & DESC_B_MASK)
856         env->regs[R_ESP] = new_esp;
858         env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
862         eflags_mask = FL_UPDATE_CPL0_MASK;
864         eflags_mask = FL_UPDATE_MASK32;
866         eflags_mask &= 0xffff;
867     load_eflags(new_eflags, eflags_mask);
871     new_esp = ldl(ssp + 12);
872     new_ss = ldl(ssp + 16);
873     new_es = ldl(ssp + 20);
874     new_ds = ldl(ssp + 24);
875     new_fs = ldl(ssp + 28);
876     new_gs = ldl(ssp + 32);
878     /* modify processor state */
879     load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
880     load_seg_vm(R_CS, new_cs);
881     load_seg_vm(R_SS, new_ss);
882     load_seg_vm(R_ES, new_es);
883     load_seg_vm(R_DS, new_ds);
884     load_seg_vm(R_FS, new_fs);
885     load_seg_vm(R_GS, new_gs);
888     env->regs[R_ESP] = new_esp;
/* Control/debug register and TLB helpers.  MOV to CR0/CR3 triggers the
   corresponding cpu_x86_update_* hook; INVLPG flushes one TLB entry;
   RDTSC uses the host TSC when available, otherwise (in code missing
   here) a monotonically increasing counter. */
891 void helper_movl_crN_T0(int reg)
896         cpu_x86_update_cr0(env);
899         cpu_x86_update_cr3(env);
905 void helper_movl_drN_T0(int reg)
910 void helper_invlpg(unsigned int addr)
912     cpu_x86_flush_tlb(env, addr);
920 void helper_rdtsc(void)
924     asm("rdtsc" : "=A" (val));
926     /* better than nothing: the time increases */
/* LSL / LAR: load the segment limit (byte-scaled per the G bit) or the
   access-rights bytes of the descriptor named by T0 into T1, setting ZF
   on success.  The ZF-set stores are missing from this extract. */
933 void helper_lsl(void)
935     unsigned int selector, limit;
938     CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
939     selector = T0 & 0xffff;
940     if (load_segment(&e1, &e2, selector) != 0)
942     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
944         limit = (limit << 12) | 0xfff;
949 void helper_lar(void)
951     unsigned int selector;
954     CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
955     selector = T0 & 0xffff;
956     if (load_segment(&e1, &e2, selector) != 0)
958     T1 = e2 & 0x00f0ff00;
/* 80-bit load/store helpers (when the host lacks x86 long double) and
   FBLD: convert the 18-digit packed-BCD value at [A0] to a float.
   MUL10 computes iv*10 with shifts/adds.  The high 9 digits are
   accumulated into one integer scaled by 1e8, the low 9 into another,
   and the sign byte at offset 9 negates the result.
   NOTE(review): the ldub digit fetches between steps are missing. */
964 #ifndef USE_X86LDOUBLE
965 void helper_fldt_ST0_A0(void)
967     ST0 = helper_fldt((uint8_t *)A0);
970 void helper_fstt_ST0_A0(void)
972     helper_fstt(ST0, (uint8_t *)A0);
978 #define MUL10(iv) ( iv + iv + (iv << 3) )
980 void helper_fbld_ST0_A0(void)
983     CPU86_LDouble fpsrcop;
987     /* in this code, seg/m32i will be used as temporary ptr/int */
988     seg = (uint8_t *)A0 + 8;
990     /* XXX: raise exception */
994     /* XXX: raise exception */
997     m32i = v;  /* <-- d14 */
999     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d13 */
1000     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d12 */
1002     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d11 */
1003     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d10 */
1005     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d9 */
1006     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d8 */
1007     fpsrcop = ((CPU86_LDouble)m32i) * 100000000.0;
1010     m32i = (v >> 4);  /* <-- d7 */
1011     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d6 */
1013     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d5 */
1014     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d4 */
1016     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d3 */
1017     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d2 */
1019     m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d1 */
1020     m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d0 */
1021     fpsrcop += ((CPU86_LDouble)m32i);
1022     if ( ldub(seg+9) & 0x80 )
/* FBST: store rint(ST0) at [A0] as 18-digit packed BCD with a sign
   byte at offset 9 (written via mem_end).  Each loop iteration packs
   two decimal digits into one byte; the trailing loop presumably
   zero-fills the remaining bytes (its body is missing here). */
1027 void helper_fbst_ST0_A0(void)
1029     CPU86_LDouble fptemp;
1030     CPU86_LDouble fpsrcop;
1032     uint8_t *mem_ref, *mem_end;
1034     fpsrcop = rint(ST0);
1035     mem_ref = (uint8_t *)A0;
1036     mem_end = mem_ref + 8;
1037     if ( fpsrcop < 0.0 ) {
1038         stw(mem_end, 0x8000);
1041         stw(mem_end, 0x0000);
1043     while (mem_ref < mem_end) {
1046         fptemp = floor(fpsrcop/10.0);
1047         v = ((int)(fpsrcop - fptemp*10.0));
1048         if (fptemp == 0.0) {
1053         fptemp = floor(fpsrcop/10.0);
1054         v |= (((int)(fpsrcop - fptemp*10.0)) << 4);
1058     while (mem_ref < mem_end) {
/* Transcendental x87 helpers implemented with libm:
   F2XM1 (2^x - 1), FYL2X (ST1 *= log2(ST0), code partly missing),
   FPTAN (sets C2 when |arg| exceeds MAXTAN), FPATAN (atan2(ST1, ST0)),
   FXTRACT (split ST0 into exponent and significand; remainder of the
   body is missing from this extract). */
1063 void helper_f2xm1(void)
1065     ST0 = pow(2.0,ST0) - 1.0;
1068 void helper_fyl2x(void)
1070     CPU86_LDouble fptemp;
1074         fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
1078         env->fpus &= (~0x4700);
1083 void helper_fptan(void)
1085     CPU86_LDouble fptemp;
1088     if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1094         env->fpus &= (~0x400);  /* C2 <-- 0 */
1095         /* the above code is for  |arg| < 2**52 only */
1099 void helper_fpatan(void)
1101     CPU86_LDouble fptemp, fpsrcop;
1105     ST1 = atan2(fpsrcop,fptemp);
1109 void helper_fxtract(void)
1111     CPU86_LDoubleU temp;
1112     unsigned int expdif;
1115     expdif = EXPD(temp) - EXPBIAS;
1116     /*DP exponent bias*/
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1.  When the exponent
   difference is small the quotient is computed directly and its low
   three bits are reported in C0/C1/C3; otherwise C2 is set and only a
   partial reduction (round-to-nearest quotient of a scaled operand) is
   performed, so the instruction must be re-executed.
   NOTE(review): the expdif comparison guarding the first branch is
   missing from this extract. */
1123 void helper_fprem1(void)
1125     CPU86_LDouble dblq, fpsrcop, fptemp;
1126     CPU86_LDoubleU fpsrcop1, fptemp1;
1132     fpsrcop1.d = fpsrcop;
1134     expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1136         dblq = fpsrcop / fptemp;
1137         dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1138         ST0 = fpsrcop - fptemp*dblq;
1139         q = (int)dblq; /* cutting off top bits is assumed here */
1140         env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1141                                 /* (C0,C1,C3) <-- (q2,q1,q0) */
1142         env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1143         env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1144         env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1146         env->fpus |= 0x400;  /* C2 <-- 1 */
1147         fptemp = pow(2.0, expdif-50);
1148         fpsrcop = (ST0 / ST1) / fptemp;
1149         /* fpsrcop = integer obtained by rounding to the nearest */
1150         fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
1151             floor(fpsrcop): ceil(fpsrcop);
1152         ST0 -= (ST1 * fpsrcop * fptemp);
/* FPREM: truncating partial remainder (like FPREM1 above but the
   partial-reduction quotient is obtained by chopping toward zero
   instead of rounding to nearest). */
1156 void helper_fprem(void)
1158     CPU86_LDouble dblq, fpsrcop, fptemp;
1159     CPU86_LDoubleU fpsrcop1, fptemp1;
1165     fpsrcop1.d = fpsrcop;
1167     expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1168     if ( expdif < 53 ) {
1169         dblq = fpsrcop / fptemp;
1170         dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1171         ST0 = fpsrcop - fptemp*dblq;
1172         q = (int)dblq; /* cutting off top bits is assumed here */
1173         env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1174                                 /* (C0,C1,C3) <-- (q2,q1,q0) */
1175         env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1176         env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1177         env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1179         env->fpus |= 0x400;  /* C2 <-- 1 */
1180         fptemp = pow(2.0, expdif-50);
1181         fpsrcop = (ST0 / ST1) / fptemp;
1182         /* fpsrcop = integer obtained by chopping */
1183         fpsrcop = (fpsrcop < 0.0)?
1184             -(floor(fabs(fpsrcop))): floor(fpsrcop);
1185         ST0 -= (ST1 * fpsrcop * fptemp);
/* FYL2XP1 (ST1 *= log2(ST0+1), only valid for ST0+1 > 0), FSQRT (with
   status-word update, body partly missing), FSINCOS (sets C2 outside
   the argument range), and FRNDINT, which rounds ST0 using the host
   rounding instruction selected by the FPU control word RC field.
   NOTE(review): the "rndd*" asm mnemonics appear host-specific; confirm
   the target architecture for this #ifdef branch. */
1189 void helper_fyl2xp1(void)
1191     CPU86_LDouble fptemp;
1194     if ((fptemp+1.0)>0.0) {
1195         fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1199         env->fpus &= (~0x4700);
1204 void helper_fsqrt(void)
1206     CPU86_LDouble fptemp;
1210         env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1216 void helper_fsincos(void)
1218     CPU86_LDouble fptemp;
1221     if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1227         env->fpus &= (~0x400);  /* C2 <-- 0 */
1228         /* the above code is for  |arg| < 2**63 only */
1232 void helper_frndint(void)
1238     switch(env->fpuc & RC_MASK) {
1241         asm("rndd %0, %1" : "=f" (a) : "f"(a));
1244         asm("rnddm %0, %1" : "=f" (a) : "f"(a));
1247         asm("rnddp %0, %1" : "=f" (a) : "f"(a));
1250         asm("rnddz %0, %1" : "=f" (a) : "f"(a));
/* FSCALE (scale ST0 by 2^trunc(ST1); computed here via pow -- the
   surrounding lines are missing), FSIN and FCOS (set C2 when the
   argument magnitude exceeds MAXTAN, i.e. out of reduction range). */
1259 void helper_fscale(void)
1261     CPU86_LDouble fpsrcop, fptemp;
1264     fptemp = pow(fpsrcop,ST1);
1268 void helper_fsin(void)
1270     CPU86_LDouble fptemp;
1273     if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1277         env->fpus &= (~0x400);  /* C2 <-- 0 */
1278         /* the above code is for  |arg| < 2**53 only */
1282 void helper_fcos(void)
1284     CPU86_LDouble fptemp;
1287     if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1291         env->fpus &= (~0x400);  /* C2 <-- 0 */
1292         /* the above code is for |arg| < 2**63 only */
/* FXAM: classify ST0 into the C3..C0 condition codes -- sign bit into
   C1, then infinity (0x500), NaN (0x100), zero (0x4000), denormal
   (0x4400), or (in lines missing here) normal finite. */
1296 void helper_fxam_ST0(void)
1298     CPU86_LDoubleU temp;
1303     env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1305         env->fpus |= 0x200; /* C1 <-- 1 */
1307     expdif = EXPD(temp);
1308     if (expdif == MAXEXPD) {
1309         if (MANTD(temp) == 0)
1310             env->fpus |=  0x500 /*Infinity*/;
1312             env->fpus |=  0x100 /*NaN*/;
1313     } else if (expdif == 0) {
1314         if (MANTD(temp) == 0)
1315             env->fpus |=  0x4000 /*Zero*/;
1317             env->fpus |= 0x4400 /*Denormal*/;
/* FSTENV/FNSTENV: store the FPU environment (control word, status word
   with the current top-of-stack folded in, and a tag word rebuilt by
   classifying each register as valid/zero/special) at ptr, in 32-bit or
   16-bit layout per data32.  NOTE(review): tag accumulation, mantissa
   extraction and several stores are missing from this extract. */
1323 void helper_fstenv(uint8_t *ptr, int data32)
1325     int fpus, fptag, exp, i;
1329     fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1331     for (i=7; i>=0; i--) {
1333         if (env->fptags[i]) {
1336             tmp.d = env->fpregs[i];
1339             if (exp == 0 && mant == 0) {
1342             } else if (exp == 0 || exp == MAXEXPD
1343 #ifdef USE_X86LDOUBLE
1344                        || (mant & (1LL << 63)) == 0
1347                 /* NaNs, infinity, denormal */
1354         stl(ptr, env->fpuc);
1356         stl(ptr + 8, fptag);
1363         stw(ptr, env->fpuc);
1365         stw(ptr + 4, fptag);
/* FLDENV: load the FPU environment from ptr (32-bit or 16-bit layout
   per data32): control word, status word (top-of-stack extracted into
   fpstt), and tag word (tag value 3 = empty register).
   NOTE(review): the loop bound "i < 7" covers only 7 of the 8 tag
   fields -- likely an off-by-one; verify against upstream QEMU, where
   this loop runs to 8. */
1373 void helper_fldenv(uint8_t *ptr, int data32)
1378         env->fpuc = lduw(ptr);
1379         fpus = lduw(ptr + 4);
1380         fptag = lduw(ptr + 8);
1383         env->fpuc = lduw(ptr);
1384         fpus = lduw(ptr + 2);
1385         fptag = lduw(ptr + 4);
1387     env->fpstt = (fpus >> 11) & 7;
1388     env->fpus = fpus & ~0x3800;
1389     for(i = 0;i < 7; i++) {
1390         env->fptags[i] = ((fptag & 3) == 3);
/* FSAVE: store the environment (via helper_fstenv) followed by the
   eight 80-bit FPU registers; environment size is 14 or 28 bytes
   depending on data32.  The per-register pointer advance is missing
   from this extract. */
1395 void helper_fsave(uint8_t *ptr, int data32)
1400     helper_fstenv(ptr, data32);
1402     ptr += (14 << data32);
1403     for(i = 0;i < 8; i++) {
1405 #ifdef USE_X86LDOUBLE
1406         *(long double *)ptr = tmp;
1408         helper_fstt(tmp, ptr);
1427 void helper_frstor(uint8_t *ptr, int data32)
1432 helper_fldenv(ptr, data32);
1433 ptr += (14 << data32);
1435 for(i = 0;i < 8; i++) {
1436 #ifdef USE_X86LDOUBLE
1437 tmp = *(long double *)ptr;
1439 tmp = helper_fldt(ptr);