4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec-i386.h"
22 const uint8_t parity_table[256] = {
23 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
24 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
25 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
26 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
30 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
31 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
32 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
33 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* RCL/RCR rotate-count table for 16-bit operands: maps a raw count
   (0..31) to count mod 17 (16 data bits + carry form a 17-bit rotate).
   Restored the missing closing brace from the damaged listing. */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* RCL/RCR rotate-count table for 8-bit operands: maps a raw count
   (0..31) to count mod 9 (8 data bits + carry form a 9-bit rotate).
   Restored the missing closing brace from the damaged listing. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
73 const CPU86_LDouble f15rk[7] =
75 0.00000000000000000000L,
76 1.00000000000000000000L,
77 3.14159265358979323851L, /*pi*/
78 0.30102999566398119523L, /*lg2*/
79 0.69314718055994530943L, /*ln2*/
80 1.44269504088896340739L, /*l2e*/
81 3.32192809488736234781L, /*l2t*/
/* Global spinlock serializing access to the shared CPU/translation
   state.  The two spin_lock/spin_unlock statements below are orphaned
   bodies of the cpu_lock()/cpu_unlock() wrappers — the surrounding
   function lines are missing from this listing. */
86 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
90 spin_lock(&global_cpu_lock);
95 spin_unlock(&global_cpu_lock);
/* Abort execution of the current translated block and return to the
   main CPU loop.  The guest general-purpose registers (cached in host
   registers as EAX..EDI macros) are written back to env->regs[] first,
   because longjmp() restores the host register state. */
98 void cpu_loop_exit(void)
100 /* NOTE: the register at this point must be saved by hand because
101 longjmp restore them */
103 env->regs[R_EAX] = EAX;
106 env->regs[R_ECX] = ECX;
109 env->regs[R_EDX] = EDX;
112 env->regs[R_EBX] = EBX;
115 env->regs[R_ESP] = ESP;
118 env->regs[R_EBP] = EBP;
121 env->regs[R_ESI] = ESI;
124 env->regs[R_EDI] = EDI;
/* jump back to the setjmp() point in the main loop */
126 longjmp(env->jmp_env, 1);
/* Fetch the SS:ESP pair for privilege level 'dpl' from the current
   task-state segment (env->tr).  Raises #TS if the TSS is too small.
   The printf loop below is a debug dump of the raw TSS bytes
   (presumably under a DEBUG #ifdef — the guard lines are missing
   from this listing). */
129 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
130 uint32_t *esp_ptr, int dpl)
132 int type, index, shift;
137 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
138 for(i=0;i<env->tr.limit;i++) {
139 printf("%02x ", env->tr.base[i]);
140 if ((i & 7) == 7) printf("\n");
/* the TSS descriptor must be present and of a valid TSS type */
146 if (!(env->tr.flags & DESC_P_MASK))
147 cpu_abort(env, "invalid tss");
148 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
150 cpu_abort(env, "invalid tss type");
/* offset of the {esp, ss} pair for this privilege level; 'shift'
   distinguishes 16-bit from 32-bit TSS layouts */
152 index = (dpl * 4 + 2) << shift;
153 if (index + (4 << shift) - 1 > env->tr.limit)
154 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
/* 16-bit TSS layout */
156 *esp_ptr = lduw(env->tr.base + index);
157 *ss_ptr = lduw(env->tr.base + index + 2);
/* 32-bit TSS layout */
159 *esp_ptr = ldl(env->tr.base + index);
160 *ss_ptr = lduw(env->tr.base + index + 4);
164 /* return non zero if error */
/* Read the 8-byte descriptor for 'selector' from the GDT/LDT into
   *e1_ptr (low dword) and *e2_ptr (high dword).  Returns non-zero if
   the selector's index is outside the table limit. */
165 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
176 index = selector & ~7;
177 if ((index + 7) > dt->limit)
179 ptr = dt->base + index;
181 *e2_ptr = ldl(ptr + 4);
186 /* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: read the IDT
   gate, validate gate and target code-segment descriptors, optionally
   switch to an inner-privilege stack fetched from the TSS, push the
   return frame (plus error code if any), and load the new CS:EIP. */
187 static void do_interrupt_protected(int intno, int is_int, int error_code,
188 unsigned int next_eip)
192 int type, dpl, cpl, selector, ss_dpl;
193 int has_error_code, new_stack, shift;
194 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
195 uint32_t old_cs, old_ss, old_esp, old_eip;
/* vector must fit within the IDT limit (8 bytes per gate) */
198 if (intno * 8 + 7 > dt->limit)
199 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
200 ptr = dt->base + intno * 8;
203 /* check gate type */
204 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
206 case 5: /* task gate */
207 cpu_abort(env, "task gate not supported");
209 case 6: /* 286 interrupt gate */
210 case 7: /* 286 trap gate */
211 case 14: /* 386 interrupt gate */
212 case 15: /* 386 trap gate */
215 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
218 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
219 if (env->eflags & VM_MASK)
222 cpl = env->segs[R_CS].selector & 3;
223 /* check privledge if software int */
224 if (is_int && dpl < cpl)
225 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
226 /* check valid bit */
227 if (!(e2 & DESC_P_MASK))
228 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
/* target CS:offset comes from the gate descriptor */
230 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
231 if ((selector & 0xfffc) == 0)
232 raise_exception_err(EXCP0D_GPF, 0);
/* validate the target code segment descriptor */
234 if (load_segment(&e1, &e2, selector) != 0)
235 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
236 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
237 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
238 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
241 if (!(e2 & DESC_P_MASK))
242 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
243 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
244 /* to inner priviledge */
/* stack switch: SS:ESP for the new CPL is read from the TSS and the
   new stack segment is validated (all faults here are #TS) */
245 get_ss_esp_from_tss(&ss, &esp, dpl);
246 if ((ss & 0xfffc) == 0)
247 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
249 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
250 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
251 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
252 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
254 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
255 if (!(ss_e2 & DESC_S_MASK) ||
256 (ss_e2 & DESC_CS_MASK) ||
257 !(ss_e2 & DESC_W_MASK))
258 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
259 if (!(ss_e2 & DESC_P_MASK))
260 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
262 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
263 /* to same priviledge */
266 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
267 new_stack = 0; /* avoid warning */
/* frame size: EIP+CS+EFLAGS (6 bytes for 16-bit gates), plus SS:ESP
   when switching stacks, plus the error code when present */
285 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
286 if (env->eflags & VM_MASK)
290 /* XXX: check that enough room is available */
292 old_esp = env->regs[R_ESP];
293 old_ss = env->segs[R_SS].selector;
294 load_seg(R_SS, ss, env->eip);
298 esp = env->regs[R_ESP];
304 old_cs = env->segs[R_CS].selector;
305 load_seg(R_CS, selector, env->eip);
307 env->regs[R_ESP] = esp - push_size;
308 ssp = env->segs[R_SS].base + esp;
/* when leaving vm86 mode the data segment selectors are pushed too */
311 if (env->eflags & VM_MASK) {
313 stl(ssp, env->segs[R_GS].selector);
315 stl(ssp, env->segs[R_FS].selector);
317 stl(ssp, env->segs[R_DS].selector);
319 stl(ssp, env->segs[R_ES].selector);
328 old_eflags = compute_eflags();
329 stl(ssp, old_eflags);
334 if (has_error_code) {
336 stl(ssp, error_code);
/* 16-bit gate variant of the pushes */
346 stw(ssp, compute_eflags());
351 if (has_error_code) {
353 stw(ssp, error_code);
357 /* interrupt gate clear IF mask */
358 if ((type & 1) == 0) {
359 env->eflags &= ~IF_MASK;
361 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
364 /* real mode interrupt */
/* Deliver interrupt 'intno' in real mode: read CS:IP from the 4-byte
   IVT entry, push FLAGS/CS/IP (16-bit) on the stack, load the new
   CS:IP and clear IF/TF/AC/RF. */
365 static void do_interrupt_real(int intno, int is_int, int error_code,
366 unsigned int next_eip)
371 uint32_t offset, esp;
372 uint32_t old_cs, old_eip;
374 /* real mode (simpler !) */
376 if (intno * 4 + 3 > dt->limit)
377 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
378 ptr = dt->base + intno * 4;
380 selector = lduw(ptr + 2);
381 esp = env->regs[R_ESP] & 0xffff;
382 ssp = env->segs[R_SS].base + esp;
387 old_cs = env->segs[R_CS].selector;
389 stw(ssp, compute_eflags());
396 /* update processor state */
/* only the low 16 bits of ESP change in real mode */
397 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
399 env->segs[R_CS].selector = selector;
400 env->segs[R_CS].base = (uint8_t *)(selector << 4);
401 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
404 /* fake user mode interrupt */
/* User-mode-only emulation path: perform just the privilege check a
   real CPU would do for a software int, then leave the rest to the
   host signal/exception machinery (see comment below). */
405 void do_interrupt_user(int intno, int is_int, int error_code,
406 unsigned int next_eip)
414 ptr = dt->base + (intno * 8);
417 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
419 /* check privledge if software int */
420 if (is_int && dpl < cpl)
421 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
423 /* Since we emulate only user space, we cannot do more than
424 exiting the emulation with the suitable exception and error
431 * Begin excution of an interruption. is_int is TRUE if coming from
432 * the int instruction. next_eip is the EIP value AFTER the interrupt
433 * instruction. It is only relevant if is_int is TRUE.
/* Dispatch to the protected-mode or real-mode delivery path depending
   on CR0.PE. */
435 void do_interrupt(int intno, int is_int, int error_code,
436 unsigned int next_eip)
438 if (env->cr[0] & CR0_PE_MASK) {
439 do_interrupt_protected(intno, is_int, error_code, next_eip);
441 do_interrupt_real(intno, is_int, error_code, next_eip);
446 * Signal an interruption. It is executed in the main CPU loop.
447 * is_int is TRUE if coming from the int instruction. next_eip is the
448 * EIP value AFTER the interrupt instruction. It is only relevant if
/* Record the pending exception in env and (in the missing tail of this
   function) presumably longjmp back to the main loop via
   cpu_loop_exit() — the final lines are absent from this listing. */
451 void raise_interrupt(int intno, int is_int, int error_code,
452 unsigned int next_eip)
454 env->exception_index = intno;
455 env->error_code = error_code;
456 env->exception_is_int = is_int;
457 env->exception_next_eip = next_eip;
/* shortcuts to generate exceptions */

/* Raise an exception that carries an error code.  is_int is 0, so
   next_eip is irrelevant (see raise_interrupt()).
   (Restored the missing braces from the damaged listing.) */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception without an error code.
   (Restored the missing braces from the damaged listing.) */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
472 #ifdef BUGGY_GCC_DIV64
473 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
474 call it from another function */
/* Out-of-line 64/32 unsigned divide: quotient via *q_ptr, remainder
   returned (bodies missing from this listing). */
475 uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
/* Signed counterpart of div64(). */
481 int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
/* DIV r/m32 helper: divide the 64-bit EDX:EAX by T0 (unsigned);
   raises #DE on division by zero.  'eip' is presumably the faulting
   instruction address used when raising the exception — the lines
   using it are missing from this listing. */
488 void helper_divl_EAX_T0(uint32_t eip)
490 unsigned int den, q, r;
493 num = EAX | ((uint64_t)EDX << 32);
497 raise_exception(EXCP00_DIVZ);
499 #ifdef BUGGY_GCC_DIV64
500 r = div64(&q, num, den);
/* IDIV r/m32 helper: signed divide of EDX:EAX by T0; raises #DE on
   division by zero (overflow handling not visible in this listing). */
509 void helper_idivl_EAX_T0(uint32_t eip)
514 num = EAX | ((uint64_t)EDX << 32);
518 raise_exception(EXCP00_DIVZ);
520 #ifdef BUGGY_GCC_DIV64
521 r = idiv64(&q, num, den);
/* CMPXCHG8B helper: compare the 64-bit value at [A0] with EDX:EAX;
   if equal, store ECX:EBX there (ZF handling is in the lines missing
   from this listing). */
530 void helper_cmpxchg8b(void)
535 eflags = cc_table[CC_OP].compute_all();
536 d = ldq((uint8_t *)A0);
537 if (d == (((uint64_t)EDX << 32) | EAX)) {
538 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
548 /* We simulate a pre-MMX pentium as in valgrind */
/* Feature-flag bit positions reported in EDX by CPUID leaf 1. */
549 #define CPUID_FP87 (1 << 0)
550 #define CPUID_VME  (1 << 1)
551 #define CPUID_DE   (1 << 2)
552 #define CPUID_PSE  (1 << 3)
553 #define CPUID_TSC  (1 << 4)
554 #define CPUID_MSR  (1 << 5)
555 #define CPUID_PAE  (1 << 6)
556 #define CPUID_MCE  (1 << 7)
557 #define CPUID_CX8  (1 << 8)
558 #define CPUID_APIC (1 << 9)
559 #define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
560 #define CPUID_MTRR (1 << 12)
561 #define CPUID_PGE  (1 << 13)
562 #define CPUID_MCA  (1 << 14)
563 #define CPUID_CMOV (1 << 15)
565 #define CPUID_MMX  (1 << 23)
566 #define CPUID_FXSR (1 << 24)
567 #define CPUID_SSE  (1 << 25)
568 #define CPUID_SSE2 (1 << 26)
/* CPUID helper: leaf 0 reports max leaf 1 (vendor string lines are
   missing from this listing); leaf 1 reports family/model/stepping in
   EAX and the feature set of a pre-MMX Pentium in EDX. */
570 void helper_cpuid(void)
573 EAX = 1; /* max EAX index supported */
577 } else if (EAX == 1) {
578 int family, model, stepping;
591 EAX = (family << 8) | (model << 4) | stepping;
594 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
595 CPUID_TSC | CPUID_MSR | CPUID_MCE |
596 CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
/* Decode a segment descriptor (e1 = low dword, e2 = high dword) into
   the cached base/limit of *sc.  With the G bit set the limit is in
   4K pages, so it is scaled and filled with 0xfff. */
600 static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
602 sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
603 sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
604 if (e2 & DESC_G_MASK)
605 sc->limit = (sc->limit << 12) | 0xfff;
/* LLDT helper: load the LDT register from the selector in T0.  A null
   selector yields an empty LDT; otherwise the descriptor must be a
   present system descriptor of type 2 (LDT). */
609 void helper_lldt_T0(void)
617 selector = T0 & 0xffff;
618 if ((selector & 0xfffc) == 0) {
619 /* XXX: NULL selector case: invalid LDT */
620 env->ldt.base = NULL;
/* selector must reference the GDT (TI bit clear) — the check line is
   missing from this listing */
624 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
626 index = selector & ~7;
627 if ((index + 7) > dt->limit)
628 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
629 ptr = dt->base + index;
632 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
634 if (!(e2 & DESC_P_MASK))
635 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
636 load_seg_cache(&env->ldt, e1, e2);
638 env->ldt.selector = selector;
/* LTR helper: load the task register from the selector in T0.  The
   descriptor must be a present, available TSS (type 2 = 16-bit,
   type 9 = 32-bit); its busy bit is then set in the GDT. */
641 void helper_ltr_T0(void)
649 selector = T0 & 0xffff;
650 if ((selector & 0xfffc) == 0) {
651 /* NULL selector case: invalid LDT */
657 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
659 index = selector & ~7;
660 if ((index + 7) > dt->limit)
661 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
662 ptr = dt->base + index;
665 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
666 if ((e2 & DESC_S_MASK) ||
667 (type != 2 && type != 9))
668 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
669 if (!(e2 & DESC_P_MASK))
670 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
671 load_seg_cache(&env->tr, e1, e2);
672 e2 |= 0x00000200; /* set the busy bit */
675 env->tr.selector = selector;
678 /* only works if protected mode and not VM86 */
/* Load segment register 'seg_reg' with 'selector': validate the
   descriptor (data or readable code; writable for SS), raise the
   appropriate fault (#GP, #SS or #NP), then fill the segment cache. */
679 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
684 sc = &env->segs[seg_reg];
685 if ((selector & 0xfffc) == 0) {
686 /* null selector case */
687 if (seg_reg == R_SS) {
/* a null SS is a #GP(0) */
689 raise_exception_err(EXCP0D_GPF, 0);
691 /* XXX: each access should trigger an exception */
697 if (load_segment(&e1, &e2, selector) != 0) {
699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* must be a memory segment; execute-only code is not loadable */
701 if (!(e2 & DESC_S_MASK) ||
702 (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
707 if (seg_reg == R_SS) {
/* SS must be a writable data segment */
708 if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
713 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* not-present faults: #SS for the stack segment, #NP otherwise */
719 if (!(e2 & DESC_P_MASK)) {
722 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
724 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
726 load_seg_cache(sc, e1, e2);
728 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
729 selector, (unsigned long)sc->base, sc->limit, sc->flags);
732 sc->selector = selector;
735 /* protected mode jump */
/* Far JMP in protected mode: validate the target as a code segment
   (conforming vs non-conforming privilege rules), check new_eip
   against the limit, and load CS.  Call/task gates are not handled
   (cpu_abort below). */
736 void jmp_seg(int selector, unsigned int new_eip)
739 uint32_t e1, e2, cpl, dpl, rpl;
741 if ((selector & 0xfffc) == 0) {
742 raise_exception_err(EXCP0D_GPF, 0);
745 if (load_segment(&e1, &e2, selector) != 0)
746 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
747 cpl = env->segs[R_CS].selector & 3;
748 if (e2 & DESC_S_MASK) {
749 if (!(e2 & DESC_CS_MASK))
750 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
751 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
752 if (e2 & DESC_CS_MASK) {
753 /* conforming code segment */
755 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
757 /* non conforming code segment */
760 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
762 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
764 if (!(e2 & DESC_P_MASK))
765 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
766 load_seg_cache(&sc1, e1, e2);
767 if (new_eip > sc1.limit)
768 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* commit: CPL is preserved in the low bits of the new CS selector */
769 env->segs[R_CS].base = sc1.base;
770 env->segs[R_CS].limit = sc1.limit;
771 env->segs[R_CS].flags = sc1.flags;
772 env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
775 cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
780 /* init the segment cache in vm86 mode */
/* In vm86/real mode the base is simply selector << 4; no descriptor
   lookup is performed. */
781 static inline void load_seg_vm(int seg, int selector)
783 SegmentCache *sc = &env->segs[seg];
785 sc->base = (uint8_t *)(selector << 4);
786 sc->selector = selector;
/* IRET in real/vm86 mode: pop EIP, CS and EFLAGS (32-bit frame when
   shift != 0, 16-bit otherwise), adjust SP, reload CS the vm86 way and
   update the writable EFLAGS bits. */
792 void helper_iret_real(int shift)
794 uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
798 sp = env->regs[R_ESP] & 0xffff;
799 ssp = env->segs[R_SS].base + sp;
/* 32-bit frame */
802 new_eflags = ldl(ssp + 8);
803 new_cs = ldl(ssp + 4) & 0xffff;
804 new_eip = ldl(ssp) & 0xffff;
/* 16-bit frame */
807 new_eflags = lduw(ssp + 4);
808 new_cs = lduw(ssp + 2);
811 new_esp = sp + (6 << shift);
/* only the low 16 bits of ESP change */
812 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
814 load_seg_vm(R_CS, new_cs);
816 eflags_mask = FL_UPDATE_CPL0_MASK;
818 eflags_mask &= 0xffff;
819 load_eflags(new_eflags, eflags_mask);
822 /* protected mode iret */
/* IRET in protected mode: pop and validate the return CS:EIP/EFLAGS,
   handle same-privilege and outer-privilege returns (the latter also
   pops SS:ESP and validates the new stack segment), and handle the
   return-to-vm86 case which additionally pops ES/DS/FS/GS. */
823 void helper_iret_protected(int shift)
825 uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
826 uint32_t new_es, new_ds, new_fs, new_gs;
828 int cpl, dpl, rpl, eflags_mask;
831 sp = env->regs[R_ESP];
/* with a 16-bit SS only the low 16 bits of ESP are valid */
832 if (!(env->segs[R_SS].flags & DESC_B_MASK))
834 ssp = env->segs[R_SS].base + sp;
/* 32-bit frame */
837 new_eflags = ldl(ssp + 8);
838 new_cs = ldl(ssp + 4) & 0xffff;
/* popped EFLAGS with VM set => return to vm86 (handled below) */
840 if (new_eflags & VM_MASK)
/* 16-bit frame */
844 new_eflags = lduw(ssp + 4);
845 new_cs = lduw(ssp + 2);
/* validate the return code segment */
848 if ((new_cs & 0xfffc) == 0)
849 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
850 if (load_segment(&e1, &e2, new_cs) != 0)
851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
852 if (!(e2 & DESC_S_MASK) ||
853 !(e2 & DESC_CS_MASK))
854 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
855 cpl = env->segs[R_CS].selector & 3;
858 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
859 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
860 if (e2 & DESC_CS_MASK) {
862 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
865 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
867 if (!(e2 & DESC_P_MASK))
868 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
871 /* return to same priledge level */
872 load_seg(R_CS, new_cs, env->eip);
873 new_esp = sp + (6 << shift);
875 /* return to differentr priviledge level */
/* outer-privilege return: SS:ESP are popped from the frame too */
878 new_esp = ldl(ssp + 12);
879 new_ss = ldl(ssp + 16) & 0xffff;
882 new_esp = lduw(ssp + 6);
883 new_ss = lduw(ssp + 8);
/* validate the new stack segment (writable data, dpl == rpl) */
886 if ((new_ss & 3) != rpl)
887 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
888 if (load_segment(&e1, &e2, new_ss) != 0)
889 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
890 if (!(e2 & DESC_S_MASK) ||
891 (e2 & DESC_CS_MASK) ||
893 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
896 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
900 load_seg(R_CS, new_cs, env->eip);
901 load_seg(R_SS, new_ss, env->eip);
903 if (env->segs[R_SS].flags & DESC_B_MASK)
904 env->regs[R_ESP] = new_esp;
906 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
/* which EFLAGS bits may be updated depends on CPL and operand size */
910 eflags_mask = FL_UPDATE_CPL0_MASK;
912 eflags_mask = FL_UPDATE_MASK32;
914 eflags_mask &= 0xffff;
915 load_eflags(new_eflags, eflags_mask);
/* return to vm86 mode: pop the extended frame incl. data selectors */
919 new_esp = ldl(ssp + 12);
920 new_ss = ldl(ssp + 16);
921 new_es = ldl(ssp + 20);
922 new_ds = ldl(ssp + 24);
923 new_fs = ldl(ssp + 28);
924 new_gs = ldl(ssp + 32);
926 /* modify processor state */
927 load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
928 load_seg_vm(R_CS, new_cs);
929 load_seg_vm(R_SS, new_ss);
930 load_seg_vm(R_ES, new_es);
931 load_seg_vm(R_DS, new_ds);
932 load_seg_vm(R_FS, new_fs);
933 load_seg_vm(R_GS, new_gs);
936 env->regs[R_ESP] = new_esp;
/* MOV CRn, T0 helper: store T0 into control register 'reg' and notify
   the emulator of CR0/CR3 changes (store lines are missing from this
   listing). */
939 void helper_movl_crN_T0(int reg)
944 cpu_x86_update_cr0(env);
947 cpu_x86_update_cr3(env);
/* MOV DRn, T0 helper (body missing from this listing; presumably just
   stores T0 into env's debug register 'reg' — confirm upstream). */
953 void helper_movl_drN_T0(int reg)
958 void helper_invlpg(unsigned int addr)
960 cpu_x86_flush_tlb(env, addr);
/* RDTSC helper: on an x86 host, read the host time-stamp counter
   directly; otherwise fall back to a monotonically increasing value
   (fallback lines are missing from this listing). */
968 void helper_rdtsc(void)
972 asm("rdtsc" : "=A" (val));
974 /* better than nothing: the time increases */
/* WRMSR helper: only the SYSENTER MSRs are implemented; other MSR
   indices fall through (see XXX below). */
981 void helper_wrmsr(void)
984 case MSR_IA32_SYSENTER_CS:
985 env->sysenter_cs = EAX & 0xffff;
987 case MSR_IA32_SYSENTER_ESP:
988 env->sysenter_esp = EAX;
990 case MSR_IA32_SYSENTER_EIP:
991 env->sysenter_eip = EAX;
994 /* XXX: exception ? */
/* RDMSR helper: mirror of helper_wrmsr() — only the SYSENTER MSRs are
   readable. */
999 void helper_rdmsr(void)
1002 case MSR_IA32_SYSENTER_CS:
1003 EAX = env->sysenter_cs;
1006 case MSR_IA32_SYSENTER_ESP:
1007 EAX = env->sysenter_esp;
1010 case MSR_IA32_SYSENTER_EIP:
1011 EAX = env->sysenter_eip;
1015 /* XXX: exception ? */
/* LSL helper: load the segment limit of the selector in T0 into T1 and
   set ZF on success (ZF is pre-cleared in CC_SRC; the success path
   setting it is in lines missing from this listing). */
1020 void helper_lsl(void)
1022 unsigned int selector, limit;
1025 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1026 selector = T0 & 0xffff;
1027 if (load_segment(&e1, &e2, selector) != 0)
1029 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
/* granularity bit: limit is in 4K pages */
1031 limit = (limit << 12) | 0xfff;
/* LAR helper: load the access-rights bytes of the selector in T0 into
   T1 (masked to the architecturally defined bits) and set ZF on
   success. */
1036 void helper_lar(void)
1038 unsigned int selector;
1041 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1042 selector = T0 & 0xffff;
1043 if (load_segment(&e1, &e2, selector) != 0)
1045 T1 = e2 & 0x00f0ff00;
1051 #ifndef USE_X86LDOUBLE
/* Software 80-bit load/store helpers, used when the host cannot
   represent x86 long double natively. */
/* FLD m80: push the 80-bit value at [A0] onto the FPU stack. */
1052 void helper_fldt_ST0_A0(void)
1055 new_fpstt = (env->fpstt - 1) & 7;
1056 env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
1057 env->fpstt = new_fpstt;
1058 env->fptags[new_fpstt] = 0; /* validate stack entry */
/* FSTP m80: store ST0 as an 80-bit value at [A0]. */
1061 void helper_fstt_ST0_A0(void)
1063 helper_fstt(ST0, (uint8_t *)A0);
/* iv * 10 without a multiply instruction */
1069 #define MUL10(iv) ( iv + iv + (iv << 3) )

/* FBLD: load an 18-digit packed-BCD value (10 bytes at [A0], two
   digits per byte, sign in bit 7 of byte 9) and push it on the FPU
   stack. */
1071 void helper_fbld_ST0_A0(void)
1079 for(i = 8; i >= 0; i--) {
1080 v = ldub((uint8_t *)A0 + i);
1081 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
/* apply the sign from the top byte */
1084 if (ldub((uint8_t *)A0 + 9) & 0x80)
/* FBSTP: store ST0 as an 18-digit packed-BCD value into the 10 bytes
   at [A0]; the second loop presumably zero-fills the remaining bytes
   (its body lines are missing from this listing). */
1090 void helper_fbst_ST0_A0(void)
1094 uint8_t *mem_ref, *mem_end;
1099 mem_ref = (uint8_t *)A0;
1100 mem_end = mem_ref + 9;
1107 while (mem_ref < mem_end) {
/* pack two decimal digits per byte */
1112 v = ((v / 10) << 4) | (v % 10);
1115 while (mem_ref < mem_end) {
1120 void helper_f2xm1(void)
1122 ST0 = pow(2.0,ST0) - 1.0;
/* FYL2X: ST1 = ST1 * log2(ST0), then pop — computed via host log().
   The else branch (ST0 <= 0, raising an FPU error) is in lines missing
   from this listing. */
1125 void helper_fyl2x(void)
1127 CPU86_LDouble fptemp;
1131 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
/* clear C0..C3 condition bits */
1135 env->fpus &= (~0x4700);
/* FPTAN: replace ST0 with tan(ST0) and push 1.0.  Arguments outside
   [-2^52, 2^52] set C2 to flag an unreduced operand instead. */
1140 void helper_fptan(void)
1142 CPU86_LDouble fptemp;
/* operand out of range: set C2, leave ST0 unchanged */
1145 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1151 env->fpus &= (~0x400); /* C2 <-- 0 */
1152 /* the above code is for |arg| < 2**52 only */
/* FPATAN: ST1 = atan2(ST1, ST0), then pop (the operand loads and the
   pop are in lines missing from this listing). */
1156 void helper_fpatan(void)
1158 CPU86_LDouble fptemp, fpsrcop;
1162 ST1 = atan2(fpsrcop,fptemp);
/* FXTRACT: split ST0 into exponent (replacing ST0) and significand
   (pushed), using the raw bit representation via CPU86_LDoubleU. */
1166 void helper_fxtract(void)
1168 CPU86_LDoubleU temp;
1169 unsigned int expdif;
/* unbiased exponent of ST0 */
1172 expdif = EXPD(temp) - EXPBIAS;
1173 /*DP exponent bias*/
/* FPREM1: IEEE partial remainder of ST0 / ST1.  When the exponent
   difference is small the remainder is computed directly and the low
   quotient bits are reported in C0/C1/C3; otherwise a partial
   reduction is performed and C2 is set to request another iteration. */
1180 void helper_fprem1(void)
1182 CPU86_LDouble dblq, fpsrcop, fptemp;
1183 CPU86_LDoubleU fpsrcop1, fptemp1;
1189 fpsrcop1.d = fpsrcop;
1191 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
/* complete reduction: round quotient toward zero */
1193 dblq = fpsrcop / fptemp;
1194 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1195 ST0 = fpsrcop - fptemp*dblq;
1196 q = (int)dblq; /* cutting off top bits is assumed here */
1197 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1198 /* (C0,C1,C3) <-- (q2,q1,q0) */
1199 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1200 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1201 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
/* partial reduction: bring the operands within 2^50 of each other */
1203 env->fpus |= 0x400; /* C2 <-- 1 */
1204 fptemp = pow(2.0, expdif-50);
1205 fpsrcop = (ST0 / ST1) / fptemp;
1206 /* fpsrcop = integer obtained by rounding to the nearest */
1207 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
1208 floor(fpsrcop): ceil(fpsrcop);
1209 ST0 -= (ST1 * fpsrcop * fptemp);
/* FPREM: 8087-style partial remainder of ST0 / ST1.  Same structure as
   helper_fprem1() but the partial-reduction quotient is obtained by
   truncation (chopping) rather than round-to-nearest. */
1213 void helper_fprem(void)
1215 CPU86_LDouble dblq, fpsrcop, fptemp;
1216 CPU86_LDoubleU fpsrcop1, fptemp1;
1222 fpsrcop1.d = fpsrcop;
1224 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1225 if ( expdif < 53 ) {
/* complete reduction: quotient truncated toward zero */
1226 dblq = fpsrcop / fptemp;
1227 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1228 ST0 = fpsrcop - fptemp*dblq;
1229 q = (int)dblq; /* cutting off top bits is assumed here */
1230 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1231 /* (C0,C1,C3) <-- (q2,q1,q0) */
1232 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1233 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1234 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
/* partial reduction; C2 set means "not done, run FPREM again" */
1236 env->fpus |= 0x400; /* C2 <-- 1 */
1237 fptemp = pow(2.0, expdif-50);
1238 fpsrcop = (ST0 / ST1) / fptemp;
1239 /* fpsrcop = integer obtained by chopping */
1240 fpsrcop = (fpsrcop < 0.0)?
1241 -(floor(fabs(fpsrcop))): floor(fpsrcop);
1242 ST0 -= (ST1 * fpsrcop * fptemp);
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop.  Only valid when
   ST0 + 1 > 0; the error branch is in lines missing from this
   listing. */
1246 void helper_fyl2xp1(void)
1248 CPU86_LDouble fptemp;
1251 if ((fptemp+1.0)>0.0) {
1252 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
/* clear C0..C3 */
1256 env->fpus &= (~0x4700);
/* FSQRT: ST0 = sqrt(ST0) (the sqrt call and the negative-operand
   handling are in lines missing from this listing). */
1261 void helper_fsqrt(void)
1263 CPU86_LDouble fptemp;
1267 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0).  Out-of-range
   arguments set C2 instead, as in helper_fptan(). */
1273 void helper_fsincos(void)
1275 CPU86_LDouble fptemp;
1278 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1284 env->fpus &= (~0x400); /* C2 <-- 0 */
1285 /* the above code is for |arg| < 2**63 only */
/* FRNDINT: round ST0 to an integer honoring the rounding-control bits
   of the FPU control word, using host rounding asm on x86 hosts
   (rndd = nearest, rnddm = down, rnddp = up, rnddz = toward zero —
   NOTE(review): mnemonics per surrounding code; non-x86 fallback is in
   lines missing from this listing). */
1289 void helper_frndint(void)
1295 switch(env->fpuc & RC_MASK) {
1298 asm("rndd %0, %1" : "=f" (a) : "f"(a));
1301 asm("rnddm %0, %1" : "=f" (a) : "f"(a));
1304 asm("rnddp %0, %1" : "=f" (a) : "f"(a));
1307 asm("rnddz %0, %1" : "=f" (a) : "f"(a));
/* FSCALE: scale ST0 by 2^trunc(ST1) (the operand setup and the final
   store are in lines missing from this listing; here fpsrcop is
   presumably 2.0, giving pow(2, ST1) — confirm upstream). */
1316 void helper_fscale(void)
1318 CPU86_LDouble fpsrcop, fptemp;
1321 fptemp = pow(fpsrcop,ST1);
/* FSIN: ST0 = sin(ST0); out-of-range arguments set C2 (operand not
   reduced), as in helper_fptan(). */
1325 void helper_fsin(void)
1327 CPU86_LDouble fptemp;
1330 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1334 env->fpus &= (~0x400); /* C2 <-- 0 */
1335 /* the above code is for |arg| < 2**53 only */
/* FCOS: ST0 = cos(ST0); out-of-range arguments set C2, as in
   helper_fsin(). */
1339 void helper_fcos(void)
1341 CPU86_LDouble fptemp;
1344 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1348 env->fpus &= (~0x400); /* C2 <-- 0 */
1349 /* the above code is for |arg5 < 2**63 only */
/* FXAM: classify ST0 and report the class in C0/C1/C2/C3 of the status
   word (sign in C1; infinity/NaN/zero/denormal encodings below). */
1353 void helper_fxam_ST0(void)
1355 CPU86_LDoubleU temp;
1360 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* C1 reflects the sign of ST0 */
1362 env->fpus |= 0x200; /* C1 <-- 1 */
1364 expdif = EXPD(temp);
/* all-ones exponent: infinity or NaN depending on the mantissa */
1365 if (expdif == MAXEXPD) {
1366 if (MANTD(temp) == 0)
1367 env->fpus |= 0x500 /*Infinity*/;
1369 env->fpus |= 0x100 /*NaN*/;
/* zero exponent: zero or denormal depending on the mantissa */
1370 } else if (expdif == 0) {
1371 if (MANTD(temp) == 0)
1372 env->fpus |= 0x4000 /*Zero*/;
1374 env->fpus |= 0x4400 /*Denormal*/;
/* FSTENV/FNSTENV: store the FPU environment (control word, status word
   with the current top-of-stack, tag word, ...) at ptr, using the
   32-bit layout when data32 is set, 16-bit otherwise.  The tag word is
   recomputed from the register contents. */
1380 void helper_fstenv(uint8_t *ptr, int data32)
1382 int fpus, fptag, exp, i;
/* fold the stack-top index into bits 11..13 of the status word */
1386 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1388 for (i=7; i>=0; i--) {
1390 if (env->fptags[i]) {
/* empty register: tag 3 (handled in lines missing here) */
1393 tmp.d = env->fpregs[i];
1396 if (exp == 0 && mant == 0) {
/* tag 1: zero */
1399 } else if (exp == 0 || exp == MAXEXPD
1400 #ifdef USE_X86LDOUBLE
1401 || (mant & (1LL << 63)) == 0
1404 /* NaNs, infinity, denormal */
/* 32-bit environment layout */
1411 stl(ptr, env->fpuc);
1413 stl(ptr + 8, fptag);
/* 16-bit environment layout */
1420 stw(ptr, env->fpuc);
1422 stw(ptr + 4, fptag);
/* FLDENV: reload the FPU environment stored by helper_fstenv(); the
   stack top is extracted from the status word and each register's tag
   is reduced to the empty/non-empty flag kept in env->fptags[].
   NOTE(review): the tag loop runs i < 7, i.e. 7 iterations for 8
   registers — possibly an off-by-one; the fptag shift per iteration is
   also in lines missing from this listing. */
1430 void helper_fldenv(uint8_t *ptr, int data32)
/* 32-bit layout */
1435 env->fpuc = lduw(ptr);
1436 fpus = lduw(ptr + 4);
1437 fptag = lduw(ptr + 8);
/* 16-bit layout */
1440 env->fpuc = lduw(ptr);
1441 fpus = lduw(ptr + 2);
1442 fptag = lduw(ptr + 4);
1444 env->fpstt = (fpus >> 11) & 7;
1445 env->fpus = fpus & ~0x3800;
1446 for(i = 0;i < 7; i++) {
1447 env->fptags[i] = ((fptag & 3) == 3);
/* FSAVE/FNSAVE: store the FPU environment followed by the eight data
   registers in 80-bit format (native long double store when
   USE_X86LDOUBLE, software conversion otherwise). */
1452 void helper_fsave(uint8_t *ptr, int data32)
1457 helper_fstenv(ptr, data32);
/* environment occupies 14 bytes (16-bit) or 28 bytes (32-bit) */
1459 ptr += (14 << data32);
1460 for(i = 0;i < 8; i++) {
1462 #ifdef USE_X86LDOUBLE
1463 *(long double *)ptr = tmp;
1465 helper_fstt(tmp, ptr);
1484 void helper_frstor(uint8_t *ptr, int data32)
1489 helper_fldenv(ptr, data32);
1490 ptr += (14 << data32);
1492 for(i = 0;i < 8; i++) {
1493 #ifdef USE_X86LDOUBLE
1494 tmp = *(long double *)ptr;
1496 tmp = helper_fldt(ptr);