4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 const uint8_t parity_table[256] = {
23 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
24 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
25 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
26 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
30 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
31 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
32 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
33 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
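/* Annotation added for clarity (not in the original source): each entry of
   parity_table is CC_P exactly when its index contains an even number of
   set bits, which is how the x86 PF flag is defined.  A minimal equivalent
   computation, as a sketch (the helper name is ours and is unused):  */
static inline int sketch_byte_parity(uint8_t b)
{
    b ^= b >> 4;    /* fold the high nibble into the low nibble */
    b ^= b >> 2;
    b ^= b >> 1;    /* bit 0 now holds the XOR of all 8 bits */
    return (b & 1) == 0 ? CC_P : 0;   /* even population count -> PF set */
}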
58 const uint8_t rclw_table[32] = {
59 0, 1, 2, 3, 4, 5, 6, 7,
60 8, 9,10,11,12,13,14,15,
61 16, 0, 1, 2, 3, 4, 5, 6,
62 7, 8, 9,10,11,12,13,14,
66 const uint8_t rclb_table[32] = {
67 0, 1, 2, 3, 4, 5, 6, 7,
68 8, 0, 1, 2, 3, 4, 5, 6,
69 7, 8, 0, 1, 2, 3, 4, 5,
70 6, 7, 8, 0, 1, 2, 3, 4,
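/* Annotation added for clarity (not in the original source): RCL/RCR
   rotate through CF, so an 8-bit rotate really spans 9 bits and a 16-bit
   rotate 17 bits.  The two tables above reduce a masked count (0..31)
   modulo 9 or 17 without a divide; the equivalent computation, as a
   sketch (the helper name is ours):  */
static inline int sketch_rcl_count(int count, int operand_bits)
{
    return (count & 0x1f) % (operand_bits + 1);   /* +1 for the CF bit */
}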
73 const CPU86_LDouble f15rk[7] =
75 0.00000000000000000000L,
76 1.00000000000000000000L,
77 3.14159265358979323851L, /*pi*/
78 0.30102999566398119523L, /*lg2*/
79 0.69314718055994530943L, /*ln2*/
80 1.44269504088896340739L, /*l2e*/
81 3.32192809488736234781L, /*l2t*/
86 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
90 spin_lock(&global_cpu_lock);
95 spin_unlock(&global_cpu_lock);
98 void cpu_loop_exit(void)
100 /* NOTE: the registers at this point must be saved by hand because
101 longjmp restores them */
103 env->regs[R_EAX] = EAX;
106 env->regs[R_ECX] = ECX;
109 env->regs[R_EDX] = EDX;
112 env->regs[R_EBX] = EBX;
115 env->regs[R_ESP] = ESP;
118 env->regs[R_EBP] = EBP;
121 env->regs[R_ESI] = ESI;
124 env->regs[R_EDI] = EDI;
126 longjmp(env->jmp_env, 1);
129 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
130 uint32_t *esp_ptr, int dpl)
132 int type, index, shift;
137 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
138 for(i=0;i<env->tr.limit;i++) {
139 printf("%02x ", env->tr.base[i]);
140 if ((i & 7) == 7) printf("\n");
146 if (!(env->tr.flags & DESC_P_MASK))
147 cpu_abort(env, "invalid tss");
148 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
150 cpu_abort(env, "invalid tss type");
152 index = (dpl * 4 + 2) << shift;
153 if (index + (4 << shift) - 1 > env->tr.limit)
154 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
156 *esp_ptr = lduw_kernel(env->tr.base + index);
157 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
159 *esp_ptr = ldl_kernel(env->tr.base + index);
160 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
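/* Worked example added for clarity (not in the original source): in a
   32-bit TSS (shift == 1) the DPL-0 stack pointer sits at offset
   (0 * 4 + 2) << 1 == 4, i.e. ESP0 at TSS+4 and SS0 at TSS+8; for DPL 1
   the pair is at TSS+12 and TSS+16.  In a 16-bit TSS (shift == 0) the
   DPL-0 pair is SP0 at TSS+2 and SS0 at TSS+4. */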
164 /* return non-zero on error */
165 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
176 index = selector & ~7;
177 if ((index + 7) > dt->limit)
179 ptr = dt->base + index;
180 *e1_ptr = ldl_kernel(ptr);
181 *e2_ptr = ldl_kernel(ptr + 4);
185 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
188 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
189 if (e2 & DESC_G_MASK)
190 limit = (limit << 12) | 0xfff;
194 static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
196 return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
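/* Worked example added for clarity (not in the original source): for a
   flat 4 GiB descriptor with e1 = 0x0000ffff and e2 = 0x00cf9a00 the two
   helpers above give base = 0 (bits 16..31 of e1 plus bits 0..7 and
   24..31 of e2) and limit = 0xfffff; since DESC_G_MASK is set the byte
   limit becomes (0xfffff << 12) | 0xfff = 0xffffffff. */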
199 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
201 sc->base = get_seg_base(e1, e2);
202 sc->limit = get_seg_limit(e1, e2);
206 /* init the segment cache in vm86 mode. */
207 static inline void load_seg_vm(int seg, int selector)
210 cpu_x86_load_seg_cache(env, seg, selector,
211 (uint8_t *)(selector << 4), 0xffff, 0);
214 /* protected mode interrupt */
215 static void do_interrupt_protected(int intno, int is_int, int error_code,
216 unsigned int next_eip, int is_hw)
220 int type, dpl, selector, ss_dpl, cpl;
221 int has_error_code, new_stack, shift;
222 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
223 uint32_t old_cs, old_ss, old_esp, old_eip;
226 if (intno * 8 + 7 > dt->limit)
227 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
228 ptr = dt->base + intno * 8;
229 e1 = ldl_kernel(ptr);
230 e2 = ldl_kernel(ptr + 4);
231 /* check gate type */
232 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
234 case 5: /* task gate */
235 cpu_abort(env, "task gate not supported");
237 case 6: /* 286 interrupt gate */
238 case 7: /* 286 trap gate */
239 case 14: /* 386 interrupt gate */
240 case 15: /* 386 trap gate */
243 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
247 cpl = env->hflags & HF_CPL_MASK;
248 /* check privilege if software int */
249 if (is_int && dpl < cpl)
250 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
251 /* check valid bit */
252 if (!(e2 & DESC_P_MASK))
253 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
255 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
256 if ((selector & 0xfffc) == 0)
257 raise_exception_err(EXCP0D_GPF, 0);
259 if (load_segment(&e1, &e2, selector) != 0)
260 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
261 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
262 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
263 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
266 if (!(e2 & DESC_P_MASK))
267 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
268 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
269 /* to inner privilege level */
270 get_ss_esp_from_tss(&ss, &esp, dpl);
271 if ((ss & 0xfffc) == 0)
272 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
274 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
275 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
276 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
277 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
279 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
280 if (!(ss_e2 & DESC_S_MASK) ||
281 (ss_e2 & DESC_CS_MASK) ||
282 !(ss_e2 & DESC_W_MASK))
283 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
284 if (!(ss_e2 & DESC_P_MASK))
285 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
287 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
288 /* to same privilege level */
291 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
292 new_stack = 0; /* avoid warning */
297 if (!is_int && !is_hw) {
310 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
311 if (env->eflags & VM_MASK)
315 /* XXX: check that enough room is available */
318 old_ss = env->segs[R_SS].selector;
319 ss = (ss & ~3) | dpl;
320 cpu_x86_load_seg_cache(env, R_SS, ss,
321 get_seg_base(ss_e1, ss_e2),
322 get_seg_limit(ss_e1, ss_e2),
333 old_cs = env->segs[R_CS].selector;
334 selector = (selector & ~3) | dpl;
335 cpu_x86_load_seg_cache(env, R_CS, selector,
336 get_seg_base(e1, e2),
337 get_seg_limit(e1, e2),
339 cpu_x86_set_cpl(env, dpl);
341 ESP = esp - push_size;
342 ssp = env->segs[R_SS].base + esp;
345 if (env->eflags & VM_MASK) {
347 stl_kernel(ssp, env->segs[R_GS].selector);
349 stl_kernel(ssp, env->segs[R_FS].selector);
351 stl_kernel(ssp, env->segs[R_DS].selector);
353 stl_kernel(ssp, env->segs[R_ES].selector);
357 stl_kernel(ssp, old_ss);
359 stl_kernel(ssp, old_esp);
362 old_eflags = compute_eflags();
363 stl_kernel(ssp, old_eflags);
365 stl_kernel(ssp, old_cs);
367 stl_kernel(ssp, old_eip);
368 if (has_error_code) {
370 stl_kernel(ssp, error_code);
375 stw_kernel(ssp, old_ss);
377 stw_kernel(ssp, old_esp);
380 stw_kernel(ssp, compute_eflags());
382 stw_kernel(ssp, old_cs);
384 stw_kernel(ssp, old_eip);
385 if (has_error_code) {
387 stw_kernel(ssp, error_code);
391 /* interrupt gates clear the IF flag */
392 if ((type & 1) == 0) {
393 env->eflags &= ~IF_MASK;
395 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
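/* Annotation added for clarity (not in the original source): the frame
   built above contains, from higher to lower addresses, old SS and old
   ESP (only when switching to an inner stack), then EFLAGS, old CS, old
   EIP and, for exceptions that define one, the error code; when the
   interrupt is taken from VM86 mode the old GS/FS/DS/ES selectors are
   additionally saved above the SS:ESP pair. */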
398 /* real mode interrupt */
399 static void do_interrupt_real(int intno, int is_int, int error_code,
400 unsigned int next_eip)
405 uint32_t offset, esp;
406 uint32_t old_cs, old_eip;
408 /* real mode (simpler !) */
410 if (intno * 4 + 3 > dt->limit)
411 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
412 ptr = dt->base + intno * 4;
413 offset = lduw_kernel(ptr);
414 selector = lduw_kernel(ptr + 2);
416 ssp = env->segs[R_SS].base;
421 old_cs = env->segs[R_CS].selector;
423 stw_kernel(ssp + (esp & 0xffff), compute_eflags());
425 stw_kernel(ssp + (esp & 0xffff), old_cs);
427 stw_kernel(ssp + (esp & 0xffff), old_eip);
429 /* update processor state */
430 ESP = (ESP & ~0xffff) | (esp & 0xffff);
432 env->segs[R_CS].selector = selector;
433 env->segs[R_CS].base = (uint8_t *)(selector << 4);
434 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
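/* Worked example added for clarity (not in the original source): real
   mode vector 0x10 is read from the IVT at 0x10 * 4 = 0x40, offset first
   and segment second, so an entry of f000:1234 resumes execution at
   linear address (0xf000 << 4) + 0x1234. */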
437 /* fake user mode interrupt */
438 void do_interrupt_user(int intno, int is_int, int error_code,
439 unsigned int next_eip)
447 ptr = dt->base + (intno * 8);
448 e2 = ldl_kernel(ptr + 4);
450 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
451 cpl = env->hflags & HF_CPL_MASK;
452 /* check privilege if software int */
453 if (is_int && dpl < cpl)
454 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
456 /* Since we emulate only user space, we cannot do more than
457 exiting the emulation with the suitable exception and error
464 * Begin execution of an interrupt. is_int is TRUE if coming from
465 * the int instruction. next_eip is the EIP value AFTER the interrupt
466 * instruction. It is only relevant if is_int is TRUE.
468 void do_interrupt(int intno, int is_int, int error_code,
469 unsigned int next_eip, int is_hw)
471 if (env->cr[0] & CR0_PE_MASK) {
472 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
474 do_interrupt_real(intno, is_int, error_code, next_eip);
479 * Signal an interrupt. It is executed in the main CPU loop.
480 * is_int is TRUE if coming from the int instruction. next_eip is the
481 * EIP value AFTER the interrupt instruction. It is only relevant if
484 void raise_interrupt(int intno, int is_int, int error_code,
485 unsigned int next_eip)
487 env->exception_index = intno;
488 env->error_code = error_code;
489 env->exception_is_int = is_int;
490 env->exception_next_eip = next_eip;
494 /* shortcuts to generate exceptions */
495 void raise_exception_err(int exception_index, int error_code)
497 raise_interrupt(exception_index, 0, error_code, 0);
500 void raise_exception(int exception_index)
502 raise_interrupt(exception_index, 0, 0, 0);
505 #ifdef BUGGY_GCC_DIV64
506 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
507 call it from another function */
508 uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
514 int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
521 void helper_divl_EAX_T0(uint32_t eip)
523 unsigned int den, q, r;
526 num = EAX | ((uint64_t)EDX << 32);
530 raise_exception(EXCP00_DIVZ);
532 #ifdef BUGGY_GCC_DIV64
533 r = div64(&q, num, den);
542 void helper_idivl_EAX_T0(uint32_t eip)
547 num = EAX | ((uint64_t)EDX << 32);
551 raise_exception(EXCP00_DIVZ);
553 #ifdef BUGGY_GCC_DIV64
554 r = idiv64(&q, num, den);
563 void helper_cmpxchg8b(void)
568 eflags = cc_table[CC_OP].compute_all();
569 d = ldq((uint8_t *)A0);
570 if (d == (((uint64_t)EDX << 32) | EAX)) {
571 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
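/* Minimal C model of the comparison implemented above, added as a sketch
   (not in the original source, names are ours): CMPXCHG8B compares
   EDX:EAX with the 64-bit memory operand, stores ECX:EBX and sets ZF on a
   match, otherwise loads the memory value into EDX:EAX and clears ZF.  */
static inline int sketch_cmpxchg8b(uint64_t *mem, uint64_t *edx_eax,
                                   uint64_t ecx_ebx)
{
    if (*mem == *edx_eax) {
        *mem = ecx_ebx;       /* ZF = 1 */
        return 1;
    } else {
        *edx_eax = *mem;      /* ZF = 0 */
        return 0;
    }
}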
581 /* We simulate a pre-MMX Pentium, as Valgrind does */
582 #define CPUID_FP87 (1 << 0)
583 #define CPUID_VME (1 << 1)
584 #define CPUID_DE (1 << 2)
585 #define CPUID_PSE (1 << 3)
586 #define CPUID_TSC (1 << 4)
587 #define CPUID_MSR (1 << 5)
588 #define CPUID_PAE (1 << 6)
589 #define CPUID_MCE (1 << 7)
590 #define CPUID_CX8 (1 << 8)
591 #define CPUID_APIC (1 << 9)
592 #define CPUID_SEP (1 << 11) /* sysenter/sysexit */
593 #define CPUID_MTRR (1 << 12)
594 #define CPUID_PGE (1 << 13)
595 #define CPUID_MCA (1 << 14)
596 #define CPUID_CMOV (1 << 15)
598 #define CPUID_MMX (1 << 23)
599 #define CPUID_FXSR (1 << 24)
600 #define CPUID_SSE (1 << 25)
601 #define CPUID_SSE2 (1 << 26)
603 void helper_cpuid(void)
606 EAX = 1; /* max EAX index supported */
610 } else if (EAX == 1) {
611 int family, model, stepping;
624 EAX = (family << 8) | (model << 4) | stepping;
627 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
628 CPUID_TSC | CPUID_MSR | CPUID_MCE |
629 CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
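/* Worked example added for clarity (not in the original source): the
   version word is packed as (family << 8) | (model << 4) | stepping, so
   e.g. family 5, model 2, stepping 11 reads back as EAX = 0x052b after
   CPUID with EAX = 1. */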
633 void helper_lldt_T0(void)
641 selector = T0 & 0xffff;
642 if ((selector & 0xfffc) == 0) {
643 /* XXX: NULL selector case: invalid LDT */
644 env->ldt.base = NULL;
648 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
650 index = selector & ~7;
651 if ((index + 7) > dt->limit)
652 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
653 ptr = dt->base + index;
654 e1 = ldl_kernel(ptr);
655 e2 = ldl_kernel(ptr + 4);
656 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
657 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
658 if (!(e2 & DESC_P_MASK))
659 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
660 load_seg_cache_raw_dt(&env->ldt, e1, e2);
662 env->ldt.selector = selector;
665 void helper_ltr_T0(void)
673 selector = T0 & 0xffff;
674 if ((selector & 0xfffc) == 0) {
675 /* NULL selector case: invalid TR */
681 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
683 index = selector & ~7;
684 if ((index + 7) > dt->limit)
685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686 ptr = dt->base + index;
687 e1 = ldl_kernel(ptr);
688 e2 = ldl_kernel(ptr + 4);
689 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
690 if ((e2 & DESC_S_MASK) ||
691 (type != 1 && type != 9)) /* must be an available 286 or 386 TSS */
692 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
693 if (!(e2 & DESC_P_MASK))
694 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
695 load_seg_cache_raw_dt(&env->tr, e1, e2);
696 e2 |= 0x00000200; /* set the busy bit */
697 stl_kernel(ptr + 4, e2);
699 env->tr.selector = selector;
702 /* only works in protected mode and not in VM86 mode. Calling load_seg with
703 seg_reg == R_CS is discouraged */
704 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
708 if ((selector & 0xfffc) == 0) {
709 /* null selector case */
710 if (seg_reg == R_SS) {
712 raise_exception_err(EXCP0D_GPF, 0);
714 cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
717 if (load_segment(&e1, &e2, selector) != 0) {
719 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
721 if (!(e2 & DESC_S_MASK) ||
722 (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
727 if (seg_reg == R_SS) {
728 if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
730 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
735 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
739 if (!(e2 & DESC_P_MASK)) {
742 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
744 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
746 cpu_x86_load_seg_cache(env, seg_reg, selector,
747 get_seg_base(e1, e2),
748 get_seg_limit(e1, e2),
751 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
752 selector, (unsigned long)sc->base, sc->limit, sc->flags);
757 /* protected mode jump */
758 void helper_ljmp_protected_T0_T1(void)
761 uint32_t e1, e2, cpl, dpl, rpl, limit;
765 if ((new_cs & 0xfffc) == 0)
766 raise_exception_err(EXCP0D_GPF, 0);
767 if (load_segment(&e1, &e2, new_cs) != 0)
768 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
769 cpl = env->hflags & HF_CPL_MASK;
770 if (e2 & DESC_S_MASK) {
771 if (!(e2 & DESC_CS_MASK))
772 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
773 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
774 if (e2 & DESC_CS_MASK) {
775 /* conforming code segment */
777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
779 /* non-conforming code segment */
782 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
786 if (!(e2 & DESC_P_MASK))
787 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
788 limit = get_seg_limit(e1, e2);
790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
791 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
792 get_seg_base(e1, e2), limit, e2);
795 cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
801 void helper_lcall_real_T0_T1(int shift, int next_eip)
804 uint32_t esp, esp_mask;
810 esp_mask = 0xffffffff;
811 if (!(env->segs[R_SS].flags & DESC_B_MASK))
813 ssp = env->segs[R_SS].base;
816 stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
818 stl_kernel(ssp + (esp & esp_mask), next_eip);
821 stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
823 stw_kernel(ssp + (esp & esp_mask), next_eip);
826 if (!(env->segs[R_SS].flags & DESC_B_MASK))
827 ESP = (ESP & ~0xffff) | (esp & 0xffff);
831 env->segs[R_CS].selector = new_cs;
832 env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
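/* Annotation added for clarity (not in the original source): the real
   mode far call above pushes CS first and then the return IP, each 2 or 4
   bytes wide depending on 'shift', which is exactly the frame the
   matching RETF will pop. */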
835 /* protected mode call */
836 void helper_lcall_protected_T0_T1(int shift, int next_eip)
839 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
840 uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
841 uint32_t old_ss, old_esp, val, i, limit;
842 uint8_t *ssp, *old_ssp;
846 if ((new_cs & 0xfffc) == 0)
847 raise_exception_err(EXCP0D_GPF, 0);
848 if (load_segment(&e1, &e2, new_cs) != 0)
849 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
850 cpl = env->hflags & HF_CPL_MASK;
851 if (e2 & DESC_S_MASK) {
852 if (!(e2 & DESC_CS_MASK))
853 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
854 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
855 if (e2 & DESC_CS_MASK) {
856 /* conforming code segment */
858 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
860 /* non-conforming code segment */
863 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
865 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
867 if (!(e2 & DESC_P_MASK))
868 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
871 if (!(env->segs[R_SS].flags & DESC_B_MASK))
873 ssp = env->segs[R_SS].base + sp;
876 stl_kernel(ssp, env->segs[R_CS].selector);
878 stl_kernel(ssp, next_eip);
881 stw_kernel(ssp, env->segs[R_CS].selector);
883 stw_kernel(ssp, next_eip);
887 limit = get_seg_limit(e1, e2);
889 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
890 /* from this point, not restartable */
891 if (!(env->segs[R_SS].flags & DESC_B_MASK))
892 ESP = (ESP & 0xffff0000) | (sp & 0xffff);
895 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
896 get_seg_base(e1, e2), limit, e2);
899 /* check gate type */
900 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
902 case 1: /* available 286 TSS */
903 case 9: /* available 386 TSS */
904 case 5: /* task gate */
905 cpu_abort(env, "task gate not supported");
907 case 4: /* 286 call gate */
908 case 12: /* 386 call gate */
911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
916 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
918 if (dpl < cpl || dpl < rpl)
919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
920 /* check valid bit */
921 if (!(e2 & DESC_P_MASK))
922 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
924 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
925 if ((selector & 0xfffc) == 0)
926 raise_exception_err(EXCP0D_GPF, 0);
928 if (load_segment(&e1, &e2, selector) != 0)
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
930 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
931 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
934 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
935 if (!(e2 & DESC_P_MASK))
936 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
938 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
939 /* to inner privilege level */
940 get_ss_esp_from_tss(&ss, &sp, dpl);
941 if ((ss & 0xfffc) == 0)
942 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
944 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
945 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
946 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
947 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
950 if (!(ss_e2 & DESC_S_MASK) ||
951 (ss_e2 & DESC_CS_MASK) ||
952 !(ss_e2 & DESC_W_MASK))
953 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
954 if (!(ss_e2 & DESC_P_MASK))
955 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
957 param_count = e2 & 0x1f;
958 push_size = ((param_count * 2) + 8) << shift;
961 old_ss = env->segs[R_SS].selector;
962 if (!(env->segs[R_SS].flags & DESC_B_MASK))
964 old_ssp = env->segs[R_SS].base + old_esp;
966 /* XXX: from this point not restartable */
967 ss = (ss & ~3) | dpl;
968 cpu_x86_load_seg_cache(env, R_SS, ss,
969 get_seg_base(ss_e1, ss_e2),
970 get_seg_limit(ss_e1, ss_e2),
973 if (!(env->segs[R_SS].flags & DESC_B_MASK))
975 ssp = env->segs[R_SS].base + sp;
978 stl_kernel(ssp, old_ss);
980 stl_kernel(ssp, old_esp);
981 ssp -= 4 * param_count;
982 for(i = 0; i < param_count; i++) {
983 val = ldl_kernel(old_ssp + i * 4);
984 stl_kernel(ssp + i * 4, val);
988 stw_kernel(ssp, old_ss);
990 stw_kernel(ssp, old_esp);
991 ssp -= 2 * param_count;
992 for(i = 0; i < param_count; i++) {
993 val = lduw_kernel(old_ssp + i * 2);
994 stw_kernel(ssp + i * 2, val);
998 /* to same privilege level */
999 if (!(env->segs[R_SS].flags & DESC_B_MASK))
1001 ssp = env->segs[R_SS].base + sp;
1002 push_size = (4 << shift);
1007 stl_kernel(ssp, env->segs[R_CS].selector);
1009 stl_kernel(ssp, next_eip);
1012 stw_kernel(ssp, env->segs[R_CS].selector);
1014 stw_kernel(ssp, next_eip);
1018 selector = (selector & ~3) | dpl;
1019 cpu_x86_load_seg_cache(env, R_CS, selector,
1020 get_seg_base(e1, e2),
1021 get_seg_limit(e1, e2),
1023 cpu_x86_set_cpl(env, dpl);
1025 /* from this point, not restartable if same privilege level */
1026 if (!(env->segs[R_SS].flags & DESC_B_MASK))
1027 ESP = (ESP & 0xffff0000) | (sp & 0xffff);
1034 /* real mode iret */
1035 void helper_iret_real(int shift)
1037 uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
1042 ssp = env->segs[R_SS].base + sp;
1045 new_eflags = ldl_kernel(ssp + 8);
1046 new_cs = ldl_kernel(ssp + 4) & 0xffff;
1047 new_eip = ldl_kernel(ssp) & 0xffff;
1050 new_eflags = lduw_kernel(ssp + 4);
1051 new_cs = lduw_kernel(ssp + 2);
1052 new_eip = lduw_kernel(ssp);
1054 new_esp = sp + (6 << shift);
1055 ESP = (ESP & 0xffff0000) |
1057 load_seg_vm(R_CS, new_cs);
1059 eflags_mask = FL_UPDATE_CPL0_MASK;
1061 eflags_mask &= 0xffff;
1062 load_eflags(new_eflags, eflags_mask);
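/* Worked example added for clarity (not in the original source): with a
   16-bit operand size (shift == 0) the three pops above (IP, CS, FLAGS)
   consume 6 bytes, hence new_esp = sp + 6; with a 32-bit operand size the
   same frame occupies 12 bytes. */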
1065 /* protected mode iret */
1066 static inline void helper_ret_protected(int shift, int is_iret, int addend)
1068 uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
1069 uint32_t new_es, new_ds, new_fs, new_gs;
1070 uint32_t e1, e2, ss_e1, ss_e2;
1071 int cpl, dpl, rpl, eflags_mask;
1075 if (!(env->segs[R_SS].flags & DESC_B_MASK))
1077 ssp = env->segs[R_SS].base + sp;
1081 new_eflags = ldl_kernel(ssp + 8);
1082 new_cs = ldl_kernel(ssp + 4) & 0xffff;
1083 new_eip = ldl_kernel(ssp);
1084 if (is_iret && (new_eflags & VM_MASK))
1085 goto return_to_vm86;
1089 new_eflags = lduw_kernel(ssp + 4);
1090 new_cs = lduw_kernel(ssp + 2);
1091 new_eip = lduw_kernel(ssp);
1093 if ((new_cs & 0xfffc) == 0)
1094 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1095 if (load_segment(&e1, &e2, new_cs) != 0)
1096 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1097 if (!(e2 & DESC_S_MASK) ||
1098 !(e2 & DESC_CS_MASK))
1099 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1100 cpl = env->hflags & HF_CPL_MASK;
1103 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1104 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1105 if (e2 & DESC_CS_MASK) {
1107 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1110 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1112 if (!(e2 & DESC_P_MASK))
1113 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1116 /* return to same privilege level */
1117 cpu_x86_load_seg_cache(env, R_CS, new_cs,
1118 get_seg_base(e1, e2),
1119 get_seg_limit(e1, e2),
1121 new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
1123 /* return to different privilege level */
1124 ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
1127 new_esp = ldl_kernel(ssp);
1128 new_ss = ldl_kernel(ssp + 4) & 0xffff;
1131 new_esp = lduw_kernel(ssp);
1132 new_ss = lduw_kernel(ssp + 2);
1135 if ((new_ss & 3) != rpl)
1136 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1137 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
1138 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1139 if (!(ss_e2 & DESC_S_MASK) ||
1140 (ss_e2 & DESC_CS_MASK) ||
1141 !(ss_e2 & DESC_W_MASK))
1142 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1143 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1145 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1146 if (!(ss_e2 & DESC_P_MASK))
1147 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
1149 cpu_x86_load_seg_cache(env, R_CS, new_cs,
1150 get_seg_base(e1, e2),
1151 get_seg_limit(e1, e2),
1153 cpu_x86_load_seg_cache(env, R_SS, new_ss,
1154 get_seg_base(ss_e1, ss_e2),
1155 get_seg_limit(ss_e1, ss_e2),
1157 cpu_x86_set_cpl(env, rpl);
1159 if (env->segs[R_SS].flags & DESC_B_MASK)
1162 ESP = (ESP & 0xffff0000) |
1166 /* NOTE: 'cpl' can be different from the current CPL */
1168 eflags_mask = FL_UPDATE_CPL0_MASK;
1170 eflags_mask = FL_UPDATE_MASK32;
1172 eflags_mask &= 0xffff;
1173 load_eflags(new_eflags, eflags_mask);
1178 new_esp = ldl_kernel(ssp + 12);
1179 new_ss = ldl_kernel(ssp + 16);
1180 new_es = ldl_kernel(ssp + 20);
1181 new_ds = ldl_kernel(ssp + 24);
1182 new_fs = ldl_kernel(ssp + 28);
1183 new_gs = ldl_kernel(ssp + 32);
1185 /* modify processor state */
1186 load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
1187 load_seg_vm(R_CS, new_cs);
1188 cpu_x86_set_cpl(env, 3);
1189 load_seg_vm(R_SS, new_ss);
1190 load_seg_vm(R_ES, new_es);
1191 load_seg_vm(R_DS, new_ds);
1192 load_seg_vm(R_FS, new_fs);
1193 load_seg_vm(R_GS, new_gs);
1199 void helper_iret_protected(int shift)
1201 helper_ret_protected(shift, 1, 0);
1204 void helper_lret_protected(int shift, int addend)
1206 helper_ret_protected(shift, 0, addend);
1209 void helper_movl_crN_T0(int reg)
1214 cpu_x86_update_cr0(env);
1217 cpu_x86_update_cr3(env);
1223 void helper_movl_drN_T0(int reg)
1228 void helper_invlpg(unsigned int addr)
1230 cpu_x86_flush_tlb(env, addr);
1238 void helper_rdtsc(void)
1242 asm("rdtsc" : "=A" (val));
1244 /* better than nothing: the time increases */
1251 void helper_wrmsr(void)
1254 case MSR_IA32_SYSENTER_CS:
1255 env->sysenter_cs = EAX & 0xffff;
1257 case MSR_IA32_SYSENTER_ESP:
1258 env->sysenter_esp = EAX;
1260 case MSR_IA32_SYSENTER_EIP:
1261 env->sysenter_eip = EAX;
1264 /* XXX: exception ? */
1269 void helper_rdmsr(void)
1272 case MSR_IA32_SYSENTER_CS:
1273 EAX = env->sysenter_cs;
1276 case MSR_IA32_SYSENTER_ESP:
1277 EAX = env->sysenter_esp;
1280 case MSR_IA32_SYSENTER_EIP:
1281 EAX = env->sysenter_eip;
1285 /* XXX: exception ? */
1290 void helper_lsl(void)
1292 unsigned int selector, limit;
1295 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1296 selector = T0 & 0xffff;
1297 if (load_segment(&e1, &e2, selector) != 0)
1299 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1301 limit = (limit << 12) | 0xfff;
1306 void helper_lar(void)
1308 unsigned int selector;
1311 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1312 selector = T0 & 0xffff;
1313 if (load_segment(&e1, &e2, selector) != 0)
1315 T1 = e2 & 0x00f0ff00;
1321 #ifndef USE_X86LDOUBLE
1322 void helper_fldt_ST0_A0(void)
1325 new_fpstt = (env->fpstt - 1) & 7;
1326 env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
1327 env->fpstt = new_fpstt;
1328 env->fptags[new_fpstt] = 0; /* validate stack entry */
1331 void helper_fstt_ST0_A0(void)
1333 helper_fstt(ST0, (uint8_t *)A0);
1339 #define MUL10(iv) ( iv + iv + (iv << 3) )
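/* Annotation added for clarity (not in the original source): MUL10(iv)
   expands to iv + iv + (iv << 3), i.e. 2*iv + 8*iv = 10*iv, a
   multiply-by-ten built from adds and a shift. */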
1341 void helper_fbld_ST0_A0(void)
1349 for(i = 8; i >= 0; i--) {
1350 v = ldub((uint8_t *)A0 + i);
1351 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
1354 if (ldub((uint8_t *)A0 + 9) & 0x80)
1360 void helper_fbst_ST0_A0(void)
1364 uint8_t *mem_ref, *mem_end;
1369 mem_ref = (uint8_t *)A0;
1370 mem_end = mem_ref + 9;
1377 while (mem_ref < mem_end) {
1382 v = ((v / 10) << 4) | (v % 10);
1385 while (mem_ref < mem_end) {
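/* Illustrative sketch added for clarity (not in the original source; the
   helper name is ours and is unused): the x87 packed BCD format used by
   the two helpers above stores 18 decimal digits, two per byte, least
   significant byte first, with the sign in bit 7 of the tenth byte.
   Encoding a non-negative value looks like this:  */
static void sketch_pack_bcd(uint64_t val, uint8_t out[10])
{
    int i, lo, hi;

    for (i = 0; i < 9; i++) {
        lo = val % 10;
        val /= 10;
        hi = val % 10;
        val /= 10;
        out[i] = (hi << 4) | lo;      /* two digits per byte */
    }
    out[9] = 0;                       /* positive operand: sign bit clear */
}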
1390 void helper_f2xm1(void)
1392 ST0 = pow(2.0,ST0) - 1.0;
1395 void helper_fyl2x(void)
1397 CPU86_LDouble fptemp;
1401 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
1405 env->fpus &= (~0x4700);
1410 void helper_fptan(void)
1412 CPU86_LDouble fptemp;
1415 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1421 env->fpus &= (~0x400); /* C2 <-- 0 */
1422 /* the above code is for |arg| < 2**52 only */
1426 void helper_fpatan(void)
1428 CPU86_LDouble fptemp, fpsrcop;
1432 ST1 = atan2(fpsrcop,fptemp);
1436 void helper_fxtract(void)
1438 CPU86_LDoubleU temp;
1439 unsigned int expdif;
1442 expdif = EXPD(temp) - EXPBIAS;
1443 /*DP exponent bias*/
1450 void helper_fprem1(void)
1452 CPU86_LDouble dblq, fpsrcop, fptemp;
1453 CPU86_LDoubleU fpsrcop1, fptemp1;
1459 fpsrcop1.d = fpsrcop;
1461 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1463 dblq = fpsrcop / fptemp;
1464 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1465 ST0 = fpsrcop - fptemp*dblq;
1466 q = (int)dblq; /* cutting off top bits is assumed here */
1467 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1468 /* (C0,C1,C3) <-- (q2,q1,q0) */
1469 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1470 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1471 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1473 env->fpus |= 0x400; /* C2 <-- 1 */
1474 fptemp = pow(2.0, expdif-50);
1475 fpsrcop = (ST0 / ST1) / fptemp;
1476 /* fpsrcop = integer obtained by rounding to the nearest */
1477 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
1478 floor(fpsrcop): ceil(fpsrcop);
1479 ST0 -= (ST1 * fpsrcop * fptemp);
1483 void helper_fprem(void)
1485 CPU86_LDouble dblq, fpsrcop, fptemp;
1486 CPU86_LDoubleU fpsrcop1, fptemp1;
1492 fpsrcop1.d = fpsrcop;
1494 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1495 if ( expdif < 53 ) {
1496 dblq = fpsrcop / fptemp;
1497 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1498 ST0 = fpsrcop - fptemp*dblq;
1499 q = (int)dblq; /* cutting off top bits is assumed here */
1500 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1501 /* (C0,C1,C3) <-- (q2,q1,q0) */
1502 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1503 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1504 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1506 env->fpus |= 0x400; /* C2 <-- 1 */
1507 fptemp = pow(2.0, expdif-50);
1508 fpsrcop = (ST0 / ST1) / fptemp;
1509 /* fpsrcop = integer obtained by chopping */
1510 fpsrcop = (fpsrcop < 0.0)?
1511 -(floor(fabs(fpsrcop))): floor(fpsrcop);
1512 ST0 -= (ST1 * fpsrcop * fptemp);
1516 void helper_fyl2xp1(void)
1518 CPU86_LDouble fptemp;
1521 if ((fptemp+1.0)>0.0) {
1522 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1526 env->fpus &= (~0x4700);
1531 void helper_fsqrt(void)
1533 CPU86_LDouble fptemp;
1537 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1543 void helper_fsincos(void)
1545 CPU86_LDouble fptemp;
1548 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1554 env->fpus &= (~0x400); /* C2 <-- 0 */
1555 /* the above code is for |arg| < 2**63 only */
1559 void helper_frndint(void)
1565 switch(env->fpuc & RC_MASK) {
1568 asm("rndd %0, %1" : "=f" (a) : "f"(a));
1571 asm("rnddm %0, %1" : "=f" (a) : "f"(a));
1574 asm("rnddp %0, %1" : "=f" (a) : "f"(a));
1577 asm("rnddz %0, %1" : "=f" (a) : "f"(a));
1586 void helper_fscale(void)
1588 CPU86_LDouble fpsrcop, fptemp;
1591 fptemp = pow(fpsrcop,ST1);
1595 void helper_fsin(void)
1597 CPU86_LDouble fptemp;
1600 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1604 env->fpus &= (~0x400); /* C2 <-- 0 */
1605 /* the above code is for |arg| < 2**53 only */
1609 void helper_fcos(void)
1611 CPU86_LDouble fptemp;
1614 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1618 env->fpus &= (~0x400); /* C2 <-- 0 */
1619 /* the above code is for |arg| < 2**63 only */
1623 void helper_fxam_ST0(void)
1625 CPU86_LDoubleU temp;
1630 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1632 env->fpus |= 0x200; /* C1 <-- 1 */
1634 expdif = EXPD(temp);
1635 if (expdif == MAXEXPD) {
1636 if (MANTD(temp) == 0)
1637 env->fpus |= 0x500 /*Infinity*/;
1639 env->fpus |= 0x100 /*NaN*/;
1640 } else if (expdif == 0) {
1641 if (MANTD(temp) == 0)
1642 env->fpus |= 0x4000 /*Zero*/;
1644 env->fpus |= 0x4400 /*Denormal*/;
1650 void helper_fstenv(uint8_t *ptr, int data32)
1652 int fpus, fptag, exp, i;
1656 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1658 for (i=7; i>=0; i--) {
1660 if (env->fptags[i]) {
1663 tmp.d = env->fpregs[i];
1666 if (exp == 0 && mant == 0) {
1669 } else if (exp == 0 || exp == MAXEXPD
1670 #ifdef USE_X86LDOUBLE
1671 || (mant & (1LL << 63)) == 0
1674 /* NaNs, infinity, denormal */
1681 stl(ptr, env->fpuc);
1683 stl(ptr + 8, fptag);
1690 stw(ptr, env->fpuc);
1692 stw(ptr + 4, fptag);
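/* Annotation added for clarity (not in the original source): the tag word
   stored above holds two bits per x87 register: 00 = valid, 01 = zero,
   10 = special (NaN, infinity, denormal) and 11 = empty, which is why the
   loop above classifies each register before packing fptag. */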
1700 void helper_fldenv(uint8_t *ptr, int data32)
1705 env->fpuc = lduw(ptr);
1706 fpus = lduw(ptr + 4);
1707 fptag = lduw(ptr + 8);
1710 env->fpuc = lduw(ptr);
1711 fpus = lduw(ptr + 2);
1712 fptag = lduw(ptr + 4);
1714 env->fpstt = (fpus >> 11) & 7;
1715 env->fpus = fpus & ~0x3800;
1716 for(i = 0; i < 8; i++) { /* 8 x87 registers, two tag bits each */
1717 env->fptags[i] = ((fptag & 3) == 3);
1722 void helper_fsave(uint8_t *ptr, int data32)
1727 helper_fstenv(ptr, data32);
1729 ptr += (14 << data32);
1730 for(i = 0;i < 8; i++) {
1732 #ifdef USE_X86LDOUBLE
1733 *(long double *)ptr = tmp;
1735 helper_fstt(tmp, ptr);
1754 void helper_frstor(uint8_t *ptr, int data32)
1759 helper_fldenv(ptr, data32);
1760 ptr += (14 << data32);
1762 for(i = 0;i < 8; i++) {
1763 #ifdef USE_X86LDOUBLE
1764 tmp = *(long double *)ptr;
1766 tmp = helper_fldt(ptr);
1773 #if !defined(CONFIG_USER_ONLY)
1775 #define MMUSUFFIX _mmu
1776 #define GETPC() (__builtin_return_address(0))
1779 #include "softmmu_template.h"
1782 #include "softmmu_template.h"
1785 #include "softmmu_template.h"
1788 #include "softmmu_template.h"
1792 /* try to fill the TLB and raise an exception on error. If retaddr is
1793 NULL, it means that the function was called in C code (i.e. not
1794 from generated code or from helper.c) */
1795 /* XXX: fix it to restore all registers */
1796 void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
1798 TranslationBlock *tb;
1801 CPUX86State *saved_env;
1803 /* XXX: hack to restore env in all cases, even if not called from
1806 env = cpu_single_env;
1807 if (is_write && page_unprotect(addr)) {
1808 /* nothing more to do: the page was write protected because
1809 there was code in it. page_unprotect() flushed the code. */
1812 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
1815 /* now we have a real cpu fault */
1816 pc = (unsigned long)retaddr;
1817 tb = tb_find_pc(pc);
1819 /* the PC is inside the translated code. It means that we have
1820 a virtual CPU fault */
1821 cpu_restore_state(tb, env, pc);
1824 raise_exception_err(EXCP0E_PAGE, env->error_code);