 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define raise_exception_err(a, b)\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
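/* The x86 parity flag is set when the low byte of a result has an even
   number of 1 bits; parity_table maps each byte value to CC_P or 0
   accordingly. As an illustrative sketch (not part of the original
   source), the table could equivalently be generated at init time: */
#if 0
static void build_parity_table(uint8_t *table)
{
    int i, j, bits;

    for (i = 0; i < 256; i++) {
        /* count the set bits of the byte value */
        bits = 0;
        for (j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        /* PF (CC_P) is set when the count is even */
        table[i] = (bits & 1) ? 0 : CC_P;
    }
}
#endif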
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
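/* RCL/RCR rotate through the carry flag, so a 16-bit rotate really acts
   on 17 bits and an 8-bit one on 9 bits. These tables reduce the raw
   5-bit shift count to the effective count: rclw_table[i] == i % 17 and
   rclb_table[i] == i % 9, e.g. rclb_table[12] == 3. */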
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
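/* These are the values used by the FPU constant-load instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); the generated
   FPU ops index this array to pick the constant to push. */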
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&global_cpu_lock);

void cpu_unlock(void)
    spin_unlock(&global_cpu_lock);

void cpu_loop_exit(void)
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
    index = selector & ~7;
    if ((index + 7) > dt->limit)
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
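/* The descriptor limit is a 20-bit field split across the two
   descriptor words. With the granularity (G) bit set it is scaled to
   4K units: e.g. a raw limit of 0xfffff becomes
   (0xfffff << 12) | 0xfff = 0xffffffff, i.e. the full 4 GB. */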
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
    int type, index, shift;

    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for (i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
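/* This indexing matches the architectural TSS layout: for a 32-bit TSS
   (shift = 1), index = dpl * 8 + 4, so ESP0 sits at offset 4 and SS0 at
   offset 8; for a 16-bit TSS (shift = 0), SP0 is at offset 2 and SS0 at
   offset 4. */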
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* a code segment must be readable to be loaded here */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
    if (seg_reg == R_SS || seg_reg == R_CS)
        raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        old_tss_limit_max = 103;
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for (i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for (i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
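/* The offsets above follow the architectural TSS layouts: in a 32-bit
   TSS, CR3 is at 0x1c, EIP at 0x20, EFLAGS at 0x24, the general
   registers at 0x28-0x44, the segment selectors at 0x48-0x5c, the LDT
   selector at 0x60 and the debug-trap word at 0x64. The 16-bit (286)
   TSS holds the same information as packed 16-bit fields. */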
/* NOTE: we must avoid memory exceptions during the task switch,
   so we make dummy accesses before */
/* XXX: it can still fail in some cases, so a bigger hack is
   necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);

    /* now if an exception occurs, it will occur in the next task
       context */
    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */

    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors, as the rest may trigger exceptions */
        for (i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
    int io_offset, val, mask;

    /* TSS must be a valid 32-bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
    val = lduw_kernel(env->tr.base + io_offset);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
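/* The I/O permission bitmap starts at the 16-bit offset stored at
   TSS+0x66 and holds one bit per port (bit set = access denied):
   addr >> 3 selects the byte and addr & 7 the bit within it. After the
   fetched word is shifted down by addr & 7 (done before the mask test),
   the low 'size' bits must all be zero for the access to proceed. */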
void check_iob_T0(void)

void check_iow_T0(void)

void check_iol_T0(void)

void check_iob_DX(void)
    check_io(EDX & 0xffff, 1);

void check_iow_DX(void)
    check_io(EDX & 0xffff, 2);

void check_iol_DX(void)
    check_io(EDX & 0xffff, 4);

static inline unsigned int get_sp_mask(unsigned int e2)
    if (e2 & DESC_B_MASK)
/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
#define PUSHL(ssp, sp, sp_mask, val)\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
#define POPW(ssp, sp, sp_mask, val)\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
#define POPL(ssp, sp, sp_mask, val)\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
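/* These macros operate on a local copy of the stack pointer: the elided
   lines first adjust sp by the operand size, so a 16-bit push expands
   to roughly
       sp -= 2; stw_kernel(ssp + (sp & sp_mask), val);
   with sp_mask selecting a 16-bit or 32-bit stack. ESP itself is only
   written back once a whole push/pop sequence has succeeded. */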
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;

    if (!is_int && !is_hw) {
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
                stl_kernel(ssp, error_code);
                stw_kernel(ssp, error_code);
            ESP = (esp & mask) | (ESP & ~mask);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege level */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege level */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        if (env->eflags & VM_MASK) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHL(ssp, esp, sp_mask, ESP);
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        if (env->eflags & VM_MASK) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHW(ssp, esp, sp_mask, ESP);
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
    if (env->eflags & VM_MASK) {
        cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
        cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
        cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
        cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
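/* In real mode the IVT entry for vector n is the 4-byte pair at n * 4:
   a 16-bit offset followed by a 16-bit segment, hence the two lduw
   reads above. The interrupt frame is FLAGS, CS, IP, pushed in that
   order. */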
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. */

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
                fprintf(logfile, " CR2=%08x", env->cr[2]);
                fprintf(logfile, " EAX=%08x", EAX);
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                fprintf(logfile, "\n");
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        do_interrupt_real(intno, is_int, error_code, next_eip);
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
    raise_interrupt(exception_index, 0, error_code, 0);

void raise_exception(int exception_index)
    raise_interrupt(exception_index, 0, 0, 0);

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)

void helper_divl_EAX_T0(uint32_t eip)
    unsigned int den, q, r;

    num = EAX | ((uint64_t)EDX << 32);
        raise_exception(EXCP00_DIVZ);
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);

void helper_idivl_EAX_T0(uint32_t eip)
    num = EAX | ((uint64_t)EDX << 32);
        raise_exception(EXCP00_DIVZ);
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
void helper_cmpxchg8b(void)
    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
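/* CMPXCHG8B semantics: compare EDX:EAX with the 64-bit operand at A0;
   if equal, store ECX:EBX there and set ZF, otherwise load the operand
   into EDX:EAX and clear ZF. The remaining (elided) lines handle the
   not-equal case and the eflags update. */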
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)

#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
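/* EDX feature bits returned by CPUID leaf 1; gaps in the numbering are
   simply bits this file does not define. A guest typically tests them
   as in "if (edx & CPUID_CX8)" before using CMPXCHG8B. */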
void helper_cpuid(void)
        EAX = 2; /* max EAX index supported */
        int family, model, stepping;

        /* pentium 75-200 */
        EAX = (family << 8) | (model << 4) | stepping;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        /* cache info: needed for Pentium Pro compatibility */
void helper_enter_level(int level, int data32)
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
        stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        stl(ssp + (esp & esp_mask), T1);
        stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        stw(ssp + (esp & esp_mask), T1);
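/* This is the frame-nesting part of ENTER: for a nesting level > 0, an
   (elided) loop copies level - 1 saved frame pointers from the old
   frame, then the new frame pointer (passed in T1 here) is pushed, in
   32-bit or 16-bit units depending on data32. */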
void helper_lldt_T0(void)
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;
void helper_ltr_T0(void)
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    env->tr.selector = selector;
/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            stl_kernel(ptr + 4, e2);

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non-conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
    int new_cs, new_eip;
    uint32_t esp, esp_mask;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non-conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* check valid bit */
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            selector = e1 >> 16;
            offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
            param_count = e2 & 0x1f;
            if ((selector & 0xfffc) == 0)
                raise_exception_err(EXCP0D_GPF, 0);
            if (load_segment(&e1, &e2, selector) != 0)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
            if (!(e2 & DESC_C_MASK) && dpl < cpl) {
                /* to inner privilege level */
                get_ss_esp_from_tss(&ss, &sp, dpl);
                if (loglevel & CPU_LOG_PCALL)
                    fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                            ss, sp, param_count, ESP);
                if ((ss & 0xfffc) == 0)
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
                if ((ss & 3) != dpl)
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
                if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK))
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
                if (!(ss_e2 & DESC_P_MASK))
                    raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

                // push_size = ((param_count * 2) + 8) << shift;

                old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
                old_ssp = env->segs[R_SS].base;
                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
                    PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                    PUSHL(ssp, sp, sp_mask, ESP);
                    for (i = param_count - 1; i >= 0; i--) {
                        val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                        PUSHL(ssp, sp, sp_mask, val);
                    PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                    PUSHW(ssp, sp, sp_mask, ESP);
                    for (i = param_count - 1; i >= 0; i--) {
                        val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                        PUSHW(ssp, sp, sp_mask, val);
                /* to same privilege level */
                sp_mask = get_sp_mask(env->segs[R_SS].flags);
                ssp = env->segs[R_SS].base;
                // push_size = (4 << shift);
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);

            /* from this point, not restartable */
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       get_seg_limit(ss_e1, ss_e2),

            selector = (selector & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_CS, selector,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2),
            cpu_x86_set_cpl(env, dpl);
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
/* real and vm86 mode iret */
void helper_iret_real(int shift)
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    ssp = env->segs[R_SS].base;
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        POPL(ssp, sp, sp_mask, new_eflags);
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
static inline void validate_seg(int seg_reg, int cpl)
    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
            POPW(ssp, sp, sp_mask, new_eflags);
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        /* return to different privilege level */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
        cpu_x86_set_cpl(env, rpl);
        sp_mask = get_sp_mask(ss_e2);

    /* validate data segments */
    validate_seg(R_ES, cpl);
    validate_seg(R_DS, cpl);
    validate_seg(R_FS, cpl);
    validate_seg(R_GS, cpl);

    ESP = (ESP & ~sp_mask) | (sp & sp_mask);

    /* NOTE: 'cpl' is the _old_ CPL */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        eflags_mask |= IOPL_MASK;
    iopl = (env->eflags >> IOPL_SHIFT) & 3;
        eflags_mask |= IF_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);

    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
void helper_iret_protected(int shift, int next_eip)
    int tss_selector, type;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
        helper_ret_protected(shift, 1, 0);

void helper_lret_protected(int shift, int addend)
    helper_ret_protected(shift, 0, addend);
void helper_sysenter(void)
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;

void helper_sysexit(void)
    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
void helper_movl_crN_T0(int reg)
        cpu_x86_update_cr0(env, T0);
        cpu_x86_update_cr3(env, T0);
        cpu_x86_update_cr4(env, T0);

void helper_movl_drN_T0(int reg)

void helper_invlpg(unsigned int addr)
    cpu_x86_flush_tlb(env, addr);

void helper_rdtsc(void)
    val = cpu_get_tsc(env);

void helper_wrmsr(void)
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        /* XXX: exception ? */

void helper_rdmsr(void)
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        /* XXX: exception ? */
void helper_lsl(void)
    unsigned int selector, limit;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl)
    limit = get_seg_limit(e1, e2);

void helper_lar(void)
    unsigned int selector;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl)
    T1 = e2 & 0x00f0ff00;

void helper_verr(void)
    unsigned int selector;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    if (!(e2 & DESC_S_MASK))
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        if (dpl < cpl || dpl < rpl)

void helper_verw(void)
    unsigned int selector;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    if (!(e2 & DESC_S_MASK))
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (dpl < cpl || dpl < rpl)
        if (!(e2 & DESC_W_MASK))
void helper_fldt_ST0_A0(void)
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */

void helper_fstt_ST0_A0(void)
    helper_fstt(ST0, (uint8_t *)A0);

void fpu_set_exception(int mask)
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
        fpu_set_exception(FPUS_ZE);

void fpu_raise_exception(void)
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
#if !defined(CONFIG_USER_ONLY)

void helper_fbld_ST0_A0(void)
    for (i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    if (ldub((uint8_t *)A0 + 9) & 0x80)

void helper_fbst_ST0_A0(void)
    uint8_t *mem_ref, *mem_end;

    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    while (mem_ref < mem_end) {
        v = ((v / 10) << 4) | (v % 10);
    while (mem_ref < mem_end) {

void helper_f2xm1(void)
    ST0 = pow(2.0, ST0) - 1.0;
void helper_fyl2x(void)
    CPU86_LDouble fptemp;

        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        env->fpus &= (~0x4700);

void helper_fptan(void)
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */

void helper_fpatan(void)
    CPU86_LDouble fptemp, fpsrcop;

    ST1 = atan2(fpsrcop, fptemp);

void helper_fxtract(void)
    CPU86_LDoubleU temp;
    unsigned int expdif;

    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */

void helper_fprem1(void)
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;

    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop - floor(fpsrcop) < ceil(fpsrcop) - fpsrcop) ?
            floor(fpsrcop) : ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
void helper_fprem(void)
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;

    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
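/* FPREM/FPREM1 compute a *partial* remainder: when the exponents of ST0
   and ST1 differ by 53 or more (more than a double's mantissa can
   express), only a scaled partial reduction is done and C2 is set, so
   software is expected to loop until C2 clears. On a complete reduction
   the low three quotient bits are reported in C0, C1 and C3 via the bit
   shifts above. */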
void helper_fyl2xp1(void)
    CPU86_LDouble fptemp;

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        env->fpus &= (~0x4700);

void helper_fsqrt(void)
    CPU86_LDouble fptemp;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */

void helper_fsincos(void)
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */

void helper_frndint(void)
    switch (env->fpuc & RC_MASK) {
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));

void helper_fscale(void)
    CPU86_LDouble fpsrcop, fptemp;

    fptemp = pow(fpsrcop, ST1);

void helper_fsin(void)
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */

void helper_fcos(void)
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
void helper_fxam_ST0(void)
    CPU86_LDoubleU temp;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x200; /* C1 <-- 1 */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500; /* Infinity */
            env->fpus |= 0x100; /* NaN */
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000; /* Zero */
            env->fpus |= 0x4400; /* Denormal */
void helper_fstenv(uint8_t *ptr, int data32)
    int fpus, fptag, exp, i;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for (i = 7; i >= 0; i--) {
        if (env->fptags[i]) {
            tmp.d = env->fpregs[i];
            if (exp == 0 && mant == 0) {
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
                /* NaNs, infinity, denormal */
        stl(ptr, env->fpuc);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
        stw(ptr, env->fpuc);
        stw(ptr + 4, fptag);

void helper_fldenv(uint8_t *ptr, int data32)
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);

void helper_fsave(uint8_t *ptr, int data32)
    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        helper_fstt(tmp, ptr);

void helper_frstor(uint8_t *ptr, int data32)
    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
/* XXX: merge with helper_fstt ? */

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
    CPU86_LDoubleU temp;

    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
    CPU86_LDoubleU temp;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.ll = ll | ((uint64_t)e << 52);
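/* Without a host 80-bit long double, these helpers convert between the
   guest's extended format (64-bit mantissa with an explicit integer
   bit, 15-bit exponent biased by 16383) and a host double (52-bit
   mantissa, exponent biased by EXPBIAS): the mantissa is shifted by 11
   bits and the bias is re-based, so the extra precision of the 80-bit
   format is lost. */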
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
    CPU86_LDoubleU temp;

    *pmant = temp.l.lower;
    *pexp = temp.l.upper;

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"
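/* softmmu_template.h is included once per access width; the elided
   lines before each include presumably #define SHIFT to 0, 1, 2 and 3
   so the template expands into the byte, word, long and quad ld/st
   slow paths, suffixed with MMUSUFFIX (_mmu). */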
/* try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
    TranslationBlock *tb;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    env = cpu_single_env;
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    raise_exception_err_norestore(EXCP0E_PAGE, env->error_code);