4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "host-utils.h"
/* Debug (DEBUG_PCALL) wrapper macro for raise_exception_err: logs the C
   source line of the raise before calling the real function.  The
   parenthesized callee name on the last line prevents recursive macro
   expansion.
   NOTE(review): this listing has dropped lines (see embedded original
   line numbers 27-28, presumably "do {" and an "if (logfile)" guard) —
   kept verbatim below; confirm against the original file. */
26 #define raise_exception_err(a, b)\
29 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
30 (raise_exception_err)(a, b);\
34 const uint8_t parity_table[256] = {
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* Effective shift count for 16-bit RCL/RCR: the rotate-through-carry of
   a 16-bit operand has period 17, so rclw_table[c] == c % 17 for raw
   counts 0..31.  (Restored the closing brace dropped by extraction.) */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* Effective shift count for 8-bit RCL/RCR: the rotate-through-carry of
   an 8-bit operand has period 9, so rclb_table[c] == c % 9 for raw
   counts 0..31.  (Restored the closing brace dropped by extraction.) */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
85 const CPU86_LDouble f15rk[7] =
87 0.00000000000000000000L,
88 1.00000000000000000000L,
89 3.14159265358979323851L, /*pi*/
90 0.30102999566398119523L, /*lg2*/
91 0.69314718055994530943L, /*ln2*/
92 1.44269504088896340739L, /*l2e*/
93 3.32192809488736234781L, /*l2t*/
/* Global lock serializing CPU emulation; taken by cpu_lock(), released
   by cpu_unlock().
   NOTE(review): the cpu_lock() signature/braces (original lines 99-101)
   were dropped by the extraction; only the body line remains. */
98 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
102 spin_lock(&global_cpu_lock);
105 void cpu_unlock(void)
107 spin_unlock(&global_cpu_lock);
110 /* return non zero if error */
/* Fetch the two descriptor words for 'selector' from the GDT/LDT into
   *e1_ptr / *e2_ptr.
   NOTE(review): the extraction dropped original lines 112-121 and 124
   (remaining parameters, locals, table selection and the error return);
   the surviving lines are kept verbatim. */
111 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
122 index = selector & ~7;
123 if ((index + 7) > dt->limit)
125 ptr = dt->base + index;
126 *e1_ptr = ldl_kernel(ptr);
127 *e2_ptr = ldl_kernel(ptr + 4);
131 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
134 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
135 if (e2 & DESC_G_MASK)
136 limit = (limit << 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor words:
   base[15:0] in e1[31:16], base[23:16] in e2[7:0], base[31:24] in
   e2[31:24].  (Restored the braces dropped by extraction.) */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
/* Fill a SegmentCache directly from raw descriptor words, with no
   permission checks.
   NOTE(review): the extraction dropped original lines 146 and 149-151
   (opening brace, likely an "sc->flags = e2;" store, closing brace);
   surviving lines kept verbatim. */
145 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
147 sc->base = get_seg_base(e1, e2);
148 sc->limit = get_seg_limit(e1, e2);
152 /* init the segment cache in vm86 mode. */
/* In vm86 mode a segment's base is simply selector << 4, limit 0xffff,
   no descriptor flags.
   NOTE(review): the extraction dropped original lines 154-155 and 158
   (opening brace, probably a "selector &= 0xffff;" mask, closing
   brace); surviving lines kept verbatim. */
153 static inline void load_seg_vm(int seg, int selector)
156 cpu_x86_load_seg_cache(env, seg, selector,
157 (selector << 4), 0xffff, 0);
/* Read the inner-stack SS:ESP pair for privilege level 'dpl' out of the
   current TSS (16-bit TSS uses 2-byte slots, 32-bit TSS 4-byte slots;
   'shift' selects between them).
   NOTE(review): extraction dropped several original lines (162, 164-167,
   172-176, 180, 182, 186, 189 — braces, debug guards, shift setup and
   the if/else wrappers); surviving lines kept verbatim. */
160 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
161 uint32_t *esp_ptr, int dpl)
163 int type, index, shift;
/* debug dump of the TSS contents (normally compiled out) */
168 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
169 for(i=0;i<env->tr.limit;i++) {
170 printf("%02x ", env->tr.base[i]);
171 if ((i & 7) == 7) printf("\n");
177 if (!(env->tr.flags & DESC_P_MASK))
178 cpu_abort(env, "invalid tss");
179 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
181 cpu_abort(env, "invalid tss type");
/* each privilege level has a {SP, SS} pair starting at offset 2/4 */
183 index = (dpl * 4 + 2) << shift;
184 if (index + (4 << shift) - 1 > env->tr.limit)
185 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
/* 16-bit TSS layout */
187 *esp_ptr = lduw_kernel(env->tr.base + index);
188 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
/* 32-bit TSS layout */
190 *esp_ptr = ldl_kernel(env->tr.base + index);
191 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
195 /* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' during a task switch, performing the
   descriptor permission checks mandated for task switches; failures
   raise #TS (EXCP0A_TSS) rather than #GP.
   NOTE(review): the extraction dropped several original lines (braces,
   locals, rpl computation, some conditions); kept verbatim. */
196 static void tss_load_seg(int seg_reg, int selector)
201 if ((selector & 0xfffc) != 0) {
202 if (load_segment(&e1, &e2, selector) != 0)
203 raise_exception_err(EXCP0A_TSS, selector & 0xfffc)
204 if (!(e2 & DESC_S_MASK))
205 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
207 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
208 cpl = env->hflags & HF_CPL_MASK;
209 if (seg_reg == R_CS) {
210 if (!(e2 & DESC_CS_MASK))
211 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
212 /* XXX: is it correct ? */
214 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
215 if ((e2 & DESC_C_MASK) && dpl > rpl)
216 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
217 } else if (seg_reg == R_SS) {
218 /* SS must be writable data */
219 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 if (dpl != cpl || dpl != rpl)
222 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
224 /* not readable code */
225 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* if data or non conforming code, checks the rights */
228 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
229 if (dpl < cpl || dpl < rpl)
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 if (!(e2 & DESC_P_MASK))
234 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
235 cpu_x86_load_seg_cache(env, seg_reg, selector,
236 get_seg_base(e1, e2),
237 get_seg_limit(e1, e2),
/* null selector: only legal for data segments, never CS/SS */
240 if (seg_reg == R_SS || seg_reg == R_CS)
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
/* Reason codes passed to switch_tss(): the source instruction of the
   task switch determines busy-bit and NT/back-link handling. */
245 #define SWITCH_TSS_JMP 0
246 #define SWITCH_TSS_IRET 1
247 #define SWITCH_TSS_CALL 2
249 /* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a full hardware task switch to the TSS named by tss_selector
   (descriptor words e1/e2), triggered by JMP/CALL/IRET ('source').
   Saves the outgoing context into the old TSS, loads the new one, and
   updates TR, CR3, EFLAGS, general registers, LDT and segments.
   NOTE(review): this listing is missing many original lines (braces,
   locals, if/else wrappers, 16-vs-32-bit TSS branches); all surviving
   lines are kept verbatim — do not treat this fragment as compilable. */
250 static void switch_tss(int tss_selector,
251 uint32_t e1, uint32_t e2, int source,
254 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
255 target_ulong tss_base;
256 uint32_t new_regs[8], new_segs[6];
257 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
258 uint32_t old_eflags, eflags_mask;
263 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
265 if (loglevel & CPU_LOG_PCALL)
266 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
269 /* if task gate, we read the TSS segment and we load it */
271 if (!(e2 & DESC_P_MASK))
272 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
273 tss_selector = e1 >> 16;
274 if (tss_selector & 4)
275 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
276 if (load_segment(&e1, &e2, tss_selector) != 0)
277 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
278 if (e2 & DESC_S_MASK)
279 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
280 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
282 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
285 if (!(e2 & DESC_P_MASK))
286 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
292 tss_limit = get_seg_limit(e1, e2);
293 tss_base = get_seg_base(e1, e2);
294 if ((tss_selector & 4) != 0 ||
295 tss_limit < tss_limit_max)
296 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
297 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
/* 32-bit TSS is 104 bytes, 16-bit TSS is 44 bytes (limits 103/43) */
299 old_tss_limit_max = 103;
301 old_tss_limit_max = 43;
303 /* read all the registers from the new TSS */
306 new_cr3 = ldl_kernel(tss_base + 0x1c);
307 new_eip = ldl_kernel(tss_base + 0x20);
308 new_eflags = ldl_kernel(tss_base + 0x24);
309 for(i = 0; i < 8; i++)
310 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
311 for(i = 0; i < 6; i++)
312 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
313 new_ldt = lduw_kernel(tss_base + 0x60);
314 new_trap = ldl_kernel(tss_base + 0x64);
/* 16-bit TSS variant of the reads above */
318 new_eip = lduw_kernel(tss_base + 0x0e);
319 new_eflags = lduw_kernel(tss_base + 0x10);
320 for(i = 0; i < 8; i++)
321 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
322 for(i = 0; i < 4; i++)
323 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
324 new_ldt = lduw_kernel(tss_base + 0x2a);
330 /* NOTE: we must avoid memory exceptions during the task switch,
331 so we make dummy accesses before */
332 /* XXX: it can still fail in some cases, so a bigger hack is
333 necessary to valid the TLB after having done the accesses */
335 v1 = ldub_kernel(env->tr.base);
336 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
337 stb_kernel(env->tr.base, v1);
338 stb_kernel(env->tr.base + old_tss_limit_max, v2);
340 /* clear busy bit (it is restartable) */
341 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
344 ptr = env->gdt.base + (env->tr.selector & ~7);
345 e2 = ldl_kernel(ptr + 4);
346 e2 &= ~DESC_TSS_BUSY_MASK;
347 stl_kernel(ptr + 4, e2);
349 old_eflags = compute_eflags();
350 if (source == SWITCH_TSS_IRET)
351 old_eflags &= ~NT_MASK;
353 /* save the current state in the old TSS */
/* 32-bit save path */
356 stl_kernel(env->tr.base + 0x20, next_eip);
357 stl_kernel(env->tr.base + 0x24, old_eflags);
358 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
359 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
360 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
361 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
362 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
363 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
364 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
365 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
366 for(i = 0; i < 6; i++)
367 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
/* 16-bit save path */
370 stw_kernel(env->tr.base + 0x0e, next_eip);
371 stw_kernel(env->tr.base + 0x10, old_eflags);
372 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
373 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
374 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
375 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
376 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
377 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
378 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
379 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
380 for(i = 0; i < 4; i++)
381 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
384 /* now if an exception occurs, it will occurs in the next task
387 if (source == SWITCH_TSS_CALL) {
/* nested task: store back link and set NT so IRET returns here */
388 stw_kernel(tss_base, env->tr.selector);
389 new_eflags |= NT_MASK;
393 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
/* mark the new TSS busy in its GDT descriptor */
396 ptr = env->gdt.base + (tss_selector & ~7);
397 e2 = ldl_kernel(ptr + 4);
398 e2 |= DESC_TSS_BUSY_MASK;
399 stl_kernel(ptr + 4, e2);
402 /* set the new CPU state */
403 /* from this point, any exception which occurs can give problems */
404 env->cr[0] |= CR0_TS_MASK;
405 env->hflags |= HF_TS_MASK;
406 env->tr.selector = tss_selector;
407 env->tr.base = tss_base;
408 env->tr.limit = tss_limit;
409 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
411 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
412 cpu_x86_update_cr3(env, new_cr3);
415 /* load all registers without an exception, then reload them with
416 possible exception */
418 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
419 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
421 eflags_mask &= 0xffff;
422 load_eflags(new_eflags, eflags_mask);
423 /* XXX: what to do in 16 bit case ? */
432 if (new_eflags & VM_MASK) {
433 for(i = 0; i < 6; i++)
434 load_seg_vm(i, new_segs[i]);
435 /* in vm86, CPL is always 3 */
436 cpu_x86_set_cpl(env, 3);
438 /* CPL is set the RPL of CS */
439 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
440 /* first just selectors as the rest may trigger exceptions */
441 for(i = 0; i < 6; i++)
442 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
445 env->ldt.selector = new_ldt & ~4;
452 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
454 if ((new_ldt & 0xfffc) != 0) {
456 index = new_ldt & ~7;
457 if ((index + 7) > dt->limit)
458 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
459 ptr = dt->base + index;
460 e1 = ldl_kernel(ptr);
461 e2 = ldl_kernel(ptr + 4);
/* descriptor must be a system LDT descriptor (type 2) */
462 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
463 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
464 if (!(e2 & DESC_P_MASK))
465 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
466 load_seg_cache_raw_dt(&env->ldt, e1, e2);
469 /* load the segments */
470 if (!(new_eflags & VM_MASK)) {
471 tss_load_seg(R_CS, new_segs[R_CS]);
472 tss_load_seg(R_SS, new_segs[R_SS]);
473 tss_load_seg(R_ES, new_segs[R_ES]);
474 tss_load_seg(R_DS, new_segs[R_DS]);
475 tss_load_seg(R_FS, new_segs[R_FS]);
476 tss_load_seg(R_GS, new_segs[R_GS]);
479 /* check that EIP is in the CS segment limits */
480 if (new_eip > env->segs[R_CS].limit) {
481 /* XXX: different exception if CALL ? */
482 raise_exception_err(EXCP0D_GPF, 0);
486 /* check if Port I/O is allowed in TSS */
/* Consult the TSS I/O permission bitmap: for a port access of 'size'
   bytes at 'addr', every covered bit must be 0 or #GP(0) is raised.
   NOTE(review): extraction dropped braces and the raise on an invalid
   TSS / out-of-limit bitmap; surviving lines kept verbatim. */
487 static inline void check_io(int addr, int size)
489 int io_offset, val, mask;
491 /* TSS must be a valid 32 bit one */
492 if (!(env->tr.flags & DESC_P_MASK) ||
493 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
496 io_offset = lduw_kernel(env->tr.base + 0x66);
497 io_offset += (addr >> 3);
498 /* Note: the check needs two bytes */
499 if ((io_offset + 1) > env->tr.limit)
501 val = lduw_kernel(env->tr.base + io_offset);
/* shift bitmap word so bit 0 corresponds to 'addr' */
503 mask = (1 << size) - 1;
504 /* all bits must be zero to allow the I/O */
505 if ((val & mask) != 0) {
507 raise_exception_err(EXCP0D_GPF, 0);
/* Translator-callable wrappers around check_io() for 1/2/4-byte port
   accesses with the port number in T0 or DX.
   NOTE(review): the bodies of the T0 variants (original lines 512-524)
   were dropped by the extraction; the DX variants survive minus their
   braces. */
511 void check_iob_T0(void)
516 void check_iow_T0(void)
521 void check_iol_T0(void)
526 void check_iob_DX(void)
528 check_io(EDX & 0xffff, 1);
531 void check_iow_DX(void)
533 check_io(EDX & 0xffff, 2);
536 void check_iol_DX(void)
538 check_io(EDX & 0xffff, 4);
/* Return the stack-pointer mask implied by SS descriptor flags:
   0xffffffff for a 32-bit (B bit set) stack, 0xffff for 16-bit.
   NOTE(review): the two return statements and braces (original lines
   542, 544-547) were dropped by the extraction. */
541 static inline unsigned int get_sp_mask(unsigned int e2)
543 if (e2 & DESC_B_MASK)
/* Update ESP honouring the stack-size mask.  The first (x86_64)
   definition special-cases the 16- and 32-bit masks so the upper bits
   of RSP are handled correctly; the second is the 32-bit-only form.
   NOTE(review): the #ifdef/#else lines and the 64-bit fallthrough
   branch were dropped by the extraction. */
550 #define SET_ESP(val, sp_mask)\
552 if ((sp_mask) == 0xffff)\
553 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
554 else if ((sp_mask) == 0xffffffffLL)\
555 ESP = (uint32_t)(val);\
560 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
563 /* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers for 16- and 32-bit operands; 'sp' is adjusted
   by the macro and all addressing is masked with sp_mask relative to
   the stack segment base 'ssp'.
   NOTE(review): the sp increment/decrement lines and enclosing braces
   were dropped by the extraction; kept verbatim. */
564 #define PUSHW(ssp, sp, sp_mask, val)\
567 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
570 #define PUSHL(ssp, sp, sp_mask, val)\
573 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
576 #define POPW(ssp, sp, sp_mask, val)\
578 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
582 #define POPL(ssp, sp, sp_mask, val)\
584 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
588 /* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: walk the IDT,
   dispatch through a task gate (task switch) or an interrupt/trap gate
   (possibly switching to an inner-privilege stack), push the return
   frame, and load the target CS:EIP.
   NOTE(review): many original lines (braces, locals, IDT bounds setup,
   old_eip selection, several branches) were dropped by the extraction;
   all surviving lines kept verbatim — not compilable as-is. */
589 static void do_interrupt_protected(int intno, int is_int, int error_code,
590 unsigned int next_eip, int is_hw)
593 target_ulong ptr, ssp;
594 int type, dpl, selector, ss_dpl, cpl;
595 int has_error_code, new_stack, shift;
596 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
597 uint32_t old_eip, sp_mask;
598 int svm_should_check = 1;
/* SVM: decide whether this injection should be intercepted */
600 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
602 svm_should_check = 0;
606 && (INTERCEPTEDl(_exceptions, 1 << intno)
608 raise_interrupt(intno, is_int, error_code, 0);
611 if (!is_int && !is_hw) {
630 if (intno * 8 + 7 > dt->limit)
631 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
632 ptr = dt->base + intno * 8;
633 e1 = ldl_kernel(ptr);
634 e2 = ldl_kernel(ptr + 4);
635 /* check gate type */
636 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
638 case 5: /* task gate */
639 /* must do that check here to return the correct error code */
640 if (!(e2 & DESC_P_MASK))
641 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
642 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
643 if (has_error_code) {
646 /* push the error code */
647 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
649 if (env->segs[R_SS].flags & DESC_B_MASK)
653 esp = (ESP - (2 << shift)) & mask;
654 ssp = env->segs[R_SS].base + esp;
656 stl_kernel(ssp, error_code);
658 stw_kernel(ssp, error_code);
662 case 6: /* 286 interrupt gate */
663 case 7: /* 286 trap gate */
664 case 14: /* 386 interrupt gate */
665 case 15: /* 386 trap gate */
668 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
672 cpl = env->hflags & HF_CPL_MASK;
673 /* check privledge if software int */
674 if (is_int && dpl < cpl)
675 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK))
678 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
680 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
681 if ((selector & 0xfffc) == 0)
682 raise_exception_err(EXCP0D_GPF, 0);
684 if (load_segment(&e1, &e2, selector) != 0)
685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
691 if (!(e2 & DESC_P_MASK))
692 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
693 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
694 /* to inner privilege */
695 get_ss_esp_from_tss(&ss, &esp, dpl);
696 if ((ss & 0xfffc) == 0)
697 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
699 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
700 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
701 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
702 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
704 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
705 if (!(ss_e2 & DESC_S_MASK) ||
706 (ss_e2 & DESC_CS_MASK) ||
707 !(ss_e2 & DESC_W_MASK))
708 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
709 if (!(ss_e2 & DESC_P_MASK))
710 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
712 sp_mask = get_sp_mask(ss_e2);
713 ssp = get_seg_base(ss_e1, ss_e2);
714 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
715 /* to same privilege */
716 if (env->eflags & VM_MASK)
717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
719 sp_mask = get_sp_mask(env->segs[R_SS].flags);
720 ssp = env->segs[R_SS].base;
724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
725 new_stack = 0; /* avoid warning */
726 sp_mask = 0; /* avoid warning */
727 ssp = 0; /* avoid warning */
728 esp = 0; /* avoid warning */
734 /* XXX: check that enough room is available */
735 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
736 if (env->eflags & VM_MASK)
/* 32-bit gate: push frame with 32-bit slots */
742 if (env->eflags & VM_MASK) {
743 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
744 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
745 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
746 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
748 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
749 PUSHL(ssp, esp, sp_mask, ESP);
751 PUSHL(ssp, esp, sp_mask, compute_eflags());
752 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
753 PUSHL(ssp, esp, sp_mask, old_eip);
754 if (has_error_code) {
755 PUSHL(ssp, esp, sp_mask, error_code);
/* 16-bit gate: same frame with 16-bit slots */
759 if (env->eflags & VM_MASK) {
760 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
761 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
762 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
763 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
765 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
766 PUSHW(ssp, esp, sp_mask, ESP);
768 PUSHW(ssp, esp, sp_mask, compute_eflags());
769 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
770 PUSHW(ssp, esp, sp_mask, old_eip);
771 if (has_error_code) {
772 PUSHW(ssp, esp, sp_mask, error_code);
/* leaving vm86: clear the data segments */
777 if (env->eflags & VM_MASK) {
778 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
780 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
781 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
783 ss = (ss & ~3) | dpl;
784 cpu_x86_load_seg_cache(env, R_SS, ss,
785 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
787 SET_ESP(esp, sp_mask);
789 selector = (selector & ~3) | dpl;
790 cpu_x86_load_seg_cache(env, R_CS, selector,
791 get_seg_base(e1, e2),
792 get_seg_limit(e1, e2),
794 cpu_x86_set_cpl(env, dpl);
797 /* interrupt gate clear IF mask */
798 if ((type & 1) == 0) {
799 env->eflags &= ~IF_MASK;
801 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* 64-bit stack push/pop helpers used by the long-mode interrupt path.
   NOTE(review): the sp adjustment lines and braces were dropped by the
   extraction. */
806 #define PUSHQ(sp, val)\
809 stq_kernel(sp, (val));\
812 #define POPQ(sp, val)\
814 val = ldq_kernel(sp);\
/* Read RSPn / ISTn (slot 'level') from the 64-bit TSS; slots are 8
   bytes starting at offset 4.
   NOTE(review): braces, locals and a debug #ifdef guard were dropped by
   the extraction; surviving lines kept verbatim. */
818 static inline target_ulong get_rsp_from_tss(int level)
823 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
824 env->tr.base, env->tr.limit);
827 if (!(env->tr.flags & DESC_P_MASK))
828 cpu_abort(env, "invalid tss");
829 index = 8 * level + 4;
830 if ((index + 7) > env->tr.limit)
831 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
832 return ldq_kernel(env->tr.base + index);
835 /* 64 bit interrupt */
/* Deliver interrupt/exception 'intno' in long mode: 16-byte IDT
   entries, 64-bit gate offsets, optional IST stack selection, and a
   fixed 64-bit push frame (SS, RSP, RFLAGS, CS, RIP[, error code]).
   NOTE(review): many original lines (braces, IDT setup, old_eip
   selection, ist extraction, new_stack assignments, RSP/EIP stores)
   were dropped by the extraction; surviving lines kept verbatim. */
836 static void do_interrupt64(int intno, int is_int, int error_code,
837 target_ulong next_eip, int is_hw)
841 int type, dpl, selector, cpl, ist;
842 int has_error_code, new_stack;
843 uint32_t e1, e2, e3, ss;
844 target_ulong old_eip, esp, offset;
845 int svm_should_check = 1;
847 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
849 svm_should_check = 0;
852 && INTERCEPTEDl(_exceptions, 1 << intno)
854 raise_interrupt(intno, is_int, error_code, 0);
857 if (!is_int && !is_hw) {
876 if (intno * 16 + 15 > dt->limit)
877 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = ldl_kernel(ptr);
880 e2 = ldl_kernel(ptr + 4);
881 e3 = ldl_kernel(ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privledge if software int */
895 if (is_int && dpl < cpl)
896 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
897 /* check valid bit */
898 if (!(e2 & DESC_P_MASK))
899 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
/* 64-bit gate offset is split across all three descriptor words */
901 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
903 if ((selector & 0xfffc) == 0)
904 raise_exception_err(EXCP0D_GPF, 0);
906 if (load_segment(&e1, &e2, selector) != 0)
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913 if (!(e2 & DESC_P_MASK))
914 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
/* target must be a 64-bit code segment (L set, B clear) */
915 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
916 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
918 /* to inner privilege */
920 esp = get_rsp_from_tss(ist + 3);
922 esp = get_rsp_from_tss(dpl);
923 esp &= ~0xfLL; /* align stack */
926 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
927 /* to same privilege */
928 if (env->eflags & VM_MASK)
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 esp = get_rsp_from_tss(ist + 3);
935 esp &= ~0xfLL; /* align stack */
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 new_stack = 0; /* avoid warning */
940 esp = 0; /* avoid warning */
/* push the 64-bit interrupt frame */
943 PUSHQ(esp, env->segs[R_SS].selector);
945 PUSHQ(esp, compute_eflags());
946 PUSHQ(esp, env->segs[R_CS].selector);
948 if (has_error_code) {
949 PUSHQ(esp, error_code);
/* inner-privilege switch loads a null SS in long mode */
954 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
958 selector = (selector & ~3) | dpl;
959 cpu_x86_load_seg_cache(env, R_CS, selector,
960 get_seg_base(e1, e2),
961 get_seg_limit(e1, e2),
963 cpu_x86_set_cpl(env, dpl);
966 /* interrupt gate clear IF mask */
967 if ((type & 1) == 0) {
968 env->eflags &= ~IF_MASK;
970 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* SYSCALL instruction.  User-mode-only build just raises EXCP_SYSCALL
   for the host to handle; the system build performs the real fast
   system-call entry: CS/SS from STAR, RIP from LSTAR/CSTAR (long mode)
   or STAR[31:0] (legacy), RFLAGS masked by FMASK, CPL forced to 0.
   NOTE(review): braces, locals and several #ifdef lines were dropped by
   the extraction; surviving lines kept verbatim. */
974 #if defined(CONFIG_USER_ONLY)
975 void helper_syscall(int next_eip_addend)
977 env->exception_index = EXCP_SYSCALL;
978 env->exception_next_eip = env->eip + next_eip_addend;
982 void helper_syscall(int next_eip_addend)
986 if (!(env->efer & MSR_EFER_SCE)) {
987 raise_exception_err(EXCP06_ILLOP, 0);
989 selector = (env->star >> 32) & 0xffff;
991 if (env->hflags & HF_LMA_MASK) {
/* long mode: return RIP in RCX, saved RFLAGS in R11 */
994 ECX = env->eip + next_eip_addend;
995 env->regs[11] = compute_eflags();
997 code64 = env->hflags & HF_CS64_MASK;
999 cpu_x86_set_cpl(env, 0);
1000 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002 DESC_G_MASK | DESC_P_MASK |
1004 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1005 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1007 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1009 DESC_W_MASK | DESC_A_MASK);
1010 env->eflags &= ~env->fmask;
1012 env->eip = env->lstar;
1014 env->eip = env->cstar;
/* legacy mode: return EIP in ECX, entry point from STAR[31:0] */
1018 ECX = (uint32_t)(env->eip + next_eip_addend);
1020 cpu_x86_set_cpl(env, 0);
1021 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1023 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1025 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1026 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1028 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1030 DESC_W_MASK | DESC_A_MASK);
1031 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1032 env->eip = (uint32_t)env->star;
/* SYSRET instruction: fast return to CPL 3.  CS/SS are derived from
   STAR[63:48]; in long mode RIP comes from RCX (32- or 64-bit per
   'dflag') and RFLAGS from R11, in legacy mode EIP from ECX with IF
   forced on.  #UD if EFER.SCE is clear, #GP(0) outside protected mode
   or at CPL != 0.
   NOTE(review): braces, locals and several flag-constant lines were
   dropped by the extraction; surviving lines kept verbatim. */
1037 void helper_sysret(int dflag)
1041 if (!(env->efer & MSR_EFER_SCE)) {
1042 raise_exception_err(EXCP06_ILLOP, 0);
1044 cpl = env->hflags & HF_CPL_MASK;
1045 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1046 raise_exception_err(EXCP0D_GPF, 0);
1048 selector = (env->star >> 48) & 0xffff;
1049 #ifdef TARGET_X86_64
1050 if (env->hflags & HF_LMA_MASK) {
/* 64-bit return: CS selector + 16, L bit set */
1052 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1054 DESC_G_MASK | DESC_P_MASK |
1055 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1056 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
/* 32-bit return in long mode */
1060 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1062 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1063 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1064 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1065 env->eip = (uint32_t)ECX;
1067 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1069 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1070 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1071 DESC_W_MASK | DESC_A_MASK);
1072 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1073 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1074 cpu_x86_set_cpl(env, 3);
/* legacy-mode return */
1078 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1080 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1081 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1082 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1083 env->eip = (uint32_t)ECX;
1084 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1086 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1087 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088 DESC_W_MASK | DESC_A_MASK);
1089 env->eflags |= IF_MASK;
1090 cpu_x86_set_cpl(env, 3);
/* kqemu fast path: hand control back to the accelerator */
1093 if (kqemu_is_ok(env)) {
1094 if (env->hflags & HF_LMA_MASK)
1095 CC_OP = CC_OP_EFLAGS;
1096 env->exception_index = -1;
1102 /* real mode interrupt */
/* Deliver interrupt 'intno' in real mode: read CS:IP from the 4-byte
   IVT entry, push FLAGS/CS/IP (16-bit) on the stack, clear IF/TF/AC/RF.
   NOTE(review): braces, locals, the esp/old_eip setup and the EIP store
   were dropped by the extraction; surviving lines kept verbatim. */
1103 static void do_interrupt_real(int intno, int is_int, int error_code,
1104 unsigned int next_eip)
1107 target_ulong ptr, ssp;
1109 uint32_t offset, esp;
1110 uint32_t old_cs, old_eip;
1111 int svm_should_check = 1;
1113 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1115 svm_should_check = 0;
1117 if (svm_should_check
1118 && INTERCEPTEDl(_exceptions, 1 << intno)
1120 raise_interrupt(intno, is_int, error_code, 0);
1122 /* real mode (simpler !) */
1124 if (intno * 4 + 3 > dt->limit)
1125 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1126 ptr = dt->base + intno * 4;
1127 offset = lduw_kernel(ptr);
1128 selector = lduw_kernel(ptr + 2);
1130 ssp = env->segs[R_SS].base;
1135 old_cs = env->segs[R_CS].selector;
1136 /* XXX: use SS segment size ? */
1137 PUSHW(ssp, esp, 0xffff, compute_eflags());
1138 PUSHW(ssp, esp, 0xffff, old_cs);
1139 PUSHW(ssp, esp, 0xffff, old_eip);
1141 /* update processor state */
1142 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1144 env->segs[R_CS].selector = selector;
1145 env->segs[R_CS].base = (selector << 4);
1146 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1149 /* fake user mode interrupt */
/* User-mode-only emulation: validate the DPL of the IDT entry for a
   software int (so #GP is raised as on real hardware) but leave actual
   delivery to the host process signal machinery.
   NOTE(review): braces, locals and the IDT shift selection were dropped
   by the extraction; surviving lines kept verbatim. */
1150 void do_interrupt_user(int intno, int is_int, int error_code,
1151 target_ulong next_eip)
1155 int dpl, cpl, shift;
1159 if (env->hflags & HF_LMA_MASK) {
1164 ptr = dt->base + (intno << shift);
1165 e2 = ldl_kernel(ptr + 4);
1167 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1168 cpl = env->hflags & HF_CPL_MASK;
1169 /* check privledge if software int */
1170 if (is_int && dpl < cpl)
1171 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1173 /* Since we emulate only user space, we cannot do more than
1174 exiting the emulation with the suitable exception and error
/* Top-level interrupt dispatcher: optional CPU_LOG_INT tracing, then
   route to the long-mode, protected-mode or real-mode delivery path.
   NOTE(review): the opening of the comment block and several lines
   (braces, count update, the logging #if guards) were dropped by the
   extraction; surviving lines kept verbatim. */
1181 * Begin execution of an interruption. is_int is TRUE if coming from
1182 * the int instruction. next_eip is the EIP value AFTER the interrupt
1183 * instruction. It is only relevant if is_int is TRUE.
1185 void do_interrupt(int intno, int is_int, int error_code,
1186 target_ulong next_eip, int is_hw)
1188 if (loglevel & CPU_LOG_INT) {
1189 if ((env->cr[0] & CR0_PE_MASK)) {
1191 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1192 count, intno, error_code, is_int,
1193 env->hflags & HF_CPL_MASK,
1194 env->segs[R_CS].selector, EIP,
1195 (int)env->segs[R_CS].base + EIP,
1196 env->segs[R_SS].selector, ESP);
1197 if (intno == 0x0e) {
1198 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1200 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1202 fprintf(logfile, "\n");
1203 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
/* optional dump of the code bytes at CS:EIP */
1208 fprintf(logfile, " code=");
1209 ptr = env->segs[R_CS].base + env->eip;
1210 for(i = 0; i < 16; i++) {
1211 fprintf(logfile, " %02x", ldub(ptr + i));
1213 fprintf(logfile, "\n");
1219 if (env->cr[0] & CR0_PE_MASK) {
1221 if (env->hflags & HF_LMA_MASK) {
1222 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1226 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1229 do_interrupt_real(intno, is_int, error_code, next_eip);
/* Convert a nested exception to #DF (double fault) per the x86
   contributory/page-fault rules, or abort on triple fault.
   NOTE(review): the comment opener, braces and the final return were
   dropped by the extraction; surviving lines kept verbatim. */
1234 * Check nested exceptions and change to double or triple fault if
1235 * needed. It should only be called, if this is not an interrupt.
1236 * Returns the new exception number.
1238 static int check_exception(int intno, int *error_code)
1240 char first_contributory = env->old_exception == 0 ||
1241 (env->old_exception >= 10 &&
1242 env->old_exception <= 13);
1243 char second_contributory = intno == 0 ||
1244 (intno >= 10 && intno <= 13);
1246 if (loglevel & CPU_LOG_INT)
1247 fprintf(logfile, "check_exception old: %x new %x\n",
1248 env->old_exception, intno);
1250 if (env->old_exception == EXCP08_DBLE)
1251 cpu_abort(env, "triple fault");
1253 if ((first_contributory && second_contributory)
1254 || (env->old_exception == EXCP0E_PAGE &&
1255 (second_contributory || (intno == EXCP0E_PAGE)))) {
1256 intno = EXCP08_DBLE;
/* remember this fault so a nested one can escalate */
1260 if (second_contributory || (intno == EXCP0E_PAGE) ||
1261 (intno == EXCP08_DBLE))
1262 env->old_exception = intno;
1268 * Signal an interruption. It is executed in the main CPU loop.
1269 * is_int is TRUE if coming from the int instruction. next_eip is the
1270 * EIP value AFTER the interrupt instruction. It is only relevant if
1273 void raise_interrupt(int intno, int is_int, int error_code,
1274 int next_eip_addend)
1277 svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1278 intno = check_exception(intno, &error_code);
1281 env->exception_index = intno;
1282 env->error_code = error_code;
1283 env->exception_is_int = is_int;
1284 env->exception_next_eip = env->eip + next_eip_addend;
1288 /* same as raise_exception_err, but do not restore global registers */
/* NOTE(review): braces and the is_int check were dropped by the
   extraction; surviving lines kept verbatim.  Unlike raise_interrupt,
   this does not add a next-eip addend and longjmps directly. */
1289 static void raise_exception_err_norestore(int exception_index, int error_code)
1291 exception_index = check_exception(exception_index, &error_code);
1293 env->exception_index = exception_index;
1294 env->error_code = error_code;
1295 env->exception_is_int = 0;
1296 env->exception_next_eip = 0;
1297 longjmp(env->jmp_env, 1);
1300 /* shortcuts to generate exceptions */
/* The parenthesized name suppresses expansion of the debug macro of the
   same name defined at the top of this file. */
1302 void (raise_exception_err)(int exception_index, int error_code)
1304 raise_interrupt(exception_index, 0, error_code, 0);
/* Raise an exception with no error code. */
1307 void raise_exception(int exception_index)
1309 raise_interrupt(exception_index, 0, 0, 0);
1314 #if defined(CONFIG_USER_ONLY)
/* SMM is meaningless in user-mode emulation: these are empty stubs
   (bodies elided in this extraction). */
1316 void do_smm_enter(void)
1320 void helper_rsm(void)
1326 #ifdef TARGET_X86_64
1327 #define SMM_REVISION_ID 0x00020064
1329 #define SMM_REVISION_ID 0x00020000
/* Enter System Management Mode: save the current CPU state into the
   SMRAM state save area at smbase+0x8000, then reset the CPU into the
   SMM real-mode-like environment with CS at smbase and EIP=0x8000.
   NOTE(review): locals, closing braces and #else markers were elided by
   the extraction; code lines are kept byte-identical. */
1332 void do_smm_enter(void)
1334 target_ulong sm_state;
1338 if (loglevel & CPU_LOG_INT) {
1339 fprintf(logfile, "SMM: enter\n");
1340 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1343 env->hflags |= HF_SMM_MASK;
1344 cpu_smm_update(env);
/* base of the SMRAM state save map */
1346 sm_state = env->smbase + 0x8000;
1348 #ifdef TARGET_X86_64
/* 64-bit (AMD64-style) state save map: segment caches first */
1349 for(i = 0; i < 6; i++) {
1351 offset = 0x7e00 + i * 16;
1352 stw_phys(sm_state + offset, dt->selector);
/* descriptor flags are stored with bits 8..11 masked out (0xf0ff) */
1353 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1354 stl_phys(sm_state + offset + 4, dt->limit);
1355 stq_phys(sm_state + offset + 8, dt->base);
1358 stq_phys(sm_state + 0x7e68, env->gdt.base);
1359 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1361 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1362 stq_phys(sm_state + 0x7e78, env->ldt.base);
1363 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1364 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1366 stq_phys(sm_state + 0x7e88, env->idt.base);
1367 stl_phys(sm_state + 0x7e84, env->idt.limit);
1369 stw_phys(sm_state + 0x7e90, env->tr.selector);
1370 stq_phys(sm_state + 0x7e98, env->tr.base);
1371 stl_phys(sm_state + 0x7e94, env->tr.limit);
1372 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1374 stq_phys(sm_state + 0x7ed0, env->efer);
/* general-purpose registers */
1376 stq_phys(sm_state + 0x7ff8, EAX);
1377 stq_phys(sm_state + 0x7ff0, ECX);
1378 stq_phys(sm_state + 0x7fe8, EDX);
1379 stq_phys(sm_state + 0x7fe0, EBX);
1380 stq_phys(sm_state + 0x7fd8, ESP);
1381 stq_phys(sm_state + 0x7fd0, EBP);
1382 stq_phys(sm_state + 0x7fc8, ESI);
1383 stq_phys(sm_state + 0x7fc0, EDI);
/* R8..R15 */
1384 for(i = 8; i < 16; i++)
1385 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1386 stq_phys(sm_state + 0x7f78, env->eip);
1387 stl_phys(sm_state + 0x7f70, compute_eflags());
1388 stl_phys(sm_state + 0x7f68, env->dr[6]);
1389 stl_phys(sm_state + 0x7f60, env->dr[7]);
1391 stl_phys(sm_state + 0x7f48, env->cr[4]);
1392 stl_phys(sm_state + 0x7f50, env->cr[3]);
1393 stl_phys(sm_state + 0x7f58, env->cr[0]);
1395 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1396 stl_phys(sm_state + 0x7f00, env->smbase);
/* 32-bit state save map (the #else branch of TARGET_X86_64) */
1398 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1399 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1400 stl_phys(sm_state + 0x7ff4, compute_eflags());
1401 stl_phys(sm_state + 0x7ff0, env->eip);
1402 stl_phys(sm_state + 0x7fec, EDI);
1403 stl_phys(sm_state + 0x7fe8, ESI);
1404 stl_phys(sm_state + 0x7fe4, EBP);
1405 stl_phys(sm_state + 0x7fe0, ESP);
1406 stl_phys(sm_state + 0x7fdc, EBX);
1407 stl_phys(sm_state + 0x7fd8, EDX);
1408 stl_phys(sm_state + 0x7fd4, ECX);
1409 stl_phys(sm_state + 0x7fd0, EAX);
1410 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1411 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1413 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1414 stl_phys(sm_state + 0x7f64, env->tr.base);
1415 stl_phys(sm_state + 0x7f60, env->tr.limit);
1416 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1418 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1419 stl_phys(sm_state + 0x7f80, env->ldt.base);
1420 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1421 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1423 stl_phys(sm_state + 0x7f74, env->gdt.base);
1424 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1426 stl_phys(sm_state + 0x7f58, env->idt.base);
1427 stl_phys(sm_state + 0x7f54, env->idt.limit);
/* segment caches: ES..GS; layout splits at segment 3 */
1429 for(i = 0; i < 6; i++) {
1432 offset = 0x7f84 + i * 12;
1434 offset = 0x7f2c + (i - 3) * 12;
1435 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1436 stl_phys(sm_state + offset + 8, dt->base);
1437 stl_phys(sm_state + offset + 4, dt->limit);
1438 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1440 stl_phys(sm_state + 0x7f14, env->cr[4]);
1442 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1443 stl_phys(sm_state + 0x7ef8, env->smbase);
1445 /* init SMM cpu state */
1447 #ifdef TARGET_X86_64
1449 env->hflags &= ~HF_LMA_MASK;
/* clear the arithmetic flags and DF */
1451 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1452 env->eip = 0x00008000;
1453 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
/* flat 4GB data segments */
1455 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1456 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1457 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1458 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1459 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
/* leave protected mode / paging: clear PE, EM, TS, PG */
1461 cpu_x86_update_cr0(env,
1462 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1463 cpu_x86_update_cr4(env, 0);
1464 env->dr[7] = 0x00000400;
1465 CC_OP = CC_OP_EFLAGS;
/* RSM: resume from System Management Mode by reloading the CPU state
   previously saved in the SMRAM state save area by do_smm_enter().
   NOTE(review): locals, braces and #else markers were elided by the
   extraction; code lines are kept byte-identical. */
1468 void helper_rsm(void)
1470 target_ulong sm_state;
1474 sm_state = env->smbase + 0x8000;
1475 #ifdef TARGET_X86_64
/* 64-bit state save map */
1476 env->efer = ldq_phys(sm_state + 0x7ed0);
1477 if (env->efer & MSR_EFER_LMA)
1478 env->hflags |= HF_LMA_MASK;
1480 env->hflags &= ~HF_LMA_MASK;
/* reload segment caches (flags were saved masked with 0xf0ff) */
1482 for(i = 0; i < 6; i++) {
1483 offset = 0x7e00 + i * 16;
1484 cpu_x86_load_seg_cache(env, i,
1485 lduw_phys(sm_state + offset),
1486 ldq_phys(sm_state + offset + 8),
1487 ldl_phys(sm_state + offset + 4),
1488 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1491 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1492 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1494 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1495 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1496 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1497 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1499 env->idt.base = ldq_phys(sm_state + 0x7e88);
1500 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1502 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1503 env->tr.base = ldq_phys(sm_state + 0x7e98);
1504 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1505 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
/* general-purpose registers */
1507 EAX = ldq_phys(sm_state + 0x7ff8);
1508 ECX = ldq_phys(sm_state + 0x7ff0);
1509 EDX = ldq_phys(sm_state + 0x7fe8);
1510 EBX = ldq_phys(sm_state + 0x7fe0);
1511 ESP = ldq_phys(sm_state + 0x7fd8);
1512 EBP = ldq_phys(sm_state + 0x7fd0);
1513 ESI = ldq_phys(sm_state + 0x7fc8);
1514 EDI = ldq_phys(sm_state + 0x7fc0);
1515 for(i = 8; i < 16; i++)
1516 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1517 env->eip = ldq_phys(sm_state + 0x7f78);
1518 load_eflags(ldl_phys(sm_state + 0x7f70),
1519 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1520 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1521 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1523 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1524 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1525 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1527 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
/* bit 17 of the revision ID indicates SMBASE relocation support */
1528 if (val & 0x20000) {
1529 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
/* 32-bit state save map (the #else branch of TARGET_X86_64) */
1532 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1533 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1534 load_eflags(ldl_phys(sm_state + 0x7ff4),
1535 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1536 env->eip = ldl_phys(sm_state + 0x7ff0);
1537 EDI = ldl_phys(sm_state + 0x7fec);
1538 ESI = ldl_phys(sm_state + 0x7fe8);
1539 EBP = ldl_phys(sm_state + 0x7fe4);
1540 ESP = ldl_phys(sm_state + 0x7fe0);
1541 EBX = ldl_phys(sm_state + 0x7fdc);
1542 EDX = ldl_phys(sm_state + 0x7fd8);
1543 ECX = ldl_phys(sm_state + 0x7fd4);
1544 EAX = ldl_phys(sm_state + 0x7fd0);
1545 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1546 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1548 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1549 env->tr.base = ldl_phys(sm_state + 0x7f64);
1550 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1551 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1553 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1554 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1555 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1556 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1558 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1559 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1561 env->idt.base = ldl_phys(sm_state + 0x7f58);
1562 env->idt.limit = ldl_phys(sm_state + 0x7f54);
/* segment caches; offsets mirror the split layout used on save */
1564 for(i = 0; i < 6; i++) {
1566 offset = 0x7f84 + i * 12;
1568 offset = 0x7f2c + (i - 3) * 12;
1569 cpu_x86_load_seg_cache(env, i,
1570 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1571 ldl_phys(sm_state + offset + 8),
1572 ldl_phys(sm_state + offset + 4),
1573 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1575 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1577 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1578 if (val & 0x20000) {
1579 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1582 CC_OP = CC_OP_EFLAGS;
/* leave SMM */
1583 env->hflags &= ~HF_SMM_MASK;
1584 cpu_smm_update(env);
1586 if (loglevel & CPU_LOG_INT) {
1587 fprintf(logfile, "SMM: after RSM\n");
1588 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1592 #endif /* !CONFIG_USER_ONLY */
1595 #ifdef BUGGY_GCC_DIV64
1596 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1597 call it from another function */
/* 64/32 unsigned divide: quotient via *q_ptr, remainder returned
   (bodies elided in this extraction). */
1598 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
/* signed variant of div32 */
1604 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
/* DIV r/m32: unsigned divide of EDX:EAX by t0; quotient -> EAX,
   remainder -> EDX. Raises #DE on divide-by-zero or quotient overflow.
   NOTE(review): interior lines elided; code kept byte-identical. */
1611 void helper_divl_EAX_T0(target_ulong t0)
1613 unsigned int den, r;
/* 64-bit dividend assembled from EDX:EAX */
1616 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1619 raise_exception(EXCP00_DIVZ);
1621 #ifdef BUGGY_GCC_DIV64
1622 r = div32(&q, num, den);
/* quotient must fit in 32 bits, otherwise #DE */
1628 raise_exception(EXCP00_DIVZ);
/* IDIV r/m32: signed divide of EDX:EAX by t0; quotient -> EAX,
   remainder -> EDX. Raises #DE on divide-by-zero or quotient overflow.
   NOTE(review): interior lines elided; code kept byte-identical. */
1633 void helper_idivl_EAX_T0(target_ulong t0)
1638 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1641 raise_exception(EXCP00_DIVZ);
1643 #ifdef BUGGY_GCC_DIV64
1644 r = idiv32(&q, num, den);
/* signed quotient must fit in int32_t, otherwise #DE */
1649 if (q != (int32_t)q)
1650 raise_exception(EXCP00_DIVZ);
/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; if equal,
   store ECX:EBX there and set ZF (failure path elided in extraction). */
1655 void helper_cmpxchg8b(void)
1660 eflags = cc_table[CC_OP].compute_all();
1662 if (d == (((uint64_t)EDX << 32) | EAX)) {
1663 stq(A0, ((uint64_t)ECX << 32) | EBX);
/* Deliver a single-step trap (#DB): set DR6.BS then raise exception 1. */
1673 void helper_single_step()
1675 env->dr[6] |= 0x4000;
1676 raise_exception(EXCP01_SSTP);
/* CPUID: fill EAX/EBX/ECX/EDX for the leaf selected by EAX, from the
   per-CPU cpuid_* fields. NOTE(review): the switch statement, case
   labels and several leaf bodies were elided by the extraction. */
1679 void helper_cpuid(void)
1682 index = (uint32_t)EAX;
1684 /* test if maximum index reached */
1685 if (index & 0x80000000) {
/* out-of-range extended leaf: clamped — note this clamps to
   cpuid_level, not cpuid_xlevel; confirm intent against full source */
1686 if (index > env->cpuid_xlevel)
1687 index = env->cpuid_level;
1689 if (index > env->cpuid_level)
1690 index = env->cpuid_level;
/* leaf 0: max basic leaf + vendor string */
1695 EAX = env->cpuid_level;
1696 EBX = env->cpuid_vendor1;
1697 EDX = env->cpuid_vendor2;
1698 ECX = env->cpuid_vendor3;
/* leaf 1: version/feature information */
1701 EAX = env->cpuid_version;
1702 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1703 ECX = env->cpuid_ext_features;
1704 EDX = env->cpuid_features;
1707 /* cache info: needed for Pentium Pro compatibility */
/* leaf 0x80000000: max extended leaf + vendor string */
1714 EAX = env->cpuid_xlevel;
1715 EBX = env->cpuid_vendor1;
1716 EDX = env->cpuid_vendor2;
1717 ECX = env->cpuid_vendor3;
/* leaf 0x80000001: extended features */
1720 EAX = env->cpuid_features;
1722 ECX = env->cpuid_ext3_features;
1723 EDX = env->cpuid_ext2_features;
/* leaves 0x80000002..4: processor brand string, 16 bytes per leaf */
1728 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1729 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1730 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1731 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1734 /* cache info (L1 cache) */
1741 /* cache info (L2 cache) */
1748 /* virtual & phys address size in low 2 bytes. */
1761 /* reserved values: zero */
/* ENTER with non-zero nesting level: copy the previous frame pointers
   from the old frame and push the new frame pointer (T1); data32
   selects 32-bit vs 16-bit stack element size.
   NOTE(review): the loop and branch structure was elided. */
1770 void helper_enter_level(int level, int data32)
1773 uint32_t esp_mask, esp, ebp;
1775 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1776 ssp = env->segs[R_SS].base;
/* 32-bit copy of an enclosing frame pointer */
1785 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1788 stl(ssp + (esp & esp_mask), T1);
/* 16-bit variants */
1795 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1798 stw(ssp + (esp & esp_mask), T1);
1802 #ifdef TARGET_X86_64
/* 64-bit ENTER with non-zero nesting level (body mostly elided in this
   extraction); data64 selects 64-bit vs 16-bit element size. */
1803 void helper_enter64_level(int level, int data64)
1805 target_ulong esp, ebp;
1825 stw(esp, lduw(ebp));
/* LLDT: load the Local Descriptor Table register from the selector in
   T0, validating the GDT entry (type must be LDT, present). */
1833 void helper_lldt_T0(void)
1838 int index, entry_limit;
1841 selector = T0 & 0xffff;
1842 if ((selector & 0xfffc) == 0) {
1843 /* XXX: NULL selector case: invalid LDT */
/* selector with TI=1 (LDT-relative) is invalid for LLDT -> #GP */
1848 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1850 index = selector & ~7;
1851 #ifdef TARGET_X86_64
/* long mode uses 16-byte system descriptors */
1852 if (env->hflags & HF_LMA_MASK)
1857 if ((index + entry_limit) > dt->limit)
1858 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1859 ptr = dt->base + index;
1860 e1 = ldl_kernel(ptr);
1861 e2 = ldl_kernel(ptr + 4);
/* must be a system descriptor of type 2 (LDT) */
1862 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1863 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1864 if (!(e2 & DESC_P_MASK))
1865 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1866 #ifdef TARGET_X86_64
1867 if (env->hflags & HF_LMA_MASK) {
/* upper 32 bits of the base live in the second 8 bytes */
1869 e3 = ldl_kernel(ptr + 8);
1870 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1871 env->ldt.base |= (target_ulong)e3 << 32;
1875 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1878 env->ldt.selector = selector;
/* LTR: load the Task Register from the selector in T0, validating the
   GDT entry (available 16/32-bit TSS, present) and marking it busy. */
1881 void helper_ltr_T0(void)
1886 int index, type, entry_limit;
1889 selector = T0 & 0xffff;
1890 if ((selector & 0xfffc) == 0) {
1891 /* NULL selector case: invalid TR */
/* TI=1 selector is invalid for LTR -> #GP */
1897 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1899 index = selector & ~7;
1900 #ifdef TARGET_X86_64
/* long mode uses 16-byte system descriptors */
1901 if (env->hflags & HF_LMA_MASK)
1906 if ((index + entry_limit) > dt->limit)
1907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1908 ptr = dt->base + index;
1909 e1 = ldl_kernel(ptr);
1910 e2 = ldl_kernel(ptr + 4);
1911 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
/* must be a system descriptor: available 286 TSS (1) or 386 TSS (9) */
1912 if ((e2 & DESC_S_MASK) ||
1913 (type != 1 && type != 9))
1914 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1915 if (!(e2 & DESC_P_MASK))
1916 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1917 #ifdef TARGET_X86_64
1918 if (env->hflags & HF_LMA_MASK) {
1920 e3 = ldl_kernel(ptr + 8);
1921 e4 = ldl_kernel(ptr + 12);
/* upper half of a 16-byte descriptor must have a zero type field */
1922 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
1923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1924 load_seg_cache_raw_dt(&env->tr, e1, e2);
1925 env->tr.base |= (target_ulong)e3 << 32;
1929 load_seg_cache_raw_dt(&env->tr, e1, e2);
/* mark the TSS descriptor busy in the GDT */
1931 e2 |= DESC_TSS_BUSY_MASK;
1932 stl_kernel(ptr + 4, e2);
1934 env->tr.selector = selector;
1937 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data segment register (ES/SS/DS/FS/GS) in protected mode,
   performing the full descriptor permission checks.
   NOTE(review): several interior lines (rpl computation, table choice,
   accessed-bit OR) were elided; code kept byte-identical. */
1938 void load_seg(int seg_reg, int selector)
1947 cpl = env->hflags & HF_CPL_MASK;
1948 if ((selector & 0xfffc) == 0) {
1949 /* null selector case */
/* null SS is a #GP except for 64-bit mode at CPL < 3 */
1951 #ifdef TARGET_X86_64
1952 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1955 raise_exception_err(EXCP0D_GPF, 0);
1956 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1963 index = selector & ~7;
1964 if ((index + 7) > dt->limit)
1965 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1966 ptr = dt->base + index;
1967 e1 = ldl_kernel(ptr);
1968 e2 = ldl_kernel(ptr + 4);
/* system descriptors cannot be loaded into data segment registers */
1970 if (!(e2 & DESC_S_MASK))
1971 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1973 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1974 if (seg_reg == R_SS) {
1975 /* must be writable segment */
1976 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
1977 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* SS requires RPL == DPL == CPL */
1978 if (rpl != cpl || dpl != cpl)
1979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1981 /* must be readable segment */
1982 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
1983 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1985 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1986 /* if not conforming code, test rights */
1987 if (dpl < cpl || dpl < rpl)
1988 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* not-present: #SS for SS, #NP for the other registers */
1992 if (!(e2 & DESC_P_MASK)) {
1993 if (seg_reg == R_SS)
1994 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1996 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1999 /* set the access bit if not already set */
2000 if (!(e2 & DESC_A_MASK)) {
2002 stl_kernel(ptr + 4, e2);
2005 cpu_x86_load_seg_cache(env, seg_reg, selector,
2006 get_seg_base(e1, e2),
2007 get_seg_limit(e1, e2),
2010 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2011 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2016 /* protected mode jump */
/* Protected-mode far JMP (T0 = new CS selector, T1 = new EIP): handles
   direct code segments, TSS/task gates (task switch) and call gates.
   NOTE(review): several interior lines (rpl computation, conforming /
   non-conforming privilege comparisons) were elided. */
2017 void helper_ljmp_protected_T0_T1(int next_eip_addend)
2019 int new_cs, gate_cs, type;
2020 uint32_t e1, e2, cpl, dpl, rpl, limit;
2021 target_ulong new_eip, next_eip;
2025 if ((new_cs & 0xfffc) == 0)
2026 raise_exception_err(EXCP0D_GPF, 0);
2027 if (load_segment(&e1, &e2, new_cs) != 0)
2028 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2029 cpl = env->hflags & HF_CPL_MASK;
2030 if (e2 & DESC_S_MASK) {
/* direct jump to a code segment */
2031 if (!(e2 & DESC_CS_MASK))
2032 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2033 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2034 if (e2 & DESC_C_MASK) {
2035 /* conforming code segment */
2037 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2039 /* non conforming code segment */
2042 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2044 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2046 if (!(e2 & DESC_P_MASK))
2047 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2048 limit = get_seg_limit(e1, e2);
/* EIP limit check is skipped for 64-bit code segments */
2049 if (new_eip > limit &&
2050 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2051 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* CPL is preserved on a far jump */
2052 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2053 get_seg_base(e1, e2), limit, e2);
2056 /* jump to call or task gate */
2057 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2059 cpl = env->hflags & HF_CPL_MASK;
2060 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2062 case 1: /* 286 TSS */
2063 case 9: /* 386 TSS */
2064 case 5: /* task gate */
2065 if (dpl < cpl || dpl < rpl)
2066 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2067 next_eip = env->eip + next_eip_addend;
2068 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
/* switch_tss reloaded eflags; resync lazy condition codes */
2069 CC_OP = CC_OP_EFLAGS;
2071 case 4: /* 286 call gate */
2072 case 12: /* 386 call gate */
2073 if ((dpl < cpl) || (dpl < rpl))
2074 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2075 if (!(e2 & DESC_P_MASK))
2076 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2078 new_eip = (e1 & 0xffff);
/* 386 gates carry the high 16 bits of the offset in e2 */
2080 new_eip |= (e2 & 0xffff0000);
2081 if (load_segment(&e1, &e2, gate_cs) != 0)
2082 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2083 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2084 /* must be code segment */
2085 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2086 (DESC_S_MASK | DESC_CS_MASK)))
2087 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2088 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2089 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2090 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2091 if (!(e2 & DESC_P_MASK))
2092 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2093 limit = get_seg_limit(e1, e2);
2094 if (new_eip > limit)
2095 raise_exception_err(EXCP0D_GPF, 0);
2096 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2097 get_seg_base(e1, e2), limit, e2);
/* any other gate type -> #GP */
2101 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2107 /* real mode call */
/* Real/vm86-mode far CALL: push return CS:IP (32- or 16-bit depending
   on shift) and load the new CS:IP with segment base = selector << 4. */
2108 void helper_lcall_real_T0_T1(int shift, int next_eip)
2110 int new_cs, new_eip;
2111 uint32_t esp, esp_mask;
2117 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2118 ssp = env->segs[R_SS].base;
/* 32-bit push of return address */
2120 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2121 PUSHL(ssp, esp, esp_mask, next_eip);
/* 16-bit push of return address */
2123 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2124 PUSHW(ssp, esp, esp_mask, next_eip);
2127 SET_ESP(esp, esp_mask);
/* real-mode segment load: base is simply selector << 4 */
2129 env->segs[R_CS].selector = new_cs;
2130 env->segs[R_CS].base = (new_cs << 4);
2133 /* protected mode call */
/* Protected-mode far CALL (T0 = selector, T1 = offset): handles direct
   code segments, TSS/task gates and call gates, including the
   inner-privilege stack switch with parameter copy.
   NOTE(review): many interior lines (rpl, privilege comparisons, the
   64-bit push path setup, gate-type default) were elided. */
2134 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2136 int new_cs, new_stack, i;
2137 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2138 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2139 uint32_t val, limit, old_sp_mask;
2140 target_ulong ssp, old_ssp, next_eip, new_eip;
2144 next_eip = env->eip + next_eip_addend;
2146 if (loglevel & CPU_LOG_PCALL) {
2147 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2148 new_cs, (uint32_t)new_eip, shift);
2149 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2152 if ((new_cs & 0xfffc) == 0)
2153 raise_exception_err(EXCP0D_GPF, 0);
2154 if (load_segment(&e1, &e2, new_cs) != 0)
2155 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2156 cpl = env->hflags & HF_CPL_MASK;
2158 if (loglevel & CPU_LOG_PCALL) {
2159 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2162 if (e2 & DESC_S_MASK) {
/* direct call to a code segment */
2163 if (!(e2 & DESC_CS_MASK))
2164 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2165 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2166 if (e2 & DESC_C_MASK) {
2167 /* conforming code segment */
2169 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2171 /* non conforming code segment */
2174 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2176 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2178 if (!(e2 & DESC_P_MASK))
2179 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2181 #ifdef TARGET_X86_64
2182 /* XXX: check 16/32 bit cases in long mode */
/* 64-bit path: push return address on the 64-bit stack */
2187 PUSHQ(rsp, env->segs[R_CS].selector);
2188 PUSHQ(rsp, next_eip);
2189 /* from this point, not restartable */
2191 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2192 get_seg_base(e1, e2),
2193 get_seg_limit(e1, e2), e2);
/* legacy path: push return address using the SS size mask */
2199 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2200 ssp = env->segs[R_SS].base;
2202 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2203 PUSHL(ssp, sp, sp_mask, next_eip);
2205 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2206 PUSHW(ssp, sp, sp_mask, next_eip);
2209 limit = get_seg_limit(e1, e2);
2210 if (new_eip > limit)
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 /* from this point, not restartable */
2213 SET_ESP(sp, sp_mask);
2214 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2215 get_seg_base(e1, e2), limit, e2);
2219 /* check gate type */
2220 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2221 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2224 case 1: /* available 286 TSS */
2225 case 9: /* available 386 TSS */
2226 case 5: /* task gate */
2227 if (dpl < cpl || dpl < rpl)
2228 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2229 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
/* switch_tss reloaded eflags; resync lazy condition codes */
2230 CC_OP = CC_OP_EFLAGS;
2232 case 4: /* 286 call gate */
2233 case 12: /* 386 call gate */
2236 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2241 if (dpl < cpl || dpl < rpl)
2242 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2243 /* check valid bit */
2244 if (!(e2 & DESC_P_MASK))
2245 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
/* extract the gate target and the parameter count */
2246 selector = e1 >> 16;
2247 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2248 param_count = e2 & 0x1f;
2249 if ((selector & 0xfffc) == 0)
2250 raise_exception_err(EXCP0D_GPF, 0);
2252 if (load_segment(&e1, &e2, selector) != 0)
2253 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2254 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2256 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2258 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2259 if (!(e2 & DESC_P_MASK))
2260 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2262 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2263 /* to inner privilege */
/* new SS:ESP come from the current TSS for the target DPL */
2264 get_ss_esp_from_tss(&ss, &sp, dpl);
2266 if (loglevel & CPU_LOG_PCALL)
2267 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2268 ss, sp, param_count, ESP);
2270 if ((ss & 0xfffc) == 0)
2271 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2272 if ((ss & 3) != dpl)
2273 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2274 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2275 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2276 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2278 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2279 if (!(ss_e2 & DESC_S_MASK) ||
2280 (ss_e2 & DESC_CS_MASK) ||
2281 !(ss_e2 & DESC_W_MASK))
2282 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2283 if (!(ss_e2 & DESC_P_MASK))
2284 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2286 // push_size = ((param_count * 2) + 8) << shift;
/* save the old stack, then copy param_count parameters across */
2288 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2289 old_ssp = env->segs[R_SS].base;
2291 sp_mask = get_sp_mask(ss_e2);
2292 ssp = get_seg_base(ss_e1, ss_e2);
2294 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2295 PUSHL(ssp, sp, sp_mask, ESP);
2296 for(i = param_count - 1; i >= 0; i--) {
2297 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2298 PUSHL(ssp, sp, sp_mask, val);
/* 16-bit variant of the same copy */
2301 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2302 PUSHW(ssp, sp, sp_mask, ESP);
2303 for(i = param_count - 1; i >= 0; i--) {
2304 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2305 PUSHW(ssp, sp, sp_mask, val);
2310 /* to same privilege */
2312 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2313 ssp = env->segs[R_SS].base;
2314 // push_size = (4 << shift);
2319 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2320 PUSHL(ssp, sp, sp_mask, next_eip);
2322 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2323 PUSHW(ssp, sp, sp_mask, next_eip);
2326 /* from this point, not restartable */
/* commit new SS (only when a stack switch occurred) and CS */
2329 ss = (ss & ~3) | dpl;
2330 cpu_x86_load_seg_cache(env, R_SS, ss,
2332 get_seg_limit(ss_e1, ss_e2),
2336 selector = (selector & ~3) | dpl;
2337 cpu_x86_load_seg_cache(env, R_CS, selector,
2338 get_seg_base(e1, e2),
2339 get_seg_limit(e1, e2),
2341 cpu_x86_set_cpl(env, dpl);
2342 SET_ESP(sp, sp_mask);
/* kqemu fast path: return to the accelerator directly */
2346 if (kqemu_is_ok(env)) {
2347 env->exception_index = -1;
2353 /* real and vm86 mode iret */
/* IRET in real or vm86 mode: pop IP, CS and FLAGS (32- or 16-bit per
   shift), reload CS real-mode style, and update the allowed eflags. */
2354 void helper_iret_real(int shift)
2356 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2360 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2362 ssp = env->segs[R_SS].base;
/* 32-bit pops */
2365 POPL(ssp, sp, sp_mask, new_eip);
2366 POPL(ssp, sp, sp_mask, new_cs);
2368 POPL(ssp, sp, sp_mask, new_eflags);
/* 16-bit pops */
2371 POPW(ssp, sp, sp_mask, new_eip);
2372 POPW(ssp, sp, sp_mask, new_cs);
2373 POPW(ssp, sp, sp_mask, new_eflags);
2375 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2376 load_seg_vm(R_CS, new_cs);
/* in vm86 mode IOPL is not writable by IRET */
2378 if (env->eflags & VM_MASK)
2379 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2381 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
/* a 16-bit IRET only updates the low 16 flag bits */
2383 eflags_mask &= 0xffff;
2384 load_eflags(new_eflags, eflags_mask);
/* After a privilege-lowering return, nullify any data segment whose
   DPL is below the new CPL (except possibly FS/GS, see note). */
2387 static inline void validate_seg(int seg_reg, int cpl)
2392 /* XXX: on x86_64, we do not want to nullify FS and GS because
2393 they may still contain a valid base. I would be interested to
2394 know how a real x86_64 CPU behaves */
2395 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2396 (env->segs[seg_reg].selector & 0xfffc) == 0)
2399 e2 = env->segs[seg_reg].flags;
2400 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2401 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2402 /* data or non conforming code segment */
2404 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2409 /* protected mode iret */
/* Protected-mode far RET / IRET core. is_iret selects IRET semantics
   (pop eflags, possible return to vm86); addend is the extra stack
   adjustment for "ret n".
   NOTE(review): many interior lines (64-bit pop setup, privilege
   comparisons, labels) were elided; code kept byte-identical. */
2410 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2412 uint32_t new_cs, new_eflags, new_ss;
2413 uint32_t new_es, new_ds, new_fs, new_gs;
2414 uint32_t e1, e2, ss_e1, ss_e2;
2415 int cpl, dpl, rpl, eflags_mask, iopl;
2416 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2418 #ifdef TARGET_X86_64
2423 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2425 ssp = env->segs[R_SS].base;
2426 new_eflags = 0; /* avoid warning */
2427 #ifdef TARGET_X86_64
/* 64-bit pops */
2433 POPQ(sp, new_eflags);
/* 32-bit pops */
2439 POPL(ssp, sp, sp_mask, new_eip);
2440 POPL(ssp, sp, sp_mask, new_cs);
2443 POPL(ssp, sp, sp_mask, new_eflags);
/* IRET with VM set in the popped eflags returns to vm86 mode */
2444 if (new_eflags & VM_MASK)
2445 goto return_to_vm86;
/* 16-bit pops */
2449 POPW(ssp, sp, sp_mask, new_eip);
2450 POPW(ssp, sp, sp_mask, new_cs);
2452 POPW(ssp, sp, sp_mask, new_eflags);
2455 if (loglevel & CPU_LOG_PCALL) {
2456 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2457 new_cs, new_eip, shift, addend);
2458 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2461 if ((new_cs & 0xfffc) == 0)
2462 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2463 if (load_segment(&e1, &e2, new_cs) != 0)
2464 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2465 if (!(e2 & DESC_S_MASK) ||
2466 !(e2 & DESC_CS_MASK))
2467 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2468 cpl = env->hflags & HF_CPL_MASK;
/* a return may never raise privilege (rpl < cpl -> #GP) */
2471 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2472 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2473 if (e2 & DESC_C_MASK) {
2475 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2478 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2480 if (!(e2 & DESC_P_MASK))
2481 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2484 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2485 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2486 /* return to same privilege level */
2487 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2488 get_seg_base(e1, e2),
2489 get_seg_limit(e1, e2),
2492 /* return to different privilege level */
2493 #ifdef TARGET_X86_64
/* pop the outer SS:ESP saved by the original inter-privilege call */
2502 POPL(ssp, sp, sp_mask, new_esp);
2503 POPL(ssp, sp, sp_mask, new_ss);
2507 POPW(ssp, sp, sp_mask, new_esp);
2508 POPW(ssp, sp, sp_mask, new_ss);
2511 if (loglevel & CPU_LOG_PCALL) {
2512 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2516 if ((new_ss & 0xfffc) == 0) {
2517 #ifdef TARGET_X86_64
2518 /* NULL ss is allowed in long mode if cpl != 3*/
2519 /* XXX: test CS64 ? */
2520 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2521 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2523 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2524 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2525 DESC_W_MASK | DESC_A_MASK);
2526 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2530 raise_exception_err(EXCP0D_GPF, 0);
/* validate the popped SS selector against rpl */
2533 if ((new_ss & 3) != rpl)
2534 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2535 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2536 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2537 if (!(ss_e2 & DESC_S_MASK) ||
2538 (ss_e2 & DESC_CS_MASK) ||
2539 !(ss_e2 & DESC_W_MASK))
2540 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2541 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2543 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2544 if (!(ss_e2 & DESC_P_MASK))
2545 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2546 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2547 get_seg_base(ss_e1, ss_e2),
2548 get_seg_limit(ss_e1, ss_e2),
2552 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2553 get_seg_base(e1, e2),
2554 get_seg_limit(e1, e2),
2556 cpu_x86_set_cpl(env, rpl);
2558 #ifdef TARGET_X86_64
2559 if (env->hflags & HF_CS64_MASK)
2563 sp_mask = get_sp_mask(ss_e2);
2565 /* validate data segments */
2566 validate_seg(R_ES, rpl);
2567 validate_seg(R_DS, rpl);
2568 validate_seg(R_FS, rpl);
2569 validate_seg(R_GS, rpl);
2573 SET_ESP(sp, sp_mask);
2576 /* NOTE: 'cpl' is the _old_ CPL */
2577 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
/* only CPL 0 may change IOPL; only CPL <= IOPL may change IF */
2579 eflags_mask |= IOPL_MASK;
2580 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2582 eflags_mask |= IF_MASK;
2584 eflags_mask &= 0xffff;
2585 load_eflags(new_eflags, eflags_mask);
/* return_to_vm86: pop the vm86 stack frame and enter vm86 at CPL 3 */
2590 POPL(ssp, sp, sp_mask, new_esp);
2591 POPL(ssp, sp, sp_mask, new_ss);
2592 POPL(ssp, sp, sp_mask, new_es);
2593 POPL(ssp, sp, sp_mask, new_ds);
2594 POPL(ssp, sp, sp_mask, new_fs);
2595 POPL(ssp, sp, sp_mask, new_gs);
2597 /* modify processor state */
2598 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2599 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2600 load_seg_vm(R_CS, new_cs & 0xffff);
2601 cpu_x86_set_cpl(env, 3);
2602 load_seg_vm(R_SS, new_ss & 0xffff);
2603 load_seg_vm(R_ES, new_es & 0xffff);
2604 load_seg_vm(R_DS, new_ds & 0xffff);
2605 load_seg_vm(R_FS, new_fs & 0xffff);
2606 load_seg_vm(R_GS, new_gs & 0xffff);
2608 env->eip = new_eip & 0xffff;
/* Protected-mode IRET: if NT is set, perform a task switch back via the
   back-link in the current TSS; otherwise do a normal protected return. */
2612 void helper_iret_protected(int shift, int next_eip)
2614 int tss_selector, type;
2617 /* specific case for TSS */
2618 if (env->eflags & NT_MASK) {
2619 #ifdef TARGET_X86_64
/* task switches do not exist in long mode */
2620 if (env->hflags & HF_LMA_MASK)
2621 raise_exception_err(EXCP0D_GPF, 0);
/* back-link selector is the first word of the current TSS */
2623 tss_selector = lduw_kernel(env->tr.base + 0);
2624 if (tss_selector & 4)
2625 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2626 if (load_segment(&e1, &e2, tss_selector) != 0)
2627 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2628 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2629 /* NOTE: we check both segment and busy TSS */
2631 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2632 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2634 helper_ret_protected(shift, 1, 0);
/* kqemu fast path */
2637 if (kqemu_is_ok(env)) {
2638 CC_OP = CC_OP_EFLAGS;
2639 env->exception_index = -1;
2645 void helper_lret_protected(int shift, int addend)
2647 helper_ret_protected(shift, 0, addend);
2649 if (kqemu_is_ok(env)) {
2650 env->exception_index = -1;
/* SYSENTER: fast transition to CPL 0.  CS/SS are derived from
 * MSR_IA32_SYSENTER_CS (SS = CS + 8); ESP/EIP come from the
 * corresponding SYSENTER MSRs.  #GP(0) if the CS MSR is zero. */
2656 void helper_sysenter(void)
2658 if (env->sysenter_cs == 0) {
2659 raise_exception_err(EXCP0D_GPF, 0);
2661 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2662 cpu_x86_set_cpl(env, 0)
2663 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2665 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2667 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2668 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2670 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2672 DESC_W_MASK | DESC_A_MASK);
2673 ESP = env->sysenter_esp;
2674 EIP = env->sysenter_eip;
/* SYSEXIT: fast return to CPL 3.  CS = SYSENTER_CS + 16,
 * SS = SYSENTER_CS + 24, both forced to RPL 3.  #GP(0) if the CS
 * MSR is zero or the caller is not at CPL 0. */
2677 void helper_sysexit(void)
2681 cpl = env->hflags & HF_CPL_MASK;
2682 if (env->sysenter_cs == 0 || cpl != 0) {
2683 raise_exception_err(EXCP0D_GPF, 0);
2685 cpu_x86_set_cpl(env, 3);
2686 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2688 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2689 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2690 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2691 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2693 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2694 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2695 DESC_W_MASK | DESC_A_MASK);
/* kqemu fast path back to user mode, if available. */
2699 if (kqemu_is_ok(env)) {
2700 env->exception_index = -1;
/* MOV to control register: dispatch on 'reg' and route through the
 * cpu_x86_update_crN helpers so TLB/feature state stays coherent.
 * CR8 writes update the APIC task-priority register. */
2706 void helper_movl_crN_T0(int reg)
2708 #if !defined(CONFIG_USER_ONLY)
2711 cpu_x86_update_cr0(env, T0);
2714 cpu_x86_update_cr3(env, T0);
2717 cpu_x86_update_cr4(env, T0);
2720 cpu_set_apic_tpr(env, T0);
/* MOV to debug register.  NOTE(review): body not visible here —
 * presumably stores T0 into env->dr[reg]; confirm against full source. */
2730 void helper_movl_drN_T0(int reg)
/* INVLPG: flush the TLB entry covering 'addr'. */
2735 void helper_invlpg(target_ulong addr)
2737 cpu_x86_flush_tlb(env, addr);
/* RDTSC: read the time-stamp counter into EDX:EAX.  #GP if
 * CR4.TSD is set and the caller is not at CPL 0. */
2740 void helper_rdtsc(void)
2744 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2745 raise_exception(EXCP0D_GPF);
2747 val = cpu_get_tsc(env);
2748 EAX = (uint32_t)(val);
2749 EDX = (uint32_t)(val >> 32);
/* RDPMC: performance-monitoring counter read.  #GP if CR4.PCE is set
 * and CPL != 0; otherwise the instruction is not implemented and
 * raises #UD (unless the SVM intercept takes it first). */
2752 void helper_rdpmc(void)
2754 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2755 raise_exception(EXCP0D_GPF);
2758 if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
2759 /* currently unimplemented */
2760 raise_exception_err(EXCP06_ILLOP, 0);
/* WRMSR/RDMSR are no-ops in user-mode emulation (no privileged state). */
2764 #if defined(CONFIG_USER_ONLY)
2765 void helper_wrmsr(void)
2769 void helper_rdmsr(void)
/* WRMSR: write EDX:EAX to the MSR selected by ECX. */
2773 void helper_wrmsr(void)
2777 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2779 switch((uint32_t)ECX) {
2780 case MSR_IA32_SYSENTER_CS:
2781 env->sysenter_cs = val & 0xffff;
2783 case MSR_IA32_SYSENTER_ESP:
2784 env->sysenter_esp = val;
2786 case MSR_IA32_SYSENTER_EIP:
2787 env->sysenter_eip = val;
2789 case MSR_IA32_APICBASE:
2790 cpu_set_apic_base(env, val);
/* EFER: only the bits backed by advertised CPUID features are
 * writable; everything else is preserved. */
2794 uint64_t update_mask;
2796 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2797 update_mask |= MSR_EFER_SCE;
2798 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2799 update_mask |= MSR_EFER_LME;
2800 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2801 update_mask |= MSR_EFER_FFXSR;
2802 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2803 update_mask |= MSR_EFER_NXE;
2804 env->efer = (env->efer & ~update_mask) |
2805 (val & update_mask);
2814 case MSR_VM_HSAVE_PA:
2815 env->vm_hsave = val;
2817 #ifdef TARGET_X86_64
/* 64-bit only: FS/GS base and kernel GS base MSRs. */
2828 env->segs[R_FS].base = val;
2831 env->segs[R_GS].base = val;
2833 case MSR_KERNELGSBASE:
2834 env->kernelgsbase = val;
2838 /* XXX: exception ? */
/* RDMSR: read the MSR selected by ECX into EDX:EAX.  Mirror image of
 * helper_wrmsr; unknown MSRs fall through without faulting (XXX). */
2843 void helper_rdmsr(void)
2846 switch((uint32_t)ECX) {
2847 case MSR_IA32_SYSENTER_CS:
2848 val = env->sysenter_cs;
2850 case MSR_IA32_SYSENTER_ESP:
2851 val = env->sysenter_esp;
2853 case MSR_IA32_SYSENTER_EIP:
2854 val = env->sysenter_eip;
2856 case MSR_IA32_APICBASE:
2857 val = cpu_get_apic_base(env);
2868 case MSR_VM_HSAVE_PA:
2869 val = env->vm_hsave;
2871 #ifdef TARGET_X86_64
/* 64-bit only: FS/GS base and kernel GS base MSRs. */
2882 val = env->segs[R_FS].base;
2885 val = env->segs[R_GS].base;
2887 case MSR_KERNELGSBASE:
2888 val = env->kernelgsbase;
2892 /* XXX: exception ? */
/* Split the 64-bit result across EDX:EAX. */
2896 EAX = (uint32_t)(val);
2897 EDX = (uint32_t)(val >> 32);
/* LSL: load the segment limit for the selector in T0.  On success the
 * limit goes to the destination and ZF is set; on any privilege or
 * type failure ZF is cleared and the destination is untouched. */
2901 void helper_lsl(void)
2903 unsigned int selector, limit;
2904 uint32_t e1, e2, eflags;
2905 int rpl, dpl, cpl, type;
/* Materialize current flags so only ZF is edited below. */
2907 eflags = cc_table[CC_OP].compute_all();
2908 selector = T0 & 0xffff;
2909 if (load_segment(&e1, &e2, selector) != 0)
2912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2913 cpl = env->hflags & HF_CPL_MASK;
2914 if (e2 & DESC_S_MASK) {
/* Conforming code segments skip the DPL check entirely. */
2915 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2918 if (dpl < cpl || dpl < rpl)
2922 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2933 if (dpl < cpl || dpl < rpl) {
2935 CC_SRC = eflags & ~CC_Z;
2939 limit = get_seg_limit(e1, e2);
2941 CC_SRC = eflags | CC_Z;
/* LAR: load the access rights byte(s) for the selector in T0 into T1
 * (masked to 0x00f0ff00).  ZF reports success exactly like LSL. */
2944 void helper_lar(void)
2946 unsigned int selector;
2947 uint32_t e1, e2, eflags;
2948 int rpl, dpl, cpl, type;
2950 eflags = cc_table[CC_OP].compute_all();
2951 selector = T0 & 0xffff;
/* Null selector always fails. */
2952 if ((selector & 0xfffc) == 0)
2954 if (load_segment(&e1, &e2, selector) != 0)
2957 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2958 cpl = env->hflags & HF_CPL_MASK;
2959 if (e2 & DESC_S_MASK) {
2960 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2963 if (dpl < cpl || dpl < rpl)
2967 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2981 if (dpl < cpl || dpl < rpl) {
2983 CC_SRC = eflags & ~CC_Z;
2987 T1 = e2 & 0x00f0ff00;
2988 CC_SRC = eflags | CC_Z;
/* VERR: set ZF if the segment selected by T0 is readable at the
 * current privilege level, clear it otherwise.  Never faults on a
 * bad selector — failure is reported purely through ZF. */
2991 void helper_verr(void)
2993 unsigned int selector;
2994 uint32_t e1, e2, eflags;
2997 eflags = cc_table[CC_OP].compute_all();
2998 selector = T0 & 0xffff;
2999 if ((selector & 0xfffc) == 0)
3001 if (load_segment(&e1, &e2, selector) != 0)
/* Only data/code (S) segments can be readable. */
3003 if (!(e2 & DESC_S_MASK))
3006 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3007 cpl = env->hflags & HF_CPL_MASK;
3008 if (e2 & DESC_CS_MASK) {
3009 if (!(e2 & DESC_R_MASK))
/* Non-conforming code segments also need the DPL check. */
3011 if (!(e2 & DESC_C_MASK)) {
3012 if (dpl < cpl || dpl < rpl)
3016 if (dpl < cpl || dpl < rpl) {
3018 CC_SRC = eflags & ~CC_Z;
3022 CC_SRC = eflags | CC_Z;
/* VERW: same as VERR but tests writability; code segments are never
 * writable, so they fail the check unconditionally. */
3025 void helper_verw(void)
3027 unsigned int selector;
3028 uint32_t e1, e2, eflags;
3031 eflags = cc_table[CC_OP].compute_all();
3032 selector = T0 & 0xffff;
3033 if ((selector & 0xfffc) == 0)
3035 if (load_segment(&e1, &e2, selector) != 0)
3037 if (!(e2 & DESC_S_MASK))
3040 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3041 cpl = env->hflags & HF_CPL_MASK;
3042 if (e2 & DESC_CS_MASK) {
3045 if (dpl < cpl || dpl < rpl)
3047 if (!(e2 & DESC_W_MASK)) {
3049 CC_SRC = eflags & ~CC_Z;
3053 CC_SRC = eflags | CC_Z;
/* FLD (80-bit): push the extended-precision value at A0 onto the
 * FPU register stack and mark the new top-of-stack slot valid. */
3058 void helper_fldt_ST0_A0(void)
3061 new_fpstt = (env->fpstt - 1) & 7;
3062 env->fpregs[new_fpstt].d = helper_fldt(A0);
3063 env->fpstt = new_fpstt;
3064 env->fptags[new_fpstt] = 0; /* validate stack entry */
/* FSTP (80-bit): store ST0 to memory at A0 in extended precision. */
3067 void helper_fstt_ST0_A0(void)
3069 helper_fstt(ST0, A0);
/* Record an FPU exception in the status word; if the exception is
 * unmasked in FPUC, also set the summary/busy bits so a later
 * FP instruction will deliver it. */
3072 static void fpu_set_exception(int mask)
3075 if (env->fpus & (~env->fpuc & FPUC_EM))
3076 env->fpus |= FPUS_SE | FPUS_B;
/* Division helper that records divide-by-zero before returning
 * the (IEEE) quotient. */
3079 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3082 fpu_set_exception(FPUS_ZE);
/* Deliver a pending FPU exception: #MF if CR0.NE is set, otherwise
 * the legacy FERR path (non-user-mode builds). */
3086 void fpu_raise_exception(void)
3088 if (env->cr[0] & CR0_NE_MASK) {
3089 raise_exception(EXCP10_COPR);
3091 #if !defined(CONFIG_USER_ONLY)
/* FBLD: load an 18-digit packed-BCD value (10 bytes at A0) and push
 * it as a binary value; byte 9 bit 7 is the sign. */
3100 void helper_fbld_ST0_A0(void)
/* Accumulate two BCD digits per byte, most-significant byte first. */
3108 for(i = 8; i >= 0; i--) {
3110 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3113 if (ldub(A0 + 9) & 0x80)
/* FBST: store ST0 as packed BCD at A0 (9 value bytes + sign byte). */
3119 void helper_fbst_ST0_A0(void)
3122 target_ulong mem_ref, mem_end;
3125 val = floatx_to_int64(ST0, &env->fp_status);
3127 mem_end = mem_ref + 9;
/* Emit digit pairs until the value is exhausted... */
3134 while (mem_ref < mem_end) {
3139 v = ((v / 10) << 4) | (v % 10);
/* ...then zero-fill the remaining bytes. */
3142 while (mem_ref < mem_end) {
/* F2XM1: ST0 = 2^ST0 - 1 (computed via host pow()). */
3147 void helper_f2xm1(void)
3149 ST0 = pow(2.0,ST0) - 1.0;
/* FYL2X: ST1 = ST1 * log2(ST0), then pop. */
3152 void helper_fyl2x(void)
3154 CPU86_LDouble fptemp;
3158 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
/* Clear C3..C0 condition bits. */
3162 env->fpus &= (~0x4700);
/* FPTAN: partial tangent; C2 signals an out-of-range argument. */
3167 void helper_fptan(void)
3169 CPU86_LDouble fptemp;
3172 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3178 env->fpus &= (~0x400); /* C2 <-- 0 */
3179 /* the above code is for |arg| < 2**52 only */
/* FPATAN: ST1 = atan2(ST1, ST0), then pop. */
3183 void helper_fpatan(void)
3185 CPU86_LDouble fptemp, fpsrcop;
3189 ST1 = atan2(fpsrcop,fptemp);
/* FXTRACT: split ST0 into exponent and significand. */
3193 void helper_fxtract(void)
3195 CPU86_LDoubleU temp;
3196 unsigned int expdif;
/* Unbiased exponent of ST0. */
3199 expdif = EXPD(temp) - EXPBIAS;
3200 /*DP exponent bias*/
/* FPREM1: IEEE partial remainder of ST0 / ST1 (round-to-nearest
 * quotient).  C2 set means the reduction is incomplete and the
 * instruction must be re-executed; C0/C3/C1 receive the low three
 * quotient bits when complete. */
3207 void helper_fprem1(void)
3209 CPU86_LDouble dblq, fpsrcop, fptemp;
3210 CPU86_LDoubleU fpsrcop1, fptemp1;
3212 signed long long int q;
/* Invalid operands produce a NaN and clear the condition bits. */
3214 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3215 ST0 = 0.0 / 0.0; /* NaN */
3216 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3222 fpsrcop1.d = fpsrcop;
/* Exponent difference decides whether one pass can finish. */
3224 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3227 /* optimisation? taken from the AMD docs */
3228 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3229 /* ST0 is unchanged */
3234 dblq = fpsrcop / fptemp;
3235 /* round dblq towards nearest integer */
3237 ST0 = fpsrcop - fptemp * dblq;
3239 /* convert dblq to q by truncating towards zero */
3241 q = (signed long long int)(-dblq);
3243 q = (signed long long int)dblq;
3245 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3246 /* (C0,C3,C1) <-- (q2,q1,q0) */
3247 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3248 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3249 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
/* Exponent gap too large: do a partial reduction and set C2 so the
 * caller retries. */
3251 env->fpus |= 0x400; /* C2 <-- 1 */
3252 fptemp = pow(2.0, expdif - 50);
3253 fpsrcop = (ST0 / ST1) / fptemp;
3254 /* fpsrcop = integer obtained by chopping */
3255 fpsrcop = (fpsrcop < 0.0) ?
3256 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3257 ST0 -= (ST1 * fpsrcop * fptemp);
/* FPREM: 8087-compatible partial remainder of ST0 / ST1 (quotient
 * truncated toward zero, unlike FPREM1's round-to-nearest).
 * Condition-code protocol is identical to helper_fprem1. */
3261 void helper_fprem(void)
3263 CPU86_LDouble dblq, fpsrcop, fptemp;
3264 CPU86_LDoubleU fpsrcop1, fptemp1;
3266 signed long long int q;
3268 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3269 ST0 = 0.0 / 0.0; /* NaN */
3270 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3274 fpsrcop = (CPU86_LDouble)ST0;
3275 fptemp = (CPU86_LDouble)ST1;
3276 fpsrcop1.d = fpsrcop;
3278 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3281 /* optimisation? taken from the AMD docs */
3282 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3283 /* ST0 is unchanged */
/* Small exponent gap: a single exact reduction completes. */
3287 if ( expdif < 53 ) {
3288 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3289 /* round dblq towards zero */
3290 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3291 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3293 /* convert dblq to q by truncating towards zero */
3295 q = (signed long long int)(-dblq);
3297 q = (signed long long int)dblq;
3299 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3300 /* (C0,C3,C1) <-- (q2,q1,q0) */
3301 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3302 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3303 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
/* Large gap: reduce by a power-of-two scaled chunk and flag C2. */
3305 int N = 32 + (expdif % 32); /* as per AMD docs */
3306 env->fpus |= 0x400; /* C2 <-- 1 */
3307 fptemp = pow(2.0, (double)(expdif - N));
3308 fpsrcop = (ST0 / ST1) / fptemp;
3309 /* fpsrcop = integer obtained by chopping */
3310 fpsrcop = (fpsrcop < 0.0) ?
3311 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3312 ST0 -= (ST1 * fpsrcop * fptemp);
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop; only defined for
 * ST0 + 1 > 0. */
3316 void helper_fyl2xp1(void)
3318 CPU86_LDouble fptemp;
3321 if ((fptemp+1.0)>0.0) {
3322 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3326 env->fpus &= (~0x4700);
/* FSQRT: ST0 = sqrt(ST0); condition bits cleared. */
3331 void helper_fsqrt(void)
3333 CPU86_LDouble fptemp;
3337 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* FSINCOS: push cos(ST0) over sin(ST0); C2 flags out-of-range args. */
3343 void helper_fsincos(void)
3345 CPU86_LDouble fptemp;
3348 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3354 env->fpus &= (~0x400); /* C2 <-- 0 */
3355 /* the above code is for |arg| < 2**63 only */
/* FRNDINT: round ST0 to an integer using the current rounding mode. */
3359 void helper_frndint(void)
3361 ST0 = floatx_round_to_int(ST0, &env->fp_status);
/* FSCALE: ST0 = ST0 * 2^trunc(ST1). */
3364 void helper_fscale(void)
3366 ST0 = ldexp (ST0, (int)(ST1));
/* FSIN: ST0 = sin(ST0); C2 flags out-of-range arguments. */
3369 void helper_fsin(void)
3371 CPU86_LDouble fptemp;
3374 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3378 env->fpus &= (~0x400); /* C2 <-- 0 */
3379 /* the above code is for |arg| < 2**53 only */
/* FCOS: ST0 = cos(ST0); C2 flags out-of-range arguments. */
3383 void helper_fcos(void)
3385 CPU86_LDouble fptemp;
3388 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3392 env->fpus &= (~0x400); /* C2 <-- 0 */
3393 /* the above code is for |arg5 < 2**63 only */
/* FXAM: classify ST0 (zero / denormal / normal / infinity / NaN)
 * into the C3..C0 condition bits; C1 receives the sign. */
3397 void helper_fxam_ST0(void)
3399 CPU86_LDoubleU temp;
3404 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3406 env->fpus |= 0x200; /* C1 <-- 1 */
3408 /* XXX: test fptags too */
3409 expdif = EXPD(temp);
/* All-ones exponent: infinity or NaN, depending on the mantissa. */
3410 if (expdif == MAXEXPD) {
3411 #ifdef USE_X86LDOUBLE
3412 if (MANTD(temp) == 0x8000000000000000ULL)
3414 if (MANTD(temp) == 0)
3416 env->fpus |= 0x500 /*Infinity*/;
3418 env->fpus |= 0x100 /*NaN*/;
/* Zero exponent: zero or denormal, depending on the mantissa. */
3419 } else if (expdif == 0) {
3420 if (MANTD(temp) == 0)
3421 env->fpus |= 0x4000 /*Zero*/;
3423 env->fpus |= 0x4400 /*Denormal*/;
/* FSTENV/FNSTENV: write the FPU environment (control/status/tag
 * words plus instruction/operand pointers) to 'ptr'; layout is
 * 32-bit or 16-bit depending on 'data32'.  The tag word is rebuilt
 * from fptags[] and the register contents. */
3429 void helper_fstenv(target_ulong ptr, int data32)
3431 int fpus, fptag, exp, i;
/* Fold the current top-of-stack into the status word. */
3435 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3437 for (i=7; i>=0; i--) {
3439 if (env->fptags[i]) {
3442 tmp.d = env->fpregs[i].d;
/* Tag encoding: 01 = zero, 10 = special (NaN/inf/denormal). */
3445 if (exp == 0 && mant == 0) {
3448 } else if (exp == 0 || exp == MAXEXPD
3449 #ifdef USE_X86LDOUBLE
3450 || (mant & (1LL << 63)) == 0
3453 /* NaNs, infinity, denormal */
/* 32-bit layout: 7 dwords. */
3460 stl(ptr, env->fpuc);
3462 stl(ptr + 8, fptag);
3463 stl(ptr + 12, 0); /* fpip */
3464 stl(ptr + 16, 0); /* fpcs */
3465 stl(ptr + 20, 0); /* fpoo */
3466 stl(ptr + 24, 0); /* fpos */
/* 16-bit layout: 7 words. */
3469 stw(ptr, env->fpuc)
3471 stw(ptr + 4, fptag);
/* FLDENV: inverse of FSTENV — reload control/status/tag words; only
 * the "empty" (11) tag state is kept per register. */
3479 void helper_fldenv(target_ulong ptr, int data32)
3484 env->fpuc = lduw(ptr);
3485 fpus = lduw(ptr + 4);
3486 fptag = lduw(ptr + 8);
3489 env->fpuc = lduw(ptr);
3490 fpus = lduw(ptr + 2);
3491 fptag = lduw(ptr + 4);
3493 env->fpstt = (fpus >> 11) & 7;
3494 env->fpus = fpus & ~0x3800;
3495 for(i = 0;i < 8; i++) {
3496 env->fptags[i] = ((fptag & 3) == 3);
/* FSAVE: store the full FPU state — environment via helper_fstenv,
 * then the eight 80-bit data registers in stack order. */
3501 void helper_fsave(target_ulong ptr, int data32)
3506 helper_fstenv(ptr, data32);
/* Environment occupies 14 (16-bit) or 28 (32-bit) bytes. */
3508 ptr += (14 << data32);
3509 for(i = 0;i < 8; i++) {
3511 helper_fstt(tmp, ptr);
/* FRSTOR: inverse of FSAVE — reload environment then data registers. */
3529 void helper_frstor(target_ulong ptr, int data32)
3534 helper_fldenv(ptr, data32);
3535 ptr += (14 << data32);
3537 for(i = 0;i < 8; i++) {
3538 tmp = helper_fldt(ptr);
/* FXSAVE: store x87 + SSE state in the 512-byte FXSAVE layout.
 * The tag word is stored in its abridged (one bit per register,
 * inverted) form; XMM state is saved only when CR4.OSFXSR is set. */
3544 void helper_fxsave(target_ulong ptr, int data64)
3546 int fpus, fptag, i, nb_xmm_regs;
3550 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3552 for(i = 0; i < 8; i++) {
3553 fptag |= (env->fptags[i] << i);
3555 stw(ptr, env->fpuc);
/* Abridged tag: 1 = valid, hence the XOR. */
3557 stw(ptr + 4, fptag ^ 0xff);
3560 for(i = 0;i < 8; i++) {
3562 helper_fstt(tmp, addr);
3566 if (env->cr[4] & CR4_OSFXSR_MASK) {
3567 /* XXX: finish it */
3568 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3569 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
/* 8 XMM registers in 32-bit mode, 16 in 64-bit mode. */
3570 nb_xmm_regs = 8 << data64;
3572 for(i = 0; i < nb_xmm_regs; i++) {
3573 stq(addr, env->xmm_regs[i].XMM_Q(0));
3574 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
/* FXRSTOR: inverse of FXSAVE — reload x87 state and, when
 * CR4.OSFXSR is set, MXCSR and the XMM registers. */
3580 void helper_fxrstor(target_ulong ptr, int data64)
3582 int i, fpus, fptag, nb_xmm_regs;
3586 env->fpuc = lduw(ptr);
3587 fpus = lduw(ptr + 2);
3588 fptag = lduw(ptr + 4);
3589 env->fpstt = (fpus >> 11) & 7;
3590 env->fpus = fpus & ~0x3800;
3592 for(i = 0;i < 8; i++) {
/* Abridged tag is re-expanded: bit clear means register empty. */
3593 env->fptags[i] = ((fptag >> i) & 1);
3597 for(i = 0;i < 8; i++) {
3598 tmp = helper_fldt(addr);
3603 if (env->cr[4] & CR4_OSFXSR_MASK) {
3604 /* XXX: finish it */
3605 env->mxcsr = ldl(ptr + 0x18);
3607 nb_xmm_regs = 8 << data64;
3609 for(i = 0; i < nb_xmm_regs; i++) {
3610 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3611 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
/* When the host long double is NOT 80-bit x87 format, convert between
 * the guest's 80-bit (mantissa, sign+exponent) pair and the host
 * double representation. */
3617 #ifndef USE_X86LDOUBLE
3619 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3621 CPU86_LDoubleU temp;
/* 52-bit double mantissa widened to 63 bits + explicit integer bit. */
3626 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3627 /* exponent + sign */
3628 e = EXPD(temp) - EXPBIAS + 16383;
3629 e |= SIGND(temp) >> 16;
/* Rebuild a host double from the 80-bit pieces.  Overflow of the
 * narrower double exponent is not handled (XXX below). */
3633 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3635 CPU86_LDoubleU temp;
3639 /* XXX: handle overflow ? */
3640 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3641 e |= (upper >> 4) & 0x800; /* sign */
3642 ll = (mant >> 11) & ((1LL << 52) - 1);
3644 temp.l.upper = (e << 20) | (ll >> 32);
3647 temp.ll = ll | ((uint64_t)e << 52);
/* Host long double IS 80-bit: the conversion is a plain bit copy. */
3654 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3656 CPU86_LDoubleU temp;
3659 *pmant = temp.l.lower;
3660 *pexp = temp.l.upper;
3663 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3665 CPU86_LDoubleU temp;
3667 temp.l.upper = upper;
3668 temp.l.lower = mant;
3673 #ifdef TARGET_X86_64
3675 //#define DEBUG_MULDIV
/* 128-bit add: (*phigh:*plow) += (b:a), propagating the carry. */
3677 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
/* 128-bit two's-complement negation. */
3686 static void neg128(uint64_t *plow, uint64_t *phigh)
3690 add128(plow, phigh, 1, 0);
3693 /* return TRUE if overflow */
/* Unsigned 128/64 division: (*phigh:*plow) / b -> quotient in *plow,
 * remainder in *phigh.  Returns non-zero if the quotient does not
 * fit in 64 bits.  Bit-at-a-time restoring division. */
3694 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3696 uint64_t q, r, a1, a0;
3709 /* XXX: use a better algorithm */
3710 for(i = 0; i < 64; i++) {
3712 a1 = (a1 << 1) | (a0 >> 63);
3713 if (ab || a1 >= b) {
/* Quotient bits are shifted into a0 as the dividend shifts out. */
3719 a0 = (a0 << 1) | qb;
3721 #if defined(DEBUG_MULDIV)
3722 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3723 *phigh, *plow, b, a0, a1);
3731 /* return TRUE if overflow */
/* Signed 128/64 division built on div64: negate operands to make
 * them positive, divide, then fix up the signs of quotient and
 * remainder; overflow if the quotient exceeds the signed range. */
3732 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3735 sa = ((int64_t)*phigh < 0);
3737 neg128(plow, phigh);
3741 if (div64(plow, phigh, b) != 0)
/* Asymmetric bound: -2^63 is representable, +2^63 is not. */
3744 if (*plow > (1ULL << 63))
3748 if (*plow >= (1ULL << 63))
/* MUL (64-bit): RDX:RAX = RAX * T0, unsigned. */
3756 void helper_mulq_EAX_T0(void)
3760 mulu64(&r0, &r1, EAX, T0);
/* IMUL (one-operand, 64-bit): RDX:RAX = RAX * T0, signed; CF/OF set
 * when the high half is not a sign extension of the low half. */
3767 void helper_imulq_EAX_T0(void)
3771 muls64(&r0, &r1, EAX, T0);
3775 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
/* IMUL (two-operand, 64-bit): T0 = T0 * T1, signed, same flag rule. */
3778 void helper_imulq_T0_T1(void)
3782 muls64(&r0, &r1, T0, T1);
3785 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
/* DIV (64-bit): RDX:RAX / T0 -> RAX quotient, RDX remainder;
 * #DE on divide-by-zero or quotient overflow. */
3788 void helper_divq_EAX_T0(void)
3792 raise_exception(EXCP00_DIVZ);
3796 if (div64(&r0, &r1, T0))
3797 raise_exception(EXCP00_DIVZ);
/* IDIV (64-bit): signed counterpart of helper_divq_EAX_T0. */
3802 void helper_idivq_EAX_T0(void)
3806 raise_exception(EXCP00_DIVZ);
3810 if (idiv64(&r0, &r1, T0))
3811 raise_exception(EXCP00_DIVZ);
/* BSWAP (64-bit): byte-reverse T0. */
3816 void helper_bswapq_T0(void)
/* HLT: put the virtual CPU into the halted state and exit the
 * execution loop until an interrupt arrives. */
3822 void helper_hlt(void)
3824 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3825 env->hflags |= HF_HALTED_MASK;
3826 env->exception_index = EXCP_HLT;
/* MONITOR: only ECX == 0 (no extensions) is accepted; the monitored
 * address itself is not recorded. */
3830 void helper_monitor(void)
3832 if ((uint32_t)ECX != 0)
3833 raise_exception(EXCP0D_GPF);
3834 /* XXX: store address ? */
/* MWAIT: approximated — sleeping is only safe on a uniprocessor
 * configuration, since another vCPU could be the one to wake us. */
3837 void helper_mwait(void)
3839 if ((uint32_t)ECX != 0)
3840 raise_exception(EXCP0D_GPF);
3841 /* XXX: not complete but not completely erroneous */
3842 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3843 /* more than one CPU: do not sleep because another CPU may
/* RSQRTSS/RSQRTPS approximation: 1/sqrt(a) computed exactly
 * (more precise than real hardware's 12-bit estimate). */
3850 float approx_rsqrt(float a)
3852 return 1.0 / sqrt(a);
/* RCPSS/RCPPS approximation: reciprocal. */
3855 float approx_rcp(float a)
/* Propagate the x87 control word (rounding mode and precision
 * control) into the softfloat fp_status. */
3860 void update_fp_status(void)
3864 /* set rounding mode */
3865 switch(env->fpuc & RC_MASK) {
3868 rnd_type = float_round_nearest_even;
3871 rnd_type = float_round_down;
3874 rnd_type = float_round_up;
3877 rnd_type = float_round_to_zero;
3880 set_float_rounding_mode(rnd_type, &env->fp_status);
/* Precision control field (bits 8-9 of FPUC). */
3882 switch((env->fpuc >> 8) & 3) {
3894 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3898 #if !defined(CONFIG_USER_ONLY)
3900 #define MMUSUFFIX _mmu
3902 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
3904 # define GETPC() (__builtin_return_address(0))
3908 #include "softmmu_template.h"
3911 #include "softmmu_template.h"
3914 #include "softmmu_template.h"
3917 #include "softmmu_template.h"
3921 /* try to fill the TLB and return an exception if error. If retaddr is
3922 NULL, it means that the function was called in C code (i.e. not
3923 from generated code or from helper.c) */
3924 /* XXX: fix it to restore all registers */
/* Soft-MMU fault handler: attempt to resolve the guest address; on a
 * real fault, restore the CPU state from the translated-code PC so
 * the guest exception is raised with a consistent EIP. */
3925 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3927 TranslationBlock *tb;
3930 CPUX86State *saved_env;
3932 /* XXX: hack to restore env in all cases, even if not called from
3935 env = cpu_single_env;
3937 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3940 /* now we have a real cpu fault */
3941 pc = (unsigned long)retaddr;
3942 tb = tb_find_pc(pc);
3944 /* the PC is inside the translated code. It means that we have
3945 a virtual CPU fault */
/* Roll the CPU registers back to the faulting guest instruction. */
3946 cpu_restore_state(tb, env, pc, NULL);
3950 raise_exception_err(env->exception_index, env->error_code);
/* Called from plain C code: no register state to restore. */
3952 raise_exception_err_norestore(env->exception_index, env->error_code);
3958 /* Secure Virtual Machine helpers */
/* STGI: set the global interrupt flag (SVM). */
3960 void helper_stgi(void)
3962 env->hflags |= HF_GIF_MASK;
/* CLGI: clear the global interrupt flag (SVM). */
3965 void helper_clgi(void)
3967 env->hflags &= ~HF_GIF_MASK;
3970 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation: SVM instructions are stubbed out entirely. */
3972 void helper_vmrun(target_ulong addr) { }
3973 void helper_vmmcall(void) { }
3974 void helper_vmload(target_ulong addr) { }
3975 void helper_vmsave(target_ulong addr) { }
3976 void helper_skinit(void) { }
3977 void helper_invlpga(void) { }
3978 void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
3979 int svm_check_intercept_param(uint32_t type, uint64_t param)
/* Expand a VMCB's packed 12-bit segment attribute field into the
 * descriptor-cache flags format used by CPUX86State, merging in the
 * base/limit bits that live in the descriptor high word. */
3986 static inline uint32_t
3987 vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
3989 return ((vmcb_attrib & 0x00ff) << 8) /* Type, S, DPL, P */
3990 | ((vmcb_attrib & 0x0f00) << 12) /* AVL, L, DB, G */
3991 | ((vmcb_base >> 16) & 0xff) /* Base 23-16 */
3992 | (vmcb_base & 0xff000000) /* Base 31-24 */
3993 | (vmcb_limit & 0xf0000); /* Limit 19-16 */
/* Inverse mapping: pack descriptor-cache flags into the VMCB's
 * 12-bit attribute encoding. */
3996 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
3998 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
3999 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
4002 extern uint8_t *phys_ram_base;
/* VMRUN: enter a guest.  Saves the host state into the page named by
 * MSR_VM_HSAVE_PA, caches the VMCB intercept bitmaps in env, loads
 * the complete guest state from the VMCB at 'addr', performs any
 * requested TLB control, and finally injects a pending event from
 * the VMCB's EVENTINJ field if one is valid. */
4003 void helper_vmrun(target_ulong addr)
4008 if (loglevel & CPU_LOG_TB_IN_ASM)
4009 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4011 env->vm_vmcb = addr;
4014 /* save the current CPU state in the hsave page */
4015 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4016 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4018 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4019 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4021 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4022 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4023 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4024 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4025 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4026 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4027 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4029 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4030 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4032 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4033 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4034 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4035 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4037 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4038 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4039 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4041 /* load the interception bitmaps so we do not need to access the
4043 /* We shift all the intercept bits so we can OR them with the TB
4045 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4046 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4047 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4048 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4049 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4050 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
/* --- load guest state from the VMCB --- */
4052 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4053 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4055 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4056 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4058 /* clear exit_info_2 so we behave like the real hardware */
4059 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4061 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4062 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4063 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4064 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4065 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
/* Virtual interrupt masking: guest TPR comes from the VMCB. */
4066 if (int_ctl & V_INTR_MASKING_MASK) {
4067 env->cr[8] = int_ctl & V_TPR_MASK;
4068 if (env->eflags & IF_MASK)
4069 env->hflags |= HF_HIF_MASK;
4072 #ifdef TARGET_X86_64
4073 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4074 env->hflags &= ~HF_LMA_MASK;
4075 if (env->efer & MSR_EFER_LMA)
4076 env->hflags |= HF_LMA_MASK;
4079 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4080 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4081 CC_OP = CC_OP_EFLAGS;
4082 CC_DST = 0xffffffff;
4084 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4085 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4086 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4087 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4089 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4091 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4092 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4093 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4094 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4095 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4097 /* FIXME: guest state consistency checks */
4099 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4100 case TLB_CONTROL_DO_NOTHING:
4102 case TLB_CONTROL_FLUSH_ALL_ASID:
4103 /* FIXME: this is not 100% correct but should work for now */
4112 /* maybe we need to inject an event */
4113 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4114 if (event_inj & SVM_EVTINJ_VALID) {
4115 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4116 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4117 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
/* Consume the event: clear VALID so it is injected only once. */
4118 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4120 if (loglevel & CPU_LOG_TB_IN_ASM)
4121 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4122 /* FIXME: need to implement valid_err */
4123 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4124 case SVM_EVTINJ_TYPE_INTR:
4125 env->exception_index = vector;
4126 env->error_code = event_inj_err;
4127 env->exception_is_int = 1;
4128 env->exception_next_eip = -1;
4129 if (loglevel & CPU_LOG_TB_IN_ASM)
4130 fprintf(logfile, "INTR");
4132 case SVM_EVTINJ_TYPE_NMI:
4133 env->exception_index = vector;
4134 env->error_code = event_inj_err;
4135 env->exception_is_int = 1;
4136 env->exception_next_eip = EIP;
4137 if (loglevel & CPU_LOG_TB_IN_ASM)
4138 fprintf(logfile, "NMI");
4140 case SVM_EVTINJ_TYPE_EXEPT:
4141 env->exception_index = vector;
4142 env->error_code = event_inj_err;
4143 env->exception_is_int = 0;
4144 env->exception_next_eip = -1;
4145 if (loglevel & CPU_LOG_TB_IN_ASM)
4146 fprintf(logfile, "EXEPT");
4148 case SVM_EVTINJ_TYPE_SOFT:
4149 env->exception_index = vector;
4150 env->error_code = event_inj_err;
4151 env->exception_is_int = 1;
4152 env->exception_next_eip = EIP;
4153 if (loglevel & CPU_LOG_TB_IN_ASM)
4154 fprintf(logfile, "SOFT");
4157 if (loglevel & CPU_LOG_TB_IN_ASM)
4158 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
/* A pending virtual interrupt request also wakes the guest. */
4160 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4161 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
/* VMMCALL: hypercall entry point — only logged here. */
4167 void helper_vmmcall(void)
4169 if (loglevel & CPU_LOG_TB_IN_ASM)
4170 fprintf(logfile,"vmmcall!\n");
/* VMLOAD: load the "extra" guest state (FS/GS/TR/LDTR plus the
 * syscall and sysenter MSRs) from the VMCB at 'addr'. */
4173 void helper_vmload(target_ulong addr)
4175 if (loglevel & CPU_LOG_TB_IN_ASM)
4176 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4177 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4178 env->segs[R_FS].base);
4180 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4181 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4182 SVM_LOAD_SEG2(addr, tr, tr);
4183 SVM_LOAD_SEG2(addr, ldt, ldtr);
4185 #ifdef TARGET_X86_64
/* 64-bit only MSRs. */
4186 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4187 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4188 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4189 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4191 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4192 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4193 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4194 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
/* VMSAVE: mirror of VMLOAD — store the "extra" guest state
 * (FS/GS/TR/LDTR plus syscall/sysenter MSRs) to the VMCB at 'addr'. */
4197 void helper_vmsave(target_ulong addr)
4199 if (loglevel & CPU_LOG_TB_IN_ASM)
4200 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4201 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4202 env->segs[R_FS].base);
4204 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4205 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4206 SVM_SAVE_SEG(addr, tr, tr);
4207 SVM_SAVE_SEG(addr, ldt, ldtr);
4209 #ifdef TARGET_X86_64
/* 64-bit only MSRs. */
4210 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4211 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4212 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4213 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4215 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4216 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4217 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4218 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
/* SKINIT: secure init — only logged, not implemented. */
4221 void helper_skinit(void)
4223 if (loglevel & CPU_LOG_TB_IN_ASM)
4224 fprintf(logfile,"skinit!\n");
/* INVLPGA: ASID-qualified TLB invalidation. */
4227 void helper_invlpga(void)
/* Check whether the current guest action is intercepted by the VMCB
 * configuration; if so, perform a #VMEXIT with the given exit code
 * and exit_info_1.  Returns via vmexit (which does not return to the
 * guest) when intercepted; otherwise falls through. */
4232 int svm_check_intercept_param(uint32_t type, uint64_t param)
4235 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4236 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4237 vmexit(type, param);
4241 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4242 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4243 vmexit(type, param);
4247 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4248 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4249 vmexit(type, param);
4253 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4254 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4255 vmexit(type, param);
4259 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4260 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4261 vmexit(type, param);
/* I/O intercept: consult the IOPM bitmap for the accessed port. */
4266 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4267 /* FIXME: this should be read in at vmrun (faster this way?) */
4268 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4269 uint16_t port = (uint16_t) (param >> 16);
4271 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4272 if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
4273 vmexit(type, param);
/* MSR intercept: consult the MSRPM bitmap, indexed by ECX range. */
4278 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4279 /* FIXME: this should be read in at vmrun (faster this way?) */
4280 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
4281 switch((uint32_t)ECX) {
4286 case 0xc0000000 ... 0xc0001fff:
4287 T0 = (8192 + ECX - 0xc0000000) * 2;
4291 case 0xc0010000 ... 0xc0011fff:
4292 T0 = (16384 + ECX - 0xc0010000) * 2;
/* MSR outside all mapped ranges: unconditional #VMEXIT. */
4297 vmexit(type, param);
4300 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4301 vmexit(type, param);
/* Generic intercepts: single bit in the shifted intercept word. */
4306 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
4307 vmexit(type, param);
/*
 * #VMEXIT: leave guest mode.  Stores the current guest state plus
 * EXIT_CODE/EXIT_INFO_1 into the VMCB at env->vm_vmcb, then reloads the
 * host state previously stashed at env->vm_hsave.
 * NOTE(review): this listing is lossy -- brace-only lines, the local
 * declaration of int_ctl, the final fprintf argument and the #endif of
 * the TARGET_X86_64 section are not visible; comments below describe
 * only the visible statements.
 */
4315 void vmexit(uint64_t exit_code, uint64_t exit_info_1)
/* Trace the exit; exit_info_2 is read straight out of the VMCB. */
4319 if (loglevel & CPU_LOG_TB_IN_ASM)
4320 fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
4321 exit_code, exit_info_1,
4322 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
/* Propagate the interrupt-shadow (HF_INHIBIT_IRQ) state into the VMCB
   int_state field, clearing it locally; otherwise record no shadow. */
4325 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
4326 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
4327 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4329 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4332 /* Save the VM state in the vmcb */
4333 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
4334 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
4335 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
4336 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
/* Guest descriptor tables. */
4338 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4339 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4341 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4342 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
/* Guest EFER and control registers. */
4344 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4345 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4346 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4347 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4348 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
/* Under V_INTR_MASKING the guest's CR8 (TPR) is virtualized through
   V_TPR: write the current value back into the VMCB's int_ctl field. */
4350 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
4351 int_ctl &= ~V_TPR_MASK;
4352 int_ctl |= env->cr[8] & V_TPR_MASK;
4353 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
/* Guest rflags/rip/rsp/rax, debug registers and CPL. */
4356 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
4357 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4358 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4359 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4360 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4361 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4362 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
4364 /* Reload the host state from vm_hsave */
4365 env->hflags &= ~HF_HIF_MASK;
/* Clear exception intercepts and cancel any pending virtual IRQ. */
4367 env->intercept_exceptions = 0;
4368 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
/* Host descriptor tables. */
4370 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
4371 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
4373 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
4374 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
/* Host control registers; CR0.PE is forced on (see comment near the
   end of this function). */
4376 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
4377 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
4378 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
/* CR8 was virtualized only under V_INTR_MASKING; restore it then. */
4379 if (int_ctl & V_INTR_MASKING_MASK)
4380 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
4381 /* we need to set the efer after the crs so the hidden flags get set properly */
4382 #ifdef TARGET_X86_64
4383 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
4384 env->hflags &= ~HF_LMA_MASK;
4385 if (env->efer & MSR_EFER_LMA)
4386 env->hflags |= HF_LMA_MASK;
/* Host rflags; arithmetic flags are carried via CC_OP_EFLAGS. */
4390 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
4391 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4392 CC_OP = CC_OP_EFLAGS;
/* Host segment registers. */
4394 SVM_LOAD_SEG(env->vm_hsave, ES, es);
4395 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
4396 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
4397 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
/* Host rip/rsp/rax and debug registers. */
4399 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4400 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4401 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4403 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4404 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* Back to host privilege level, and record why we exited in the VMCB. */
4407 cpu_x86_set_cpl(env, 0);
4408 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
4409 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
4410 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
4413 /* FIXME: Resets the current ASID register to zero (host ASID). */
4415 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4417 /* Clears the TSC_OFFSET inside the processor. */
4419 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4420 from the page table indicated the host's CR3. If the PDPEs contain
4421 illegal state, the processor causes a shutdown. */
4423 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4424 env->cr[0] |= CR0_PE_MASK;
4425 env->eflags &= ~VM_MASK;
4427 /* Disables all breakpoints in the host DR7 register. */
4429 /* Checks the reloaded host state for consistency. */
4431 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4432 host's code segment or non-canonical (in the case of long mode), a
4433 #GP fault is delivered inside the host.) */
4435 /* remove any pending exception */
4436 env->exception_index = -1;
4437 env->error_code = 0;
4438 env->old_exception = -1;