4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "host-utils.h"
/* Debug wrapper: logs the C source line of the raise site before calling
   the real (parenthesized) raise_exception_err() function.
   NOTE(review): this extraction is missing lines — the #ifdef DEBUG guard
   and the do { } while (0) wrapper around this macro are not visible. */
26 #define raise_exception_err(a, b)\
29 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
30 (raise_exception_err)(a, b);\
/* Lookup table for the x86 parity flag (PF): entry i is CC_P when the
   byte value i contains an even number of set bits, 0 otherwise.
   NOTE(review): the closing "};" is not visible in this extraction. */
34 const uint8_t parity_table[256] = {
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* Effective shift-count table for 16-bit rotate-through-carry (RCL/RCR):
   entry i is (i mod 17), since the rotation spans 16 data bits plus CF. */
70 const uint8_t rclw_table[32] = {
71 0, 1, 2, 3, 4, 5, 6, 7,
72 8, 9,10,11,12,13,14,15,
73 16, 0, 1, 2, 3, 4, 5, 6,
74 7, 8, 9,10,11,12,13,14,
/* Effective shift-count table for 8-bit rotate-through-carry (RCL/RCR):
   entry i is (i mod 9), since the rotation spans 8 data bits plus CF. */
78 const uint8_t rclb_table[32] = {
79 0, 1, 2, 3, 4, 5, 6, 7,
80 8, 0, 1, 2, 3, 4, 5, 6,
81 7, 8, 0, 1, 2, 3, 4, 5,
82 6, 7, 8, 0, 1, 2, 3, 4,
/* x87 floating point constant table: 0, 1, pi, log10(2), ln(2), log2(e),
   log2(10) — presumably the values pushed by the FLDZ/FLD1/FLDPI/FLDLG2/
   FLDLN2/FLDL2E/FLDL2T instructions (TODO confirm against the users). */
85 const CPU86_LDouble f15rk[7] =
87 0.00000000000000000000L,
88 1.00000000000000000000L,
89 3.14159265358979323851L, /*pi*/
90 0.30102999566398119523L, /*lg2*/
91 0.69314718055994530943L, /*ln2*/
92 1.44269504088896340739L, /*l2e*/
93 3.32192809488736234781L, /*l2t*/
/* Global spinlock serializing CPU emulation; cpu_lock()/cpu_unlock()
   acquire and release it.  NOTE(review): the cpu_lock() function header
   and the braces of both functions are missing from this extraction. */
98 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
102 spin_lock(&global_cpu_lock);
105 void cpu_unlock(void)
107 spin_unlock(&global_cpu_lock);
110 /* return non zero if error */
/* Fetch the two 32-bit descriptor words for 'selector' into *e1_ptr and
   *e2_ptr after a limit check on the descriptor table.
   NOTE(review): the GDT/LDT table selection and the return statements are
   not visible in this extraction. */
111 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
122 index = selector & ~7;
123 if ((index + 7) > dt->limit)
125 ptr = dt->base + index;
126 *e1_ptr = ldl_kernel(ptr);
127 *e2_ptr = ldl_kernel(ptr + 4);
/* Decode the 20-bit segment limit from descriptor words e1/e2; if the
   granularity (G) bit is set, scale it to 4K pages (limit*4K + 0xfff). */
131 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
134 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
135 if (e2 & DESC_G_MASK)
136 limit = (limit << 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor words:
   base[15:0] from e1[31:16], base[23:16] from e2[7:0], base[31:24] from
   e2[31:24]. */
140 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
142 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
/* Fill a SegmentCache from raw descriptor words (base, limit; the flags
   assignment is not visible in this extraction). */
145 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
147 sc->base = get_seg_base(e1, e2);
148 sc->limit = get_seg_limit(e1, e2);
152 /* init the segment cache in vm86 mode. */
/* vm86 segments have base = selector << 4, a 64K limit and no flags. */
153 static inline void load_seg_vm(int seg, int selector)
156 cpu_x86_load_seg_cache(env, seg, selector,
157 (selector << 4), 0xffff, 0);
/* Read the stack pointer (SS:ESP) for privilege level 'dpl' from the
   current TSS, handling both 16-bit (shift 0) and 32-bit (shift 1) TSS
   layouts; raises #TS if the slot lies outside the TSS limit.
   NOTE(review): this extraction is missing lines — the printf dump below
   is presumably under a debug #if, and the shift/type computation and
   branch keywords are not visible. */
160 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
161 uint32_t *esp_ptr, int dpl)
163 int type, index, shift;
168 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
169 for(i=0;i<env->tr.limit;i++) {
170 printf("%02x ", env->tr.base[i]);
171 if ((i & 7) == 7) printf("\n");
177 if (!(env->tr.flags & DESC_P_MASK))
178 cpu_abort(env, "invalid tss");
179 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
181 cpu_abort(env, "invalid tss type");
183 index = (dpl * 4 + 2) << shift;
184 if (index + (4 << shift) - 1 > env->tr.limit)
185 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
/* 16-bit TSS: SP at offset, SS at offset+2 (2-byte fields). */
187 *esp_ptr = lduw_kernel(env->tr.base + index);
188 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
/* 32-bit TSS: ESP at offset, SS at offset+4. */
190 *esp_ptr = ldl_kernel(env->tr.base + index);
191 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
195 /* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' during a task switch, performing the
   descriptor validity/privilege checks; any failure raises #TS (or #NP
   for a not-present segment).  A null selector is accepted except for
   CS and SS.  NOTE(review): extraction is missing lines (rpl computation,
   some conditions and braces). */
196 static void tss_load_seg(int seg_reg, int selector)
201 if ((selector & 0xfffc) != 0) {
202 if (load_segment(&e1, &e2, selector) != 0)
203 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
204 if (!(e2 & DESC_S_MASK))
205 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
207 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
208 cpl = env->hflags & HF_CPL_MASK;
209 if (seg_reg == R_CS) {
210 if (!(e2 & DESC_CS_MASK))
211 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
212 /* XXX: is it correct ? */
214 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
215 if ((e2 & DESC_C_MASK) && dpl > rpl)
216 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
217 } else if (seg_reg == R_SS) {
218 /* SS must be writable data */
219 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 if (dpl != cpl || dpl != rpl)
222 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
224 /* not readable code */
225 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* if data or non conforming code, checks the rights */
228 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
229 if (dpl < cpl || dpl < rpl)
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 if (!(e2 & DESC_P_MASK))
234 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
235 cpu_x86_load_seg_cache(env, seg_reg, selector,
236 get_seg_base(e1, e2),
237 get_seg_limit(e1, e2),
/* Null selector: only legal for data segments, never CS or SS. */
240 if (seg_reg == R_SS || seg_reg == R_CS)
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
/* Reason codes for switch_tss(): they control busy-bit and NT-flag
   handling during the task switch. */
245 #define SWITCH_TSS_JMP 0
246 #define SWITCH_TSS_IRET 1
247 #define SWITCH_TSS_CALL 2
249 /* XXX: restore CPU state in registers (PowerPC case) */
/* Perform an x86 hardware task switch to the TSS named by tss_selector
   (e1/e2 are its descriptor words, 'source' is SWITCH_TSS_JMP/IRET/CALL).
   Follows a task gate if needed, saves the outgoing state into the old
   TSS, loads registers/segments from the new TSS, and updates TR, the
   busy bits and (for CALL) the back link + NT flag.
   NOTE(review): this extraction is missing many lines (braces, #ifdefs,
   16/32-bit branches) — read against the complete file. */
250 static void switch_tss(int tss_selector,
251 uint32_t e1, uint32_t e2, int source,
254 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
255 target_ulong tss_base;
256 uint32_t new_regs[8], new_segs[6];
257 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
258 uint32_t old_eflags, eflags_mask;
263 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
265 if (loglevel & CPU_LOG_PCALL)
266 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
269 /* if task gate, we read the TSS segment and we load it */
271 if (!(e2 & DESC_P_MASK))
272 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
273 tss_selector = e1 >> 16;
274 if (tss_selector & 4)
275 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
276 if (load_segment(&e1, &e2, tss_selector) != 0)
277 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
278 if (e2 & DESC_S_MASK)
279 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
280 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
282 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
285 if (!(e2 & DESC_P_MASK))
286 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
292 tss_limit = get_seg_limit(e1, e2);
293 tss_base = get_seg_base(e1, e2);
294 if ((tss_selector & 4) != 0 ||
295 tss_limit < tss_limit_max)
296 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
297 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
/* Minimum TSS sizes: 104 bytes for a 32-bit TSS, 44 for a 16-bit one. */
299 old_tss_limit_max = 103;
301 old_tss_limit_max = 43;
303 /* read all the registers from the new TSS */
306 new_cr3 = ldl_kernel(tss_base + 0x1c);
307 new_eip = ldl_kernel(tss_base + 0x20);
308 new_eflags = ldl_kernel(tss_base + 0x24);
309 for(i = 0; i < 8; i++)
310 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
311 for(i = 0; i < 6; i++)
312 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
313 new_ldt = lduw_kernel(tss_base + 0x60);
314 new_trap = ldl_kernel(tss_base + 0x64);
/* 16-bit TSS layout (offsets differ; upper register halves forced). */
318 new_eip = lduw_kernel(tss_base + 0x0e);
319 new_eflags = lduw_kernel(tss_base + 0x10);
320 for(i = 0; i < 8; i++)
321 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
322 for(i = 0; i < 4; i++)
323 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
324 new_ldt = lduw_kernel(tss_base + 0x2a);
330 /* NOTE: we must avoid memory exceptions during the task switch,
331 so we make dummy accesses before */
332 /* XXX: it can still fail in some cases, so a bigger hack is
333 necessary to valid the TLB after having done the accesses */
335 v1 = ldub_kernel(env->tr.base);
336 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
337 stb_kernel(env->tr.base, v1);
338 stb_kernel(env->tr.base + old_tss_limit_max, v2);
340 /* clear busy bit (it is restartable) */
341 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
344 ptr = env->gdt.base + (env->tr.selector & ~7);
345 e2 = ldl_kernel(ptr + 4);
346 e2 &= ~DESC_TSS_BUSY_MASK;
347 stl_kernel(ptr + 4, e2);
349 old_eflags = compute_eflags();
350 if (source == SWITCH_TSS_IRET)
351 old_eflags &= ~NT_MASK;
353 /* save the current state in the old TSS */
356 stl_kernel(env->tr.base + 0x20, next_eip);
357 stl_kernel(env->tr.base + 0x24, old_eflags);
358 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
359 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
360 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
361 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
362 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
363 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
364 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
365 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
366 for(i = 0; i < 6; i++)
367 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
/* 16-bit old TSS: same saves with 2-byte fields at 16-bit offsets. */
370 stw_kernel(env->tr.base + 0x0e, next_eip);
371 stw_kernel(env->tr.base + 0x10, old_eflags);
372 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
373 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
374 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
375 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
376 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
377 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
378 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
379 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
380 for(i = 0; i < 4; i++)
381 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
384 /* now if an exception occurs, it will occurs in the next task
387 if (source == SWITCH_TSS_CALL) {
/* CALL: store back link to the old task and set NT in the new task. */
388 stw_kernel(tss_base, env->tr.selector);
389 new_eflags |= NT_MASK;
393 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
/* Mark the incoming TSS descriptor busy in the GDT. */
396 ptr = env->gdt.base + (tss_selector & ~7);
397 e2 = ldl_kernel(ptr + 4);
398 e2 |= DESC_TSS_BUSY_MASK;
399 stl_kernel(ptr + 4, e2);
402 /* set the new CPU state */
403 /* from this point, any exception which occurs can give problems */
404 env->cr[0] |= CR0_TS_MASK;
405 env->hflags |= HF_TS_MASK;
406 env->tr.selector = tss_selector;
407 env->tr.base = tss_base;
408 env->tr.limit = tss_limit;
409 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
411 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
412 cpu_x86_update_cr3(env, new_cr3);
415 /* load all registers without an exception, then reload them with
416 possible exception */
418 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
419 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
421 eflags_mask &= 0xffff;
422 load_eflags(new_eflags, eflags_mask);
423 /* XXX: what to do in 16 bit case ? */
432 if (new_eflags & VM_MASK) {
433 for(i = 0; i < 6; i++)
434 load_seg_vm(i, new_segs[i]);
435 /* in vm86, CPL is always 3 */
436 cpu_x86_set_cpl(env, 3);
438 /* CPL is set the RPL of CS */
439 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
440 /* first just selectors as the rest may trigger exceptions */
441 for(i = 0; i < 6; i++)
442 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
445 env->ldt.selector = new_ldt & ~4;
452 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
454 if ((new_ldt & 0xfffc) != 0) {
/* Validate and load the new LDT descriptor from the GDT. */
456 index = new_ldt & ~7;
457 if ((index + 7) > dt->limit)
458 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
459 ptr = dt->base + index;
460 e1 = ldl_kernel(ptr);
461 e2 = ldl_kernel(ptr + 4);
462 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
463 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
464 if (!(e2 & DESC_P_MASK))
465 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
466 load_seg_cache_raw_dt(&env->ldt, e1, e2);
469 /* load the segments */
470 if (!(new_eflags & VM_MASK)) {
471 tss_load_seg(R_CS, new_segs[R_CS]);
472 tss_load_seg(R_SS, new_segs[R_SS]);
473 tss_load_seg(R_ES, new_segs[R_ES]);
474 tss_load_seg(R_DS, new_segs[R_DS]);
475 tss_load_seg(R_FS, new_segs[R_FS]);
476 tss_load_seg(R_GS, new_segs[R_GS]);
479 /* check that EIP is in the CS segment limits */
480 if (new_eip > env->segs[R_CS].limit) {
481 /* XXX: different exception if CALL ? */
482 raise_exception_err(EXCP0D_GPF, 0);
486 /* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the (32-bit) TSS: 'addr' is the
   port, 'size' the access width in bytes; all covered bitmap bits must
   be zero or #GP(0) is raised.  NOTE(review): extraction is missing
   lines (the raises for an invalid TSS / out-of-limit offset, and the
   shift of 'val' by the port's bit position). */
487 static inline void check_io(int addr, int size)
489 int io_offset, val, mask;
491 /* TSS must be a valid 32 bit one */
492 if (!(env->tr.flags & DESC_P_MASK) ||
493 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
496 io_offset = lduw_kernel(env->tr.base + 0x66);
497 io_offset += (addr >> 3);
498 /* Note: the check needs two bytes */
499 if ((io_offset + 1) > env->tr.limit)
501 val = lduw_kernel(env->tr.base + io_offset);
503 mask = (1 << size) - 1;
504 /* all bits must be zero to allow the I/O */
505 if ((val & mask) != 0) {
507 raise_exception_err(EXCP0D_GPF, 0);
/* Dynamic-translation helpers checking I/O permission for byte/word/long
   accesses with the port taken from T0 or DX.  NOTE(review): the bodies
   of the T0 variants are not visible in this extraction. */
511 void check_iob_T0(void)
516 void check_iow_T0(void)
521 void check_iol_T0(void)
526 void check_iob_DX(void)
528 check_io(EDX & 0xffff, 1);
531 void check_iow_DX(void)
533 check_io(EDX & 0xffff, 2);
536 void check_iol_DX(void)
538 check_io(EDX & 0xffff, 4);
/* Stack-pointer mask from the SS descriptor flags: 32-bit stack (B bit
   set) uses the full ESP; otherwise only SP's low 16 bits are used.
   NOTE(review): the two return statements are missing in this extraction. */
541 static inline unsigned int get_sp_mask(unsigned int e2)
543 if (e2 & DESC_B_MASK)
/* Update ESP/RSP under the stack-size mask.  Two variants — the first is
   presumably the TARGET_X86_64 one (the #ifdef/#else lines are missing
   from this extraction); the second is the simple 32-bit masked update. */
550 #define SET_ESP(val, sp_mask)\
552 if ((sp_mask) == 0xffff)\
553 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
554 else if ((sp_mask) == 0xffffffffLL)\
555 ESP = (uint32_t)(val);\
560 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
563 /* XXX: add a is_user flag to have proper security support */
/* Stack push/pop macros for 16- and 32-bit values at ssp + (sp & mask).
   NOTE(review): the sp decrement/increment lines of each macro are not
   visible in this extraction. */
564 #define PUSHW(ssp, sp, sp_mask, val)\
567 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
570 #define PUSHL(ssp, sp, sp_mask, val)\
573 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
576 #define POPW(ssp, sp, sp_mask, val)\
578 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
582 #define POPL(ssp, sp, sp_mask, val)\
584 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
588 /* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: reads the IDT
   gate, handles task gates via switch_tss(), checks gate/target-CS
   privileges, optionally switches to an inner-privilege stack (or the
   vm86 ring-0 stack), pushes the return frame and error code, and loads
   the new CS:EIP and eflags state.  NOTE(review): extraction is missing
   many lines (braces, #ifdefs, rpl/dpl comparisons, EIP update). */
589 static void do_interrupt_protected(int intno, int is_int, int error_code,
590 unsigned int next_eip, int is_hw)
593 target_ulong ptr, ssp;
594 int type, dpl, selector, ss_dpl, cpl;
595 int has_error_code, new_stack, shift;
596 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
597 uint32_t old_eip, sp_mask;
598 int svm_should_check = 1;
/* SVM: a next_eip of -1 marks an injected event that must not be
   re-intercepted. */
600 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
602 svm_should_check = 0;
606 && (INTERCEPTEDl(_exceptions, 1 << intno)
608 raise_interrupt(intno, is_int, error_code, 0);
611 if (!is_int && !is_hw) {
630 if (intno * 8 + 7 > dt->limit)
631 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
632 ptr = dt->base + intno * 8;
633 e1 = ldl_kernel(ptr);
634 e2 = ldl_kernel(ptr + 4);
635 /* check gate type */
636 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
638 case 5: /* task gate */
639 /* must do that check here to return the correct error code */
640 if (!(e2 & DESC_P_MASK))
641 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
642 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
643 if (has_error_code) {
646 /* push the error code */
647 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
649 if (env->segs[R_SS].flags & DESC_B_MASK)
653 esp = (ESP - (2 << shift)) & mask;
654 ssp = env->segs[R_SS].base + esp;
656 stl_kernel(ssp, error_code);
658 stw_kernel(ssp, error_code);
662 case 6: /* 286 interrupt gate */
663 case 7: /* 286 trap gate */
664 case 14: /* 386 interrupt gate */
665 case 15: /* 386 trap gate */
668 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
672 cpl = env->hflags & HF_CPL_MASK;
673 /* check privledge if software int */
674 if (is_int && dpl < cpl)
675 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK))
678 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
680 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
681 if ((selector & 0xfffc) == 0)
682 raise_exception_err(EXCP0D_GPF, 0);
684 if (load_segment(&e1, &e2, selector) != 0)
685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
691 if (!(e2 & DESC_P_MASK))
692 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
693 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
694 /* to inner privilege */
695 get_ss_esp_from_tss(&ss, &esp, dpl);
696 if ((ss & 0xfffc) == 0)
697 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
699 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
700 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
701 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
702 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
704 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
705 if (!(ss_e2 & DESC_S_MASK) ||
706 (ss_e2 & DESC_CS_MASK) ||
707 !(ss_e2 & DESC_W_MASK))
708 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
709 if (!(ss_e2 & DESC_P_MASK))
710 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
712 sp_mask = get_sp_mask(ss_e2);
713 ssp = get_seg_base(ss_e1, ss_e2);
714 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
715 /* to same privilege */
716 if (env->eflags & VM_MASK)
717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
719 sp_mask = get_sp_mask(env->segs[R_SS].flags);
720 ssp = env->segs[R_SS].base;
724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
725 new_stack = 0; /* avoid warning */
726 sp_mask = 0; /* avoid warning */
727 ssp = 0; /* avoid warning */
728 esp = 0; /* avoid warning */
734 /* XXX: check that enough room is available */
735 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
736 if (env->eflags & VM_MASK)
/* 32-bit gate: push the frame with 32-bit slots. */
742 if (env->eflags & VM_MASK) {
743 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
744 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
745 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
746 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
748 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
749 PUSHL(ssp, esp, sp_mask, ESP);
751 PUSHL(ssp, esp, sp_mask, compute_eflags());
752 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
753 PUSHL(ssp, esp, sp_mask, old_eip);
754 if (has_error_code) {
755 PUSHL(ssp, esp, sp_mask, error_code);
/* 16-bit gate: same frame with 16-bit slots. */
759 if (env->eflags & VM_MASK) {
760 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
761 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
762 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
763 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
765 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
766 PUSHW(ssp, esp, sp_mask, ESP);
768 PUSHW(ssp, esp, sp_mask, compute_eflags());
769 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
770 PUSHW(ssp, esp, sp_mask, old_eip);
771 if (has_error_code) {
772 PUSHW(ssp, esp, sp_mask, error_code);
/* Leaving vm86: clear the data segments before loading the new SS. */
777 if (env->eflags & VM_MASK) {
778 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
780 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
781 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
783 ss = (ss & ~3) | dpl;
784 cpu_x86_load_seg_cache(env, R_SS, ss,
785 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
787 SET_ESP(esp, sp_mask);
789 selector = (selector & ~3) | dpl;
790 cpu_x86_load_seg_cache(env, R_CS, selector,
791 get_seg_base(e1, e2),
792 get_seg_limit(e1, e2),
794 cpu_x86_set_cpl(env, dpl);
797 /* interrupt gate clear IF mask */
798 if ((type & 1) == 0) {
799 env->eflags &= ~IF_MASK;
801 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* 64-bit stack push/pop (no segment base, no mask).  NOTE(review): the
   sp adjustment lines of each macro are missing from this extraction. */
806 #define PUSHQ(sp, val)\
809 stq_kernel(sp, (val));\
812 #define POPQ(sp, val)\
814 val = ldq_kernel(sp);\
/* Read the 64-bit RSP for stack slot 'level' (RSP0-2 and IST1-7 occupy
   consecutive 8-byte slots starting at offset 4) from the 64-bit TSS;
   raises #TS on a limit violation. */
818 static inline target_ulong get_rsp_from_tss(int level)
/* Debug dump — presumably under a debug #if (guard lines not visible). */
823 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
824 env->tr.base, env->tr.limit);
827 if (!(env->tr.flags & DESC_P_MASK))
828 cpu_abort(env, "invalid tss");
829 index = 8 * level + 4;
830 if ((index + 7) > env->tr.limit)
831 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
832 return ldq_kernel(env->tr.base + index);
835 /* 64 bit interrupt */
/* Deliver interrupt/exception 'intno' in long mode: reads the 16-byte
   IDT gate, checks privileges, selects the new RSP from the TSS (IST or
   RSPn), pushes the 64-bit frame and loads the new CS:RIP.
   NOTE(review): extraction is missing lines — in particular the
   assignment of 'ist' is not visible although it is read below; verify
   against the full file. */
836 static void do_interrupt64(int intno, int is_int, int error_code,
837 target_ulong next_eip, int is_hw)
841 int type, dpl, selector, cpl, ist;
842 int has_error_code, new_stack;
843 uint32_t e1, e2, e3, ss;
844 target_ulong old_eip, esp, offset;
845 int svm_should_check = 1;
847 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
849 svm_should_check = 0;
852 && INTERCEPTEDl(_exceptions, 1 << intno)
854 raise_interrupt(intno, is_int, error_code, 0);
857 if (!is_int && !is_hw) {
876 if (intno * 16 + 15 > dt->limit)
877 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = ldl_kernel(ptr);
880 e2 = ldl_kernel(ptr + 4);
881 e3 = ldl_kernel(ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privledge if software int */
895 if (is_int && dpl < cpl)
896 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
897 /* check valid bit */
898 if (!(e2 & DESC_P_MASK))
899 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
901 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
903 if ((selector & 0xfffc) == 0)
904 raise_exception_err(EXCP0D_GPF, 0);
906 if (load_segment(&e1, &e2, selector) != 0)
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913 if (!(e2 & DESC_P_MASK))
914 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
915 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
916 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
918 /* to inner privilege */
920 esp = get_rsp_from_tss(ist + 3);
922 esp = get_rsp_from_tss(dpl);
923 esp &= ~0xfLL; /* align stack */
926 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
927 /* to same privilege */
928 if (env->eflags & VM_MASK)
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 esp = get_rsp_from_tss(ist + 3);
935 esp &= ~0xfLL; /* align stack */
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 new_stack = 0; /* avoid warning */
940 esp = 0; /* avoid warning */
/* Push the 64-bit interrupt frame: SS:RSP, RFLAGS, CS:RIP [, error]. */
943 PUSHQ(esp, env->segs[R_SS].selector);
945 PUSHQ(esp, compute_eflags());
946 PUSHQ(esp, env->segs[R_CS].selector);
948 if (has_error_code) {
949 PUSHQ(esp, error_code);
/* Inner-privilege entry loads a NULL SS (legal in long mode). */
954 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
958 selector = (selector & ~3) | dpl;
959 cpu_x86_load_seg_cache(env, R_CS, selector,
960 get_seg_base(e1, e2),
961 get_seg_limit(e1, e2),
963 cpu_x86_set_cpl(env, dpl);
966 /* interrupt gate clear IF mask */
967 if ((type & 1) == 0) {
968 env->eflags &= ~IF_MASK;
970 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
/* SYSCALL instruction: fast ring-0 entry.  CS/SS come from the STAR MSR
   (STAR[47:32]); in long mode the return RIP is saved in RCX and RFLAGS
   in R11 and the target is LSTAR (64-bit code) or CSTAR (compat mode);
   in legacy mode the return EIP goes to ECX and the target is STAR[31:0].
   Raises #UD when EFER.SCE is clear. */
974 void helper_syscall(int next_eip_addend)
978 if (!(env->efer & MSR_EFER_SCE)) {
979 raise_exception_err(EXCP06_ILLOP, 0);
981 selector = (env->star >> 32) & 0xffff;
983 if (env->hflags & HF_LMA_MASK) {
986 ECX = env->eip + next_eip_addend;
987 env->regs[11] = compute_eflags();
989 code64 = env->hflags & HF_CS64_MASK;
991 cpu_x86_set_cpl(env, 0);
992 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
994 DESC_G_MASK | DESC_P_MASK |
996 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
997 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
999 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1001 DESC_W_MASK | DESC_A_MASK);
/* FMASK MSR: bits set there are cleared in RFLAGS on entry. */
1002 env->eflags &= ~env->fmask;
1004 env->eip = env->lstar;
1006 env->eip = env->cstar;
/* Legacy (non-LMA) SYSCALL path. */
1010 ECX = (uint32_t)(env->eip + next_eip_addend);
1012 cpu_x86_set_cpl(env, 0);
1013 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1015 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1017 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1018 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1020 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1022 DESC_W_MASK | DESC_A_MASK);
1023 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1024 env->eip = (uint32_t)env->star;
/* SYSRET instruction: fast return to ring 3.  CS/SS come from STAR
   (STAR[63:48]); 'dflag' selects 64-bit vs 32-bit return in long mode.
   Raises #UD when EFER.SCE is clear, #GP(0) when not in protected mode
   or CPL != 0.  In long mode RFLAGS is restored from R11. */
1028 void helper_sysret(int dflag)
1032 if (!(env->efer & MSR_EFER_SCE)) {
1033 raise_exception_err(EXCP06_ILLOP, 0);
1035 cpl = env->hflags & HF_CPL_MASK;
1036 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1037 raise_exception_err(EXCP0D_GPF, 0);
1039 selector = (env->star >> 48) & 0xffff;
1040 #ifdef TARGET_X86_64
1041 if (env->hflags & HF_LMA_MASK) {
/* 64-bit return: CS = STAR[63:48] + 16, RPL forced to 3, L bit set. */
1043 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1045 DESC_G_MASK | DESC_P_MASK |
1046 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1047 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
/* 32-bit (compat) return: CS = STAR[63:48], B bit instead of L. */
1051 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1053 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1054 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1055 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1056 env->eip = (uint32_t)ECX;
1058 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1060 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1061 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1062 DESC_W_MASK | DESC_A_MASK);
1063 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1064 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1065 cpu_x86_set_cpl(env, 3);
/* Legacy (non-LMA) SYSRET path: only IF is restored in eflags. */
1069 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1071 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1072 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1073 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1074 env->eip = (uint32_t)ECX;
1075 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1077 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1078 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1079 DESC_W_MASK | DESC_A_MASK);
1080 env->eflags |= IF_MASK;
1081 cpu_x86_set_cpl(env, 3);
/* kqemu acceleration fast path (guard/#endif lines not visible). */
1084 if (kqemu_is_ok(env)) {
1085 if (env->hflags & HF_LMA_MASK)
1086 CC_OP = CC_OP_EFLAGS;
1087 env->exception_index = -1;
1093 /* real mode interrupt */
/* Deliver interrupt 'intno' in real mode: read CS:IP from the 4-byte IVT
   entry, push FLAGS/CS/IP on the 16-bit stack and jump.
   NOTE(review): the GPF error code below uses intno * 8 + 2 while the
   IVT entry size used in the limit check is 4 bytes — matches the code
   as written, but looks inconsistent; verify against upstream. */
1094 static void do_interrupt_real(int intno, int is_int, int error_code,
1095 unsigned int next_eip)
1098 target_ulong ptr, ssp;
1100 uint32_t offset, esp;
1101 uint32_t old_cs, old_eip;
1102 int svm_should_check = 1;
1104 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1106 svm_should_check = 0;
1108 if (svm_should_check
1109 && INTERCEPTEDl(_exceptions, 1 << intno)
1111 raise_interrupt(intno, is_int, error_code, 0);
1113 /* real mode (simpler !) */
1115 if (intno * 4 + 3 > dt->limit)
1116 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1117 ptr = dt->base + intno * 4;
1118 offset = lduw_kernel(ptr);
1119 selector = lduw_kernel(ptr + 2);
1121 ssp = env->segs[R_SS].base;
1126 old_cs = env->segs[R_CS].selector;
1127 /* XXX: use SS segment size ? */
1128 PUSHW(ssp, esp, 0xffff, compute_eflags());
1129 PUSHW(ssp, esp, 0xffff, old_cs);
1130 PUSHW(ssp, esp, 0xffff, old_eip);
1132 /* update processor state */
1133 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1135 env->segs[R_CS].selector = selector;
1136 env->segs[R_CS].base = (selector << 4);
1137 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1140 /* fake user mode interrupt */
/* User-mode-emulation variant: only performs the software-int privilege
   check against the IDT gate DPL, then (in code not visible here) exits
   emulation with the appropriate exception. */
1141 void do_interrupt_user(int intno, int is_int, int error_code,
1142 target_ulong next_eip)
1150 ptr = dt->base + (intno * 8);
1151 e2 = ldl_kernel(ptr + 4);
1153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1154 cpl = env->hflags & HF_CPL_MASK;
1155 /* check privledge if software int */
1156 if (is_int && dpl < cpl)
1157 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1159 /* Since we emulate only user space, we cannot do more than
1160 exiting the emulation with the suitable exception and error
1159 /* Since we emulate only user space, we cannot do more than
1160 exiting the emulation with the suitable exception and error
1167 * Begin execution of an interruption. is_int is TRUE if coming from
1168 * the int instruction. next_eip is the EIP value AFTER the interrupt
1169 * instruction. It is only relevant if is_int is TRUE.
1171 void do_interrupt(int intno, int is_int, int error_code,
1172 target_ulong next_eip, int is_hw)
1174 if (loglevel & CPU_LOG_INT) {
1175 if ((env->cr[0] & CR0_PE_MASK)) {
1177 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1178 count, intno, error_code, is_int,
1179 env->hflags & HF_CPL_MASK,
1180 env->segs[R_CS].selector, EIP,
1181 (int)env->segs[R_CS].base + EIP,
1182 env->segs[R_SS].selector, ESP);
1183 if (intno == 0x0e) {
1184 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1186 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1188 fprintf(logfile, "\n");
1189 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1194 fprintf(logfile, " code=");
1195 ptr = env->segs[R_CS].base + env->eip;
1196 for(i = 0; i < 16; i++) {
1197 fprintf(logfile, " %02x", ldub(ptr + i));
1199 fprintf(logfile, "\n");
1205 if (env->cr[0] & CR0_PE_MASK) {
1207 if (env->hflags & HF_LMA_MASK) {
1208 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1212 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1215 do_interrupt_real(intno, is_int, error_code, next_eip);
1220 * Check nested exceptions and change to double or triple fault if
1221 * needed. It should only be called, if this is not an interrupt.
1222 * Returns the new exception number.
/* Implements the x86 exception-class escalation table: two contributory
   faults (or a page fault followed by contributory/page fault) become a
   double fault; a fault during a double fault aborts as a triple fault. */
1224 int check_exception(int intno, int *error_code)
1226 char first_contributory = env->old_exception == 0 ||
1227 (env->old_exception >= 10 &&
1228 env->old_exception <= 13);
1229 char second_contributory = intno == 0 ||
1230 (intno >= 10 && intno <= 13);
1232 if (loglevel & CPU_LOG_INT)
1233 fprintf(logfile, "check_exception old: %x new %x\n",
1234 env->old_exception, intno);
1236 if (env->old_exception == EXCP08_DBLE)
1237 cpu_abort(env, "triple fault");
1239 if ((first_contributory && second_contributory)
1240 || (env->old_exception == EXCP0E_PAGE &&
1241 (second_contributory || (intno == EXCP0E_PAGE)))) {
1242 intno = EXCP08_DBLE;
1246 if (second_contributory || (intno == EXCP0E_PAGE) ||
1247 (intno == EXCP08_DBLE))
1248 env->old_exception = intno;
1254 * Signal an interruption. It is executed in the main CPU loop.
1255 * is_int is TRUE if coming from the int instruction. next_eip is the
1256 * EIP value AFTER the interrupt instruction. It is only relevant if
/* Records the pending exception state in env (after SVM intercept and
   double-fault escalation) for the main loop; the longjmp back to the
   CPU loop is presumably in lines not visible here. */
1259 void raise_interrupt(int intno, int is_int, int error_code,
1260 int next_eip_addend)
1263 svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1264 intno = check_exception(intno, &error_code);
1267 env->exception_index = intno;
1268 env->error_code = error_code;
1269 env->exception_is_int = is_int;
1270 env->exception_next_eip = env->eip + next_eip_addend;
1274 /* same as raise_exception_err, but do not restore global registers */
/* Sets the pending exception fields directly and longjmps back to the
   CPU loop without restoring globals. */
1275 static void raise_exception_err_norestore(int exception_index, int error_code)
1277 exception_index = check_exception(exception_index, &error_code);
1279 env->exception_index = exception_index;
1280 env->error_code = error_code;
1281 env->exception_is_int = 0;
1282 env->exception_next_eip = 0;
1283 longjmp(env->jmp_env, 1);
1286 /* shortcuts to generate exceptions */
1288 void (raise_exception_err)(int exception_index, int error_code)
1290 raise_interrupt(exception_index, 0, error_code, 0);
/* Raise an exception with no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
/* SMM support: in user-mode emulation the SMM helpers are empty stubs
   (their bodies are dropped in this fragment). */
1300 #if defined(CONFIG_USER_ONLY)
1302 void do_smm_enter(void)
1306 void helper_rsm(void)
/* SMM revision ID stored in the state-save area; the 64-bit value sets
   bit 17, which helper_rsm() below tests (val & 0x20000) to decide
   whether SMBASE relocation is honored.
   NOTE(review): the #else separating the two defines was lost in
   extraction — confirm against the original file. */
1312 #ifdef TARGET_X86_64
1313 #define SMM_REVISION_ID 0x00020064
1315 #define SMM_REVISION_ID 0x00020000
/* Enter System Management Mode: dump the current CPU state into the
   SMRAM state-save area at smbase+0x8000 (64-bit layout under
   TARGET_X86_64, 32-bit layout otherwise), then load the SMM reset
   state. NOTE: many interior lines (braces, #else, some statements)
   were dropped in this fragment. */
1318 void do_smm_enter(void)
1320 target_ulong sm_state;
1324 if (loglevel & CPU_LOG_INT) {
1325 fprintf(logfile, "SMM: enter\n");
1326 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1329 env->hflags |= HF_SMM_MASK;
1330 cpu_smm_update(env);
/* State-save area base. */
1332 sm_state = env->smbase + 0x8000;
1334 #ifdef TARGET_X86_64
/* Save the six segment registers: selector, flags, limit, base. */
1335 for(i = 0; i < 6; i++) {
1337 offset = 0x7e00 + i * 16;
1338 stw_phys(sm_state + offset, dt->selector);
1339 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1340 stl_phys(sm_state + offset + 4, dt->limit);
1341 stq_phys(sm_state + offset + 8, dt->base);
1344 stq_phys(sm_state + 0x7e68, env->gdt.base);
1345 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1347 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1348 stq_phys(sm_state + 0x7e78, env->ldt.base);
1349 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1350 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1352 stq_phys(sm_state + 0x7e88, env->idt.base);
1353 stl_phys(sm_state + 0x7e84, env->idt.limit);
1355 stw_phys(sm_state + 0x7e90, env->tr.selector);
1356 stq_phys(sm_state + 0x7e98, env->tr.base);
1357 stl_phys(sm_state + 0x7e94, env->tr.limit);
1358 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1360 stq_phys(sm_state + 0x7ed0, env->efer);
/* General-purpose registers, 64-bit layout. */
1362 stq_phys(sm_state + 0x7ff8, EAX);
1363 stq_phys(sm_state + 0x7ff0, ECX);
1364 stq_phys(sm_state + 0x7fe8, EDX);
1365 stq_phys(sm_state + 0x7fe0, EBX);
1366 stq_phys(sm_state + 0x7fd8, ESP);
1367 stq_phys(sm_state + 0x7fd0, EBP);
1368 stq_phys(sm_state + 0x7fc8, ESI);
1369 stq_phys(sm_state + 0x7fc0, EDI);
1370 for(i = 8; i < 16; i++)
1371 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1372 stq_phys(sm_state + 0x7f78, env->eip);
1373 stl_phys(sm_state + 0x7f70, compute_eflags());
1374 stl_phys(sm_state + 0x7f68, env->dr[6]);
1375 stl_phys(sm_state + 0x7f60, env->dr[7]);
1377 stl_phys(sm_state + 0x7f48, env->cr[4]);
1378 stl_phys(sm_state + 0x7f50, env->cr[3]);
1379 stl_phys(sm_state + 0x7f58, env->cr[0]);
1381 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1382 stl_phys(sm_state + 0x7f00, env->smbase);
/* 32-bit state-save layout (legacy), presumably inside the #else arm
   lost in extraction — TODO confirm. */
1384 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1385 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1386 stl_phys(sm_state + 0x7ff4, compute_eflags());
1387 stl_phys(sm_state + 0x7ff0, env->eip);
1388 stl_phys(sm_state + 0x7fec, EDI);
1389 stl_phys(sm_state + 0x7fe8, ESI);
1390 stl_phys(sm_state + 0x7fe4, EBP);
1391 stl_phys(sm_state + 0x7fe0, ESP);
1392 stl_phys(sm_state + 0x7fdc, EBX);
1393 stl_phys(sm_state + 0x7fd8, EDX);
1394 stl_phys(sm_state + 0x7fd4, ECX);
1395 stl_phys(sm_state + 0x7fd0, EAX);
1396 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1397 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1399 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1400 stl_phys(sm_state + 0x7f64, env->tr.base);
1401 stl_phys(sm_state + 0x7f60, env->tr.limit);
1402 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1404 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1405 stl_phys(sm_state + 0x7f80, env->ldt.base);
1406 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1407 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1409 stl_phys(sm_state + 0x7f74, env->gdt.base);
1410 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1412 stl_phys(sm_state + 0x7f58, env->idt.base);
1413 stl_phys(sm_state + 0x7f54, env->idt.limit);
/* Segments 0..2 and 3..5 live in two separate descriptor regions. */
1415 for(i = 0; i < 6; i++) {
1418 offset = 0x7f84 + i * 12;
1420 offset = 0x7f2c + (i - 3) * 12;
1421 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1422 stl_phys(sm_state + offset + 8, dt->base);
1423 stl_phys(sm_state + offset + 4, dt->limit);
1424 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1426 stl_phys(sm_state + 0x7f14, env->cr[4]);
1428 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1429 stl_phys(sm_state + 0x7ef8, env->smbase);
1431 /* init SMM cpu state */
1433 #ifdef TARGET_X86_64
1435 env->hflags &= ~HF_LMA_MASK;
1437 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
/* SMM entry point is smbase+0x8000 with CS base = smbase. */
1438 env->eip = 0x00008000;
1439 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1441 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1442 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1443 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1444 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1445 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
/* Clear PE/EM/TS/PG: SMM starts in a real-mode-like environment. */
1447 cpu_x86_update_cr0(env,
1448 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1449 cpu_x86_update_cr4(env, 0);
1450 env->dr[7] = 0x00000400;
1451 CC_OP = CC_OP_EFLAGS;
/* RSM instruction: leave SMM by reloading the full CPU state from the
   SMRAM state-save area written by do_smm_enter() (mirror layout).
   NOTE: interior lines (braces, #else, declarations) were dropped in
   this fragment. */
1454 void helper_rsm(void)
1456 target_ulong sm_state;
1460 sm_state = env->smbase + 0x8000;
1461 #ifdef TARGET_X86_64
1462 env->efer = ldq_phys(sm_state + 0x7ed0);
/* Re-derive the long-mode-active hflag from the restored EFER. */
1463 if (env->efer & MSR_EFER_LMA)
1464 env->hflags |= HF_LMA_MASK;
1466 env->hflags &= ~HF_LMA_MASK;
/* Restore the six segment registers (64-bit layout). */
1468 for(i = 0; i < 6; i++) {
1469 offset = 0x7e00 + i * 16;
1470 cpu_x86_load_seg_cache(env, i,
1471 lduw_phys(sm_state + offset),
1472 ldq_phys(sm_state + offset + 8),
1473 ldl_phys(sm_state + offset + 4),
1474 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1477 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1478 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1480 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1481 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1482 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1483 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1485 env->idt.base = ldq_phys(sm_state + 0x7e88);
1486 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1488 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1489 env->tr.base = ldq_phys(sm_state + 0x7e98);
1490 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1491 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1493 EAX = ldq_phys(sm_state + 0x7ff8);
1494 ECX = ldq_phys(sm_state + 0x7ff0);
1495 EDX = ldq_phys(sm_state + 0x7fe8);
1496 EBX = ldq_phys(sm_state + 0x7fe0);
1497 ESP = ldq_phys(sm_state + 0x7fd8);
1498 EBP = ldq_phys(sm_state + 0x7fd0);
1499 ESI = ldq_phys(sm_state + 0x7fc8);
1500 EDI = ldq_phys(sm_state + 0x7fc0);
1501 for(i = 8; i < 16; i++)
1502 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1503 env->eip = ldq_phys(sm_state + 0x7f78);
1504 load_eflags(ldl_phys(sm_state + 0x7f70),
1505 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1506 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1507 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1509 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1510 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1511 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
/* Bit 17 of the revision ID signals SMBASE relocation support. */
1513 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1514 if (val & 0x20000) {
1515 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
/* 32-bit (legacy) restore path, presumably the #else arm lost in
   extraction — TODO confirm. */
1518 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1519 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1520 load_eflags(ldl_phys(sm_state + 0x7ff4),
1521 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1522 env->eip = ldl_phys(sm_state + 0x7ff0);
1523 EDI = ldl_phys(sm_state + 0x7fec);
1524 ESI = ldl_phys(sm_state + 0x7fe8);
1525 EBP = ldl_phys(sm_state + 0x7fe4);
1526 ESP = ldl_phys(sm_state + 0x7fe0);
1527 EBX = ldl_phys(sm_state + 0x7fdc);
1528 EDX = ldl_phys(sm_state + 0x7fd8);
1529 ECX = ldl_phys(sm_state + 0x7fd4);
1530 EAX = ldl_phys(sm_state + 0x7fd0);
1531 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1532 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1534 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1535 env->tr.base = ldl_phys(sm_state + 0x7f64);
1536 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1537 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1539 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1540 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1541 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1542 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1544 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1545 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1547 env->idt.base = ldl_phys(sm_state + 0x7f58);
1548 env->idt.limit = ldl_phys(sm_state + 0x7f54);
/* Segments 0..2 and 3..5 live in two separate descriptor regions. */
1550 for(i = 0; i < 6; i++) {
1552 offset = 0x7f84 + i * 12;
1554 offset = 0x7f2c + (i - 3) * 12;
1555 cpu_x86_load_seg_cache(env, i,
1556 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1557 ldl_phys(sm_state + offset + 8),
1558 ldl_phys(sm_state + offset + 4),
1559 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1561 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1563 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1564 if (val & 0x20000) {
1565 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
/* Leave SMM. */
1568 CC_OP = CC_OP_EFLAGS;
1569 env->hflags &= ~HF_SMM_MASK;
1570 cpu_smm_update(env);
1572 if (loglevel & CPU_LOG_INT) {
1573 fprintf(logfile, "SMM: after RSM\n");
1574 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1578 #endif /* !CONFIG_USER_ONLY */
/* Workaround wrappers for 64/32 division on a buggy compiler; both
   function bodies were dropped in this fragment. */
1581 #ifdef BUGGY_GCC_DIV64
1582 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1583 call it from another function */
1584 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1590 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
/* 32-bit unsigned DIV: divide EDX:EAX by T0, quotient -> EAX,
   remainder -> EDX; raises #DE on divide-by-zero or quotient overflow.
   Fragment: several interior lines are missing. */
1597 void helper_divl_EAX_T0(void)
1599 unsigned int den, r;
/* Build the 64-bit dividend from EDX:EAX. */
1602 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
/* #DE on zero divisor. */
1605 raise_exception(EXCP00_DIVZ);
1607 #ifdef BUGGY_GCC_DIV64
1608 r = div32(&q, num, den);
/* #DE when the quotient does not fit in 32 bits (overflow check). */
1614 raise_exception(EXCP00_DIVZ);
/* 32-bit signed IDIV: divide EDX:EAX by T0, quotient -> EAX,
   remainder -> EDX; raises #DE on divide-by-zero or overflow.
   Fragment: several interior lines are missing. */
1619 void helper_idivl_EAX_T0(void)
1624 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
/* #DE on zero divisor. */
1627 raise_exception(EXCP00_DIVZ);
1629 #ifdef BUGGY_GCC_DIV64
1630 r = idiv32(&q, num, den);
/* #DE when the signed quotient does not fit in 32 bits. */
1635 if (q != (int32_t)q)
1636 raise_exception(EXCP00_DIVZ);
/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; on match
   store ECX:EBX there. ZF update lives in lines dropped from this
   fragment. */
1641 void helper_cmpxchg8b(void)
1646 eflags = cc_table[CC_OP].compute_all();
1648 if (d == (((uint64_t)EDX << 32) | EAX)) {
1649 stq(A0, ((uint64_t)ECX << 32) | EBX);
1659 void helper_single_step()
1661 env->dr[6] |= 0x4000;
1662 raise_exception(EXCP01_SSTP);
/* CPUID instruction: fill EAX/EBX/ECX/EDX for the leaf selected by EAX.
   Fragment: the switch statement and its case labels were dropped, so
   the leaf boundaries below are inferred from the register values. */
1665 void helper_cpuid(void)
1668 index = (uint32_t)EAX;
1670 /* test if maximum index reached */
1671 if (index & 0x80000000) {
1672 if (index > env->cpuid_xlevel)
1673 index = env->cpuid_level;
1675 if (index > env->cpuid_level)
1676 index = env->cpuid_level;
/* Leaf 0: max basic leaf + vendor string in EBX/EDX/ECX. */
1681 EAX = env->cpuid_level;
1682 EBX = env->cpuid_vendor1;
1683 EDX = env->cpuid_vendor2;
1684 ECX = env->cpuid_vendor3;
/* Leaf 1: version/features. */
1687 EAX = env->cpuid_version;
1688 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1689 ECX = env->cpuid_ext_features;
1690 EDX = env->cpuid_features;
1693 /* cache info: needed for Pentium Pro compatibility */
/* Leaf 0x80000000: max extended leaf + vendor string. */
1700 EAX = env->cpuid_xlevel;
1701 EBX = env->cpuid_vendor1;
1702 EDX = env->cpuid_vendor2;
1703 ECX = env->cpuid_vendor3;
/* Leaf 0x80000001: extended features. */
1706 EAX = env->cpuid_features;
1708 ECX = env->cpuid_ext3_features;
1709 EDX = env->cpuid_ext2_features;
/* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf. */
1714 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1715 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1716 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1717 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1720 /* cache info (L1 cache) */
1727 /* cache info (L2 cache) */
1734 /* virtual & phys address size in low 2 bytes. */
1741 /* reserved values: zero */
/* ENTER instruction nesting support: copy `level` stack frame pointers
   from the old frame and push T1, in 32-bit (data32) or 16-bit form.
   Fragment: loop headers and ESP/EBP updates were dropped. */
1750 void helper_enter_level(int level, int data32)
1753 uint32_t esp_mask, esp, ebp;
1755 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1756 ssp = env->segs[R_SS].base;
/* 32-bit frame copies. */
1765 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1768 stl(ssp + (esp & esp_mask), T1);
/* 16-bit frame copies. */
1775 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1778 stw(ssp + (esp & esp_mask), T1);
/* 64-bit variant of the ENTER nesting helper; almost the entire body
   was dropped in this fragment. */
1782 #ifdef TARGET_X86_64
1783 void helper_enter64_level(int level, int data64)
1785 target_ulong esp, ebp;
1805 stw(esp, lduw(ebp));
/* LLDT instruction: load the LDT register from the selector in T0,
   validating the descriptor (must be an LDT descriptor, type 2, present).
   Fragment: braces, the GDT check and entry_limit assignments were
   dropped. */
1813 void helper_lldt_T0(void)
1818 int index, entry_limit;
1821 selector = T0 & 0xffff;
1822 if ((selector & 0xfffc) == 0) {
1823 /* XXX: NULL selector case: invalid LDT */
/* #GP if the selector references the LDT (TI bit) — the check itself
   was dropped from this fragment. */
1828 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1830 index = selector & ~7;
/* In long mode LDT descriptors are 16 bytes, so the limit check uses a
   larger entry size. */
1831 #ifdef TARGET_X86_64
1832 if (env->hflags & HF_LMA_MASK)
1837 if ((index + entry_limit) > dt->limit)
1838 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1839 ptr = dt->base + index;
1840 e1 = ldl_kernel(ptr);
1841 e2 = ldl_kernel(ptr + 4);
/* Must be a system descriptor of type 2 (LDT). */
1842 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1843 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1844 if (!(e2 & DESC_P_MASK))
1845 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
/* In long mode the upper 32 base bits come from the third dword. */
1846 #ifdef TARGET_X86_64
1847 if (env->hflags & HF_LMA_MASK) {
1849 e3 = ldl_kernel(ptr + 8);
1850 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1851 env->ldt.base |= (target_ulong)e3 << 32;
1855 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1858 env->ldt.selector = selector;
/* LTR instruction: load the task register from the selector in T0,
   validating the descriptor (available 286/386 TSS, present) and then
   marking the TSS busy. Fragment: braces and several checks were
   dropped. */
1861 void helper_ltr_T0(void)
1866 int index, type, entry_limit;
1869 selector = T0 & 0xffff;
1870 if ((selector & 0xfffc) == 0) {
1871 /* NULL selector case: invalid TR */
/* #GP if the selector has the TI (LDT) bit set — check dropped from
   this fragment. */
1877 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1879 index = selector & ~7;
/* 16-byte TSS descriptors in long mode. */
1880 #ifdef TARGET_X86_64
1881 if (env->hflags & HF_LMA_MASK)
1886 if ((index + entry_limit) > dt->limit)
1887 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1888 ptr = dt->base + index;
1889 e1 = ldl_kernel(ptr);
1890 e2 = ldl_kernel(ptr + 4);
1891 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
/* Must be an available 286 (type 1) or 386 (type 9) TSS. */
1892 if ((e2 & DESC_S_MASK) ||
1893 (type != 1 && type != 9))
1894 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1895 if (!(e2 & DESC_P_MASK))
1896 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1897 #ifdef TARGET_X86_64
1898 if (env->hflags & HF_LMA_MASK) {
/* Long mode: 16-byte descriptor; the upper half must be zero-typed. */
1900 e3 = ldl_kernel(ptr + 8);
1901 e4 = ldl_kernel(ptr + 12);
1902 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
1903 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1904 load_seg_cache_raw_dt(&env->tr, e1, e2);
1905 env->tr.base |= (target_ulong)e3 << 32;
1909 load_seg_cache_raw_dt(&env->tr, e1, e2);
/* Mark the TSS busy in the descriptor table. */
1911 e2 |= DESC_TSS_BUSY_MASK;
1912 stl_kernel(ptr + 4, e2);
1914 env->tr.selector = selector;
1917 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register with full protected-mode
   permission checks (#GP/#SS/#NP as appropriate). Fragment: braces,
   rpl extraction and the LDT/GDT table selection were dropped. */
1918 void load_seg(int seg_reg, int selector)
1927 cpl = env->hflags & HF_CPL_MASK;
1928 if ((selector & 0xfffc) == 0) {
1929 /* null selector case */
/* Null SS is only legal for 64-bit code below CPL 3. */
1931 #ifdef TARGET_X86_64
1932 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1935 raise_exception_err(EXCP0D_GPF, 0);
1936 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1943 index = selector & ~7;
1944 if ((index + 7) > dt->limit)
1945 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1946 ptr = dt->base + index;
1947 e1 = ldl_kernel(ptr);
1948 e2 = ldl_kernel(ptr + 4);
/* System descriptors cannot be loaded into data segment registers. */
1950 if (!(e2 & DESC_S_MASK))
1951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1953 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1954 if (seg_reg == R_SS) {
1955 /* must be writable segment */
1956 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
1957 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1958 if (rpl != cpl || dpl != cpl)
1959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1961 /* must be readable segment */
1962 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
1963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1965 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1966 /* if not conforming code, test rights */
1967 if (dpl < cpl || dpl < rpl)
1968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
/* Not-present: #SS for stack loads, #NP otherwise. */
1972 if (!(e2 & DESC_P_MASK)) {
1973 if (seg_reg == R_SS)
1974 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1976 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1979 /* set the access bit if not already set */
1980 if (!(e2 & DESC_A_MASK)) {
1982 stl_kernel(ptr + 4, e2);
1985 cpu_x86_load_seg_cache(env, seg_reg, selector,
1986 get_seg_base(e1, e2),
1987 get_seg_limit(e1, e2),
/* Debug trace (guarded by a log condition dropped from this fragment). */
1990 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1991 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1996 /* protected mode jump */
/* Far JMP in protected mode: direct jump to a code segment, or through
   a call gate / TSS / task gate. Fragment: braces, several privilege
   checks and switch labels were dropped. */
1997 void helper_ljmp_protected_T0_T1(int next_eip_addend)
1999 int new_cs, gate_cs, type;
2000 uint32_t e1, e2, cpl, dpl, rpl, limit;
2001 target_ulong new_eip, next_eip;
2005 if ((new_cs & 0xfffc) == 0)
2006 raise_exception_err(EXCP0D_GPF, 0);
2007 if (load_segment(&e1, &e2, new_cs) != 0)
2008 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2009 cpl = env->hflags & HF_CPL_MASK;
/* Direct jump to a code segment descriptor. */
2010 if (e2 & DESC_S_MASK) {
2011 if (!(e2 & DESC_CS_MASK))
2012 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2013 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2014 if (e2 & DESC_C_MASK) {
2015 /* conforming code segment */
2017 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2019 /* non conforming code segment */
2022 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2024 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2026 if (!(e2 & DESC_P_MASK))
2027 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2028 limit = get_seg_limit(e1, e2);
/* EIP limit check is skipped for 64-bit code segments. */
2029 if (new_eip > limit &&
2030 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2031 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2032 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2033 get_seg_base(e1, e2), limit, e2);
2036 /* jump to call or task gate */
2037 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2039 cpl = env->hflags & HF_CPL_MASK;
2040 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2042 case 1: /* 286 TSS */
2043 case 9: /* 386 TSS */
2044 case 5: /* task gate */
2045 if (dpl < cpl || dpl < rpl)
2046 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2047 next_eip = env->eip + next_eip_addend;
2048 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2049 CC_OP = CC_OP_EFLAGS;
2051 case 4: /* 286 call gate */
2052 case 12: /* 386 call gate */
2053 if ((dpl < cpl) || (dpl < rpl))
2054 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2055 if (!(e2 & DESC_P_MASK))
2056 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
/* Gate target: offset low word from e1, high word only for 386 gates. */
2058 new_eip = (e1 & 0xffff);
2060 new_eip |= (e2 & 0xffff0000);
2061 if (load_segment(&e1, &e2, gate_cs) != 0)
2062 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2063 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2064 /* must be code segment */
2065 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2066 (DESC_S_MASK | DESC_CS_MASK)))
2067 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2068 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2069 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2070 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2071 if (!(e2 & DESC_P_MASK))
2072 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2073 limit = get_seg_limit(e1, e2);
2074 if (new_eip > limit)
2075 raise_exception_err(EXCP0D_GPF, 0);
2076 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2077 get_seg_base(e1, e2), limit, e2);
/* default case: invalid gate type -> #GP. */
2081 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2087 /* real mode call */
/* Far CALL in real/VM86 mode: push CS:IP (32- or 16-bit per `shift`)
   and load the new CS:IP. Fragment: braces and the T0/T1 reads were
   dropped. */
2088 void helper_lcall_real_T0_T1(int shift, int next_eip)
2090 int new_cs, new_eip;
2091 uint32_t esp, esp_mask;
2097 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2098 ssp = env->segs[R_SS].base;
/* 32-bit push of return CS:EIP. */
2100 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2101 PUSHL(ssp, esp, esp_mask, next_eip);
/* 16-bit push of return CS:IP. */
2103 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2104 PUSHW(ssp, esp, esp_mask, next_eip);
2107 SET_ESP(esp, esp_mask);
/* Real mode: segment base is simply selector << 4. */
2109 env->segs[R_CS].selector = new_cs;
2110 env->segs[R_CS].base = (new_cs << 4);
2113 /* protected mode call */
/* Far CALL in protected mode: direct call to a code segment, or
   through a call gate (with possible privilege change and parameter
   copy) or TSS/task gate. Fragment: braces, switch labels and several
   statements were dropped. */
2114 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2116 int new_cs, new_stack, i;
2117 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2118 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2119 uint32_t val, limit, old_sp_mask;
2120 target_ulong ssp, old_ssp, next_eip, new_eip;
2124 next_eip = env->eip + next_eip_addend;
2126 if (loglevel & CPU_LOG_PCALL) {
2127 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2128 new_cs, (uint32_t)new_eip, shift);
2129 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2132 if ((new_cs & 0xfffc) == 0)
2133 raise_exception_err(EXCP0D_GPF, 0);
2134 if (load_segment(&e1, &e2, new_cs) != 0)
2135 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2136 cpl = env->hflags & HF_CPL_MASK;
2138 if (loglevel & CPU_LOG_PCALL) {
2139 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
/* Direct call to a code segment descriptor. */
2142 if (e2 & DESC_S_MASK) {
2143 if (!(e2 & DESC_CS_MASK))
2144 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2145 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2146 if (e2 & DESC_C_MASK) {
2147 /* conforming code segment */
2149 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2151 /* non conforming code segment */
2154 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2156 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2158 if (!(e2 & DESC_P_MASK))
2159 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2161 #ifdef TARGET_X86_64
2162 /* XXX: check 16/32 bit cases in long mode */
/* 64-bit path pushes return CS:RIP on the flat stack. */
2167 PUSHQ(rsp, env->segs[R_CS].selector);
2168 PUSHQ(rsp, next_eip);
2169 /* from this point, not restartable */
2171 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2172 get_seg_base(e1, e2),
2173 get_seg_limit(e1, e2), e2);
/* Legacy path: push return address on the current SS stack. */
2179 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2180 ssp = env->segs[R_SS].base;
2182 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2183 PUSHL(ssp, sp, sp_mask, next_eip);
2185 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2186 PUSHW(ssp, sp, sp_mask, next_eip);
2189 limit = get_seg_limit(e1, e2);
2190 if (new_eip > limit)
2191 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2192 /* from this point, not restartable */
2193 SET_ESP(sp, sp_mask);
2194 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2195 get_seg_base(e1, e2), limit, e2);
2199 /* check gate type */
2200 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2201 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2204 case 1: /* available 286 TSS */
2205 case 9: /* available 386 TSS */
2206 case 5: /* task gate */
2207 if (dpl < cpl || dpl < rpl)
2208 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2209 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2210 CC_OP = CC_OP_EFLAGS;
2212 case 4: /* 286 call gate */
2213 case 12: /* 386 call gate */
/* default case of this inner check: invalid gate type -> #GP. */
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2221 if (dpl < cpl || dpl < rpl)
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2223 /* check valid bit */
2224 if (!(e2 & DESC_P_MASK))
2225 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
/* Extract the gate's target selector, offset and parameter count. */
2226 selector = e1 >> 16;
2227 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2228 param_count = e2 & 0x1f;
2229 if ((selector & 0xfffc) == 0)
2230 raise_exception_err(EXCP0D_GPF, 0);
2232 if (load_segment(&e1, &e2, selector) != 0)
2233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2234 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2235 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2239 if (!(e2 & DESC_P_MASK))
2240 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2242 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2243 /* to inner privilege */
/* Fetch the new SS:ESP for the target privilege level from the TSS. */
2244 get_ss_esp_from_tss(&ss, &sp, dpl);
2246 if (loglevel & CPU_LOG_PCALL)
2247 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2248 ss, sp, param_count, ESP);
2250 if ((ss & 0xfffc) == 0)
2251 raise_exception_err(EXCP0A_TSS, ss & 0xfffc)
2252 if ((ss & 3) != dpl)
2253 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2254 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2255 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2256 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2258 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2259 if (!(ss_e2 & DESC_S_MASK) ||
2260 (ss_e2 & DESC_CS_MASK) ||
2261 !(ss_e2 & DESC_W_MASK))
2262 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2263 if (!(ss_e2 & DESC_P_MASK))
2264 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2266 // push_size = ((param_count * 2) + 8) << shift;
2268 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2269 old_ssp = env->segs[R_SS].base;
2271 sp_mask = get_sp_mask(ss_e2);
2272 ssp = get_seg_base(ss_e1, ss_e2);
/* Push old SS:ESP, then copy param_count parameters from the old
   stack to the new one (32-bit form). */
2274 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2275 PUSHL(ssp, sp, sp_mask, ESP);
2276 for(i = param_count - 1; i >= 0; i--) {
2277 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2278 PUSHL(ssp, sp, sp_mask, val);
/* Same, 16-bit form. */
2281 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2282 PUSHW(ssp, sp, sp_mask, ESP);
2283 for(i = param_count - 1; i >= 0; i--) {
2284 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2285 PUSHW(ssp, sp, sp_mask, val);
2290 /* to same privilege */
2292 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2293 ssp = env->segs[R_SS].base;
2294 // push_size = (4 << shift);
2299 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2300 PUSHL(ssp, sp, sp_mask, next_eip);
2302 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2303 PUSHW(ssp, sp, sp_mask, next_eip);
2306 /* from this point, not restartable */
/* Load the new SS (if stack switched) and CS at the gate's DPL. */
2309 ss = (ss & ~3) | dpl;
2310 cpu_x86_load_seg_cache(env, R_SS, ss,
2312 get_seg_limit(ss_e1, ss_e2),
2316 selector = (selector & ~3) | dpl;
2317 cpu_x86_load_seg_cache(env, R_CS, selector,
2318 get_seg_base(e1, e2),
2319 get_seg_limit(e1, e2),
2321 cpu_x86_set_cpl(env, dpl);
2322 SET_ESP(sp, sp_mask);
/* kqemu fast path (guard dropped from this fragment). */
2326 if (kqemu_is_ok(env)) {
2327 env->exception_index = -1;
2333 /* real and vm86 mode iret */
/* IRET in real/VM86 mode: pop IP, CS, FLAGS (32- or 16-bit per `shift`)
   and reload them; IOPL is only writable outside VM86. Fragment: braces
   and the eflags_mask declaration were dropped. */
2334 void helper_iret_real(int shift)
2336 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2340 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2342 ssp = env->segs[R_SS].base;
/* 32-bit pops. */
2345 POPL(ssp, sp, sp_mask, new_eip);
2346 POPL(ssp, sp, sp_mask, new_cs);
2348 POPL(ssp, sp, sp_mask, new_eflags);
/* 16-bit pops. */
2351 POPW(ssp, sp, sp_mask, new_eip);
2352 POPW(ssp, sp, sp_mask, new_cs);
2353 POPW(ssp, sp, sp_mask, new_eflags);
2355 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2356 load_seg_vm(R_CS, new_cs);
/* In VM86 IOPL may not be changed by IRET. */
2358 if (env->eflags & VM_MASK)
2359 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2361 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
/* 16-bit IRET only updates the low flags word. */
2363 eflags_mask &= 0xffff;
2364 load_eflags(new_eflags, eflags_mask);
/* On privilege-lowering returns, null out data segment registers whose
   DPL is below the new CPL (hardware invalidates such cached
   descriptors). Fragment: braces and the dpl < cpl test were dropped. */
2367 static inline void validate_seg(int seg_reg, int cpl)
2372 /* XXX: on x86_64, we do not want to nullify FS and GS because
2373 they may still contain a valid base. I would be interested to
2374 know how a real x86_64 CPU behaves */
2375 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2376 (env->segs[seg_reg].selector & 0xfffc) == 0)
2379 e2 = env->segs[seg_reg].flags;
2380 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2381 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2382 /* data or non conforming code segment */
2384 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2389 /* protected mode iret */
/* Common body for protected-mode RET-far and IRET: pop EIP/CS (and
   EFLAGS for IRET), validate the target code segment, handle returns
   to outer privilege levels and returns to VM86. Fragment: braces,
   switch/if scaffolding and several statements were dropped. */
2390 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2392 uint32_t new_cs, new_eflags, new_ss;
2393 uint32_t new_es, new_ds, new_fs, new_gs;
2394 uint32_t e1, e2, ss_e1, ss_e2;
2395 int cpl, dpl, rpl, eflags_mask, iopl;
2396 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2398 #ifdef TARGET_X86_64
2403 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2405 ssp = env->segs[R_SS].base;
2406 new_eflags = 0; /* avoid warning */
2407 #ifdef TARGET_X86_64
/* 64-bit pops (flat stack). */
2413 POPQ(sp, new_eflags);
/* 32-bit pops. */
2419 POPL(ssp, sp, sp_mask, new_eip);
2420 POPL(ssp, sp, sp_mask, new_cs);
2423 POPL(ssp, sp, sp_mask, new_eflags);
/* IRET with VM flag set returns to virtual-8086 mode. */
2424 if (new_eflags & VM_MASK)
2425 goto return_to_vm86;
/* 16-bit pops. */
2429 POPW(ssp, sp, sp_mask, new_eip);
2430 POPW(ssp, sp, sp_mask, new_cs);
2432 POPW(ssp, sp, sp_mask, new_eflags);
2435 if (loglevel & CPU_LOG_PCALL) {
2436 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2437 new_cs, new_eip, shift, addend);
2438 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2441 if ((new_cs & 0xfffc) == 0)
2442 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2443 if (load_segment(&e1, &e2, new_cs) != 0)
2444 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2445 if (!(e2 & DESC_S_MASK) ||
2446 !(e2 & DESC_CS_MASK))
2447 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2448 cpl = env->hflags & HF_CPL_MASK;
/* rpl < cpl is forbidden (check condition dropped from fragment). */
2451 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2452 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2453 if (e2 & DESC_C_MASK) {
2455 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2458 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2460 if (!(e2 & DESC_P_MASK))
2461 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2464 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2465 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2466 /* return to same priledge level */
2467 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2468 get_seg_base(e1, e2),
2469 get_seg_limit(e1, e2),
2472 /* return to different privilege level */
2473 #ifdef TARGET_X86_64
/* Pop the outer SS:ESP (32-bit form). */
2482 POPL(ssp, sp, sp_mask, new_esp);
2483 POPL(ssp, sp, sp_mask, new_ss);
/* 16-bit form. */
2487 POPW(ssp, sp, sp_mask, new_esp);
2488 POPW(ssp, sp, sp_mask, new_ss);
2491 if (loglevel & CPU_LOG_PCALL) {
2492 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2496 if ((new_ss & 0xfffc) == 0) {
2497 #ifdef TARGET_X86_64
2498 /* NULL ss is allowed in long mode if cpl != 3*/
2499 /* XXX: test CS64 ? */
2500 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2501 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2503 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2504 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2505 DESC_W_MASK | DESC_A_MASK);
2506 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2510 raise_exception_err(EXCP0D_GPF, 0);
2513 if ((new_ss & 3) != rpl)
2514 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2515 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2516 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2517 if (!(ss_e2 & DESC_S_MASK) ||
2518 (ss_e2 & DESC_CS_MASK) ||
2519 !(ss_e2 & DESC_W_MASK))
2520 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2521 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2523 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2524 if (!(ss_e2 & DESC_P_MASK))
2525 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2526 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2527 get_seg_base(ss_e1, ss_e2),
2528 get_seg_limit(ss_e1, ss_e2),
2532 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2533 get_seg_base(e1, e2),
2534 get_seg_limit(e1, e2),
2536 cpu_x86_set_cpl(env, rpl);
2538 #ifdef TARGET_X86_64
2539 if (env->hflags & HF_CS64_MASK)
2543 sp_mask = get_sp_mask(ss_e2);
2545 /* validate data segments */
2546 validate_seg(R_ES, rpl);
2547 validate_seg(R_DS, rpl);
2548 validate_seg(R_FS, rpl);
2549 validate_seg(R_GS, rpl);
2553 SET_ESP(sp, sp_mask);
2556 /* NOTE: 'cpl' is the _old_ CPL */
2557 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
/* Only CPL 0 may change IOPL; only CPL <= IOPL may change IF. */
2559 eflags_mask |= IOPL_MASK;
2560 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2562 eflags_mask |= IF_MASK;
2564 eflags_mask &= 0xffff;
2565 load_eflags(new_eflags, eflags_mask);
/* return_to_vm86: pop the remaining frame and reload all segment
   registers VM86-style. */
2570 POPL(ssp, sp, sp_mask, new_esp);
2571 POPL(ssp, sp, sp_mask, new_ss);
2572 POPL(ssp, sp, sp_mask, new_es);
2573 POPL(ssp, sp, sp_mask, new_ds);
2574 POPL(ssp, sp, sp_mask, new_fs);
2575 POPL(ssp, sp, sp_mask, new_gs);
2577 /* modify processor state */
2578 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2579 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2580 load_seg_vm(R_CS, new_cs & 0xffff);
2581 cpu_x86_set_cpl(env, 3);
2582 load_seg_vm(R_SS, new_ss & 0xffff);
2583 load_seg_vm(R_ES, new_es & 0xffff);
2584 load_seg_vm(R_DS, new_ds & 0xffff);
2585 load_seg_vm(R_FS, new_fs & 0xffff);
2586 load_seg_vm(R_GS, new_gs & 0xffff);
2588 env->eip = new_eip & 0xffff;
/* IRET in protected mode: either a task return (NT flag set -> switch
   back via the back-link in the current TSS) or a normal
   helper_ret_protected(). Fragment: braces and declarations were
   dropped. */
2592 void helper_iret_protected(int shift, int next_eip)
2594 int tss_selector, type;
2597 /* specific case for TSS */
2598 if (env->eflags & NT_MASK) {
/* Task returns are illegal in long mode. */
2599 #ifdef TARGET_X86_64
2600 if (env->hflags & HF_LMA_MASK)
2601 raise_exception_err(EXCP0D_GPF, 0);
/* Back-link selector is the first word of the current TSS. */
2603 tss_selector = lduw_kernel(env->tr.base + 0);
2604 if (tss_selector & 4)
2605 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2606 if (load_segment(&e1, &e2, tss_selector) != 0)
2607 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2608 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2609 /* NOTE: we check both segment and busy TSS */
2611 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2612 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2614 helper_ret_protected(shift, 1, 0);
/* kqemu fast path (guard dropped from this fragment). */
2617 if (kqemu_is_ok(env)) {
2618 CC_OP = CC_OP_EFLAGS;
2619 env->exception_index = -1;
/* Far return (LRET): pops CS:EIP plus 'addend' bytes of parameters via
   the common protected-mode return path (is_iret=0). */
2625 void helper_lret_protected(int shift, int addend)
2627 helper_ret_protected(shift, 0, addend);
2629 if (kqemu_is_ok(env)) {
2630 env->exception_index = -1;
/* SYSENTER: fast system-call entry.  Loads flat CPL0 code/stack segments
   derived from MSR_IA32_SYSENTER_CS (CS = MSR value, SS = MSR value + 8)
   and transfers to the MSR-defined ESP/EIP.  #GP(0) if SYSENTER_CS was
   never initialised. */
2636 void helper_sysenter(void)
2638 if (env->sysenter_cs == 0) {
2639 raise_exception_err(EXCP0D_GPF, 0);
/* kernel entry runs with VM and IF cleared (interrupts disabled) */
2641 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2642 cpu_x86_set_cpl(env, 0);
2643 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2645 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2647 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2648 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2650 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2652 DESC_W_MASK | DESC_A_MASK);
2653 ESP = env->sysenter_esp;
2654 EIP = env->sysenter_eip;
/* SYSEXIT: fast return to user mode (CPL3).  CS/SS are derived from
   SYSENTER_CS + 16 / + 24 with RPL forced to 3.  #GP(0) unless executed
   at CPL0 with SYSENTER_CS initialised. */
2657 void helper_sysexit(void)
2661 cpl = env->hflags & HF_CPL_MASK;
2662 if (env->sysenter_cs == 0 || cpl != 0) {
2663 raise_exception_err(EXCP0D_GPF, 0);
2665 cpu_x86_set_cpl(env, 3);
2666 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2668 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2669 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2670 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2671 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2673 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2674 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2675 DESC_W_MASK | DESC_A_MASK);
/* kqemu fast path */
2679 if (kqemu_is_ok(env)) {
2680 env->exception_index = -1;
/* MOV to control register: route T0 into CR0/CR3/CR4/CR8 through the
   cpu_x86_update_* hooks so MMU mode, TLB and APIC TPR state stay
   coherent.  Only meaningful in system emulation. */
2686 void helper_movl_crN_T0(int reg)
2688 #if !defined(CONFIG_USER_ONLY)
2691 cpu_x86_update_cr0(env, T0);
2694 cpu_x86_update_cr3(env, T0);
2697 cpu_x86_update_cr4(env, T0);
/* CR8 maps to the APIC task-priority register */
2700 cpu_set_apic_tpr(env, T0);
2710 void helper_movl_drN_T0(int reg)
2715 void helper_invlpg(target_ulong addr)
2717 cpu_x86_flush_tlb(env, addr);
/* RDTSC: #GP if CR4.TSD is set and we are not at CPL0; otherwise return
   the virtual time-stamp counter in EDX:EAX. */
2720 void helper_rdtsc(void)
2724 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2725 raise_exception(EXCP0D_GPF);
2727 val = cpu_get_tsc(env);
2728 EAX = (uint32_t)(val);
2729 EDX = (uint32_t)(val >> 32);
2732 #if defined(CONFIG_USER_ONLY)
2733 void helper_wrmsr(void)
2737 void helper_rdmsr(void)
/* WRMSR: write the 64-bit value EDX:EAX to the MSR selected by ECX.
   Unknown MSRs are silently ignored (see the XXX below). */
2741 void helper_wrmsr(void)
2745 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2747 switch((uint32_t)ECX) {
2748 case MSR_IA32_SYSENTER_CS:
2749 env->sysenter_cs = val & 0xffff;
2751 case MSR_IA32_SYSENTER_ESP:
2752 env->sysenter_esp = val;
2754 case MSR_IA32_SYSENTER_EIP:
2755 env->sysenter_eip = val;
2757 case MSR_IA32_APICBASE:
2758 cpu_set_apic_base(env, val);
/* MSR_EFER: only bits backed by an advertised CPUID feature may be
   toggled; everything else is preserved from the current EFER */
2762 uint64_t update_mask;
2764 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2765 update_mask |= MSR_EFER_SCE;
2766 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2767 update_mask |= MSR_EFER_LME;
2768 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2769 update_mask |= MSR_EFER_FFXSR;
2770 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2771 update_mask |= MSR_EFER_NXE;
2772 env->efer = (env->efer & ~update_mask) |
2773 (val & update_mask);
2782 case MSR_VM_HSAVE_PA:
2783 env->vm_hsave = val;
/* 64-bit-only MSRs: FS/GS base and SYSCALL state */
2785 #ifdef TARGET_X86_64
2796 env->segs[R_FS].base = val;
2799 env->segs[R_GS].base = val;
2801 case MSR_KERNELGSBASE:
2802 env->kernelgsbase = val;
2806 /* XXX: exception ? */
/* RDMSR: read the MSR selected by ECX and return it in EDX:EAX.
   Unknown MSRs read back as whatever 'val' holds (see the XXX below). */
2811 void helper_rdmsr(void)
2814 switch((uint32_t)ECX) {
2815 case MSR_IA32_SYSENTER_CS:
2816 val = env->sysenter_cs;
2818 case MSR_IA32_SYSENTER_ESP:
2819 val = env->sysenter_esp;
2821 case MSR_IA32_SYSENTER_EIP:
2822 val = env->sysenter_eip;
2824 case MSR_IA32_APICBASE:
2825 val = cpu_get_apic_base(env);
2836 case MSR_VM_HSAVE_PA:
2837 val = env->vm_hsave;
/* 64-bit-only MSRs */
2839 #ifdef TARGET_X86_64
2850 val = env->segs[R_FS].base;
2853 val = env->segs[R_GS].base;
2855 case MSR_KERNELGSBASE:
2856 val = env->kernelgsbase;
2860 /* XXX: exception ? */
2864 EAX = (uint32_t)(val);
2865 EDX = (uint32_t)(val >> 32);
/* LSL: load segment limit of the selector in T0.  On success ZF is set
   and the (page-scaled) limit is produced; when the descriptor is not
   visible at the current CPL/RPL, ZF is cleared and the destination is
   left unchanged.  Flags are delivered through CC_SRC with
   CC_OP_EFLAGS semantics. */
2869 void helper_lsl(void)
2871 unsigned int selector, limit;
2872 uint32_t e1, e2, eflags;
2873 int rpl, dpl, cpl, type;
2875 eflags = cc_table[CC_OP].compute_all();
2876 selector = T0 & 0xffff;
2877 if (load_segment(&e1, &e2, selector) != 0)
2880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2881 cpl = env->hflags & HF_CPL_MASK;
2882 if (e2 & DESC_S_MASK) {
/* conforming code segments skip the privilege check */
2883 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2886 if (dpl < cpl || dpl < rpl)
2890 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2901 if (dpl < cpl || dpl < rpl) {
2903 CC_SRC = eflags & ~CC_Z;
2907 limit = get_seg_limit(e1, e2);
2909 CC_SRC = eflags | CC_Z;
/* LAR: load access rights of the selector in T0.  On success ZF is set
   and T1 receives the attribute bytes (mask 0x00f0ff00 of the second
   descriptor word); on failure ZF is cleared and T1 is unchanged. */
2912 void helper_lar(void)
2914 unsigned int selector;
2915 uint32_t e1, e2, eflags;
2916 int rpl, dpl, cpl, type;
2918 eflags = cc_table[CC_OP].compute_all();
2919 selector = T0 & 0xffff;
/* a null selector always fails */
2920 if ((selector & 0xfffc) == 0)
2922 if (load_segment(&e1, &e2, selector) != 0)
2925 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2926 cpl = env->hflags & HF_CPL_MASK;
2927 if (e2 & DESC_S_MASK) {
/* conforming code segments skip the privilege check */
2928 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2931 if (dpl < cpl || dpl < rpl)
2935 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2949 if (dpl < cpl || dpl < rpl) {
2951 CC_SRC = eflags & ~CC_Z;
2955 T1 = e2 & 0x00f0ff00;
2956 CC_SRC = eflags | CC_Z;
/* VERR: set ZF iff the segment selected by T0 is readable at the
   current CPL/RPL.  Code segments must have the readable bit; the
   privilege check is skipped for conforming code segments. */
2959 void helper_verr(void)
2961 unsigned int selector;
2962 uint32_t e1, e2, eflags;
2965 eflags = cc_table[CC_OP].compute_all();
2966 selector = T0 & 0xffff;
2967 if ((selector & 0xfffc) == 0)
2969 if (load_segment(&e1, &e2, selector) != 0)
/* only ordinary code/data segments can be verified */
2971 if (!(e2 & DESC_S_MASK))
2974 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2975 cpl = env->hflags & HF_CPL_MASK;
2976 if (e2 & DESC_CS_MASK) {
2977 if (!(e2 & DESC_R_MASK))
2979 if (!(e2 & DESC_C_MASK)) {
2980 if (dpl < cpl || dpl < rpl)
2984 if (dpl < cpl || dpl < rpl) {
2986 CC_SRC = eflags & ~CC_Z;
2990 CC_SRC = eflags | CC_Z;
/* VERW: set ZF iff the segment selected by T0 is a writable data
   segment accessible at the current CPL/RPL.  Code segments are never
   writable, so they always clear ZF. */
2993 void helper_verw(void)
2995 unsigned int selector;
2996 uint32_t e1, e2, eflags;
2999 eflags = cc_table[CC_OP].compute_all();
3000 selector = T0 & 0xffff;
3001 if ((selector & 0xfffc) == 0)
3003 if (load_segment(&e1, &e2, selector) != 0)
3005 if (!(e2 & DESC_S_MASK))
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 cpl = env->hflags & HF_CPL_MASK;
/* code segments are never writable */
3010 if (e2 & DESC_CS_MASK) {
3013 if (dpl < cpl || dpl < rpl)
3015 if (!(e2 & DESC_W_MASK)) {
3017 CC_SRC = eflags & ~CC_Z;
3021 CC_SRC = eflags | CC_Z;
3026 void helper_fldt_ST0_A0(void)
3029 new_fpstt = (env->fpstt - 1) & 7;
3030 env->fpregs[new_fpstt].d = helper_fldt(A0);
3031 env->fpstt = new_fpstt;
3032 env->fptags[new_fpstt] = 0; /* validate stack entry */
3035 void helper_fstt_ST0_A0(void)
3037 helper_fstt(ST0, A0);
3040 void fpu_set_exception(int mask)
3043 if (env->fpus & (~env->fpuc & FPUC_EM))
3044 env->fpus |= FPUS_SE | FPUS_B;
3047 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3050 fpu_set_exception(FPUS_ZE);
3054 void fpu_raise_exception(void)
3056 if (env->cr[0] & CR0_NE_MASK) {
3057 raise_exception(EXCP10_COPR);
3059 #if !defined(CONFIG_USER_ONLY)
3068 void helper_fbld_ST0_A0(void)
3076 for(i = 8; i >= 0; i--) {
3078 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3081 if (ldub(A0 + 9) & 0x80)
3087 void helper_fbst_ST0_A0(void)
3090 target_ulong mem_ref, mem_end;
3093 val = floatx_to_int64(ST0, &env->fp_status);
3095 mem_end = mem_ref + 9;
3102 while (mem_ref < mem_end) {
3107 v = ((v / 10) << 4) | (v % 10);
3110 while (mem_ref < mem_end) {
3115 void helper_f2xm1(void)
3117 ST0 = pow(2.0,ST0) - 1.0;
3120 void helper_fyl2x(void)
3122 CPU86_LDouble fptemp;
3126 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3130 env->fpus &= (~0x4700);
3135 void helper_fptan(void)
3137 CPU86_LDouble fptemp;
3140 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3146 env->fpus &= (~0x400); /* C2 <-- 0 */
3147 /* the above code is for |arg| < 2**52 only */
3151 void helper_fpatan(void)
3153 CPU86_LDouble fptemp, fpsrcop;
3157 ST1 = atan2(fpsrcop,fptemp);
3161 void helper_fxtract(void)
3163 CPU86_LDoubleU temp;
3164 unsigned int expdif;
3167 expdif = EXPD(temp) - EXPBIAS;
3168 /*DP exponent bias*/
/* FPREM1: IEEE-754 remainder, ST0 = ST0 REM ST1.  When the exponent
   difference fits in one step the low three quotient bits are reported
   in C0/C3/C1 and C2 is cleared; otherwise a partial remainder is
   computed and C2 is set so the caller re-executes the instruction.
   Invalid operands (inf/NaN/zero divisor) yield a NaN. */
3175 void helper_fprem1(void)
3177 CPU86_LDouble dblq, fpsrcop, fptemp;
3178 CPU86_LDoubleU fpsrcop1, fptemp1;
3180 signed long long int q;
3182 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3183 ST0 = 0.0 / 0.0; /* NaN */
3184 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3190 fpsrcop1.d = fpsrcop;
3192 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3195 /* optimisation? taken from the AMD docs */
3196 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3197 /* ST0 is unchanged */
/* single-step case: full quotient, remainder and quotient bits */
3202 dblq = fpsrcop / fptemp;
3203 /* round dblq towards nearest integer */
3205 ST0 = fpsrcop - fptemp * dblq;
3207 /* convert dblq to q by truncating towards zero */
3209 q = (signed long long int)(-dblq);
3211 q = (signed long long int)dblq;
3213 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3214 /* (C0,C3,C1) <-- (q2,q1,q0) */
3215 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3216 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3217 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
/* partial-remainder case: reduce by a power of two and flag C2 */
3219 env->fpus |= 0x400; /* C2 <-- 1 */
3220 fptemp = pow(2.0, expdif - 50);
3221 fpsrcop = (ST0 / ST1) / fptemp;
3222 /* fpsrcop = integer obtained by chopping */
3223 fpsrcop = (fpsrcop < 0.0) ?
3224 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3225 ST0 -= (ST1 * fpsrcop * fptemp);
/* FPREM: x87 partial remainder with round-toward-zero quotient
   (unlike FPREM1's round-to-nearest).  Reports quotient bits in
   C0/C3/C1 when complete, or sets C2 and reduces by 2^(expdif-N)
   (N = 32 + expdif % 32, per the AMD docs) when more iterations are
   needed.  Invalid operands yield a NaN. */
3229 void helper_fprem(void)
3231 CPU86_LDouble dblq, fpsrcop, fptemp;
3232 CPU86_LDoubleU fpsrcop1, fptemp1;
3234 signed long long int q;
3236 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3237 ST0 = 0.0 / 0.0; /* NaN */
3238 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3242 fpsrcop = (CPU86_LDouble)ST0;
3243 fptemp = (CPU86_LDouble)ST1;
3244 fpsrcop1.d = fpsrcop;
3246 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3249 /* optimisation? taken from the AMD docs */
3250 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3251 /* ST0 is unchanged */
3255 if ( expdif < 53 ) {
3256 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3257 /* round dblq towards zero */
3258 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3259 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3261 /* convert dblq to q by truncating towards zero */
3263 q = (signed long long int)(-dblq);
3265 q = (signed long long int)dblq;
3267 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3268 /* (C0,C3,C1) <-- (q2,q1,q0) */
3269 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3270 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3271 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3273 int N = 32 + (expdif % 32); /* as per AMD docs */
3274 env->fpus |= 0x400; /* C2 <-- 1 */
3275 fptemp = pow(2.0, (double)(expdif - N));
3276 fpsrcop = (ST0 / ST1) / fptemp;
3277 /* fpsrcop = integer obtained by chopping */
3278 fpsrcop = (fpsrcop < 0.0) ?
3279 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3280 ST0 -= (ST1 * fpsrcop * fptemp);
3284 void helper_fyl2xp1(void)
3286 CPU86_LDouble fptemp;
3289 if ((fptemp+1.0)>0.0) {
3290 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3294 env->fpus &= (~0x4700);
3299 void helper_fsqrt(void)
3301 CPU86_LDouble fptemp;
3305 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3311 void helper_fsincos(void)
3313 CPU86_LDouble fptemp;
3316 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3322 env->fpus &= (~0x400); /* C2 <-- 0 */
3323 /* the above code is for |arg| < 2**63 only */
3327 void helper_frndint(void)
3329 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3332 void helper_fscale(void)
3334 ST0 = ldexp (ST0, (int)(ST1));
3337 void helper_fsin(void)
3339 CPU86_LDouble fptemp;
3342 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3346 env->fpus &= (~0x400); /* C2 <-- 0 */
3347 /* the above code is for |arg| < 2**53 only */
3351 void helper_fcos(void)
3353 CPU86_LDouble fptemp;
3356 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3360 env->fpus &= (~0x400); /* C2 <-- 0 */
3361 /* the above code is for |arg5 < 2**63 only */
/* FXAM: classify ST0 and encode the class in the C3..C0 condition-code
   bits of the status word (NaN / infinity / zero / denormal / normal),
   with C1 reporting the sign. */
3365 void helper_fxam_ST0(void)
3367 CPU86_LDoubleU temp;
3372 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3374 env->fpus |= 0x200; /* C1 <-- 1 */
3376 /* XXX: test fptags too */
3377 expdif = EXPD(temp);
3378 if (expdif == MAXEXPD) {
/* maximum exponent: infinity or NaN, distinguished by the mantissa
   (the 80-bit format keeps an explicit integer bit) */
3379 #ifdef USE_X86LDOUBLE
3380 if (MANTD(temp) == 0x8000000000000000ULL)
3382 if (MANTD(temp) == 0)
3384 env->fpus |= 0x500 /*Infinity*/;
3386 env->fpus |= 0x100 /*NaN*/;
3387 } else if (expdif == 0) {
3388 if (MANTD(temp) == 0)
3389 env->fpus |= 0x4000 /*Zero*/;
3391 env->fpus |= 0x4400 /*Denormal*/;
/* FSTENV: store the FPU environment (control word, status word with
   TOP merged in, tag word, and zeroed instruction/operand pointers) at
   'ptr', in the 16-bit or 32-bit layout selected by data32.  The tag
   word is recomputed from the register contents: empty, zero, special
   (NaN/inf/denormal) or valid. */
3397 void helper_fstenv(target_ulong ptr, int data32)
3399 int fpus, fptag, exp, i;
/* merge the stack TOP field into the status-word image */
3403 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3405 for (i=7; i>=0; i--) {
3407 if (env->fptags[i]) {
3410 tmp.d = env->fpregs[i].d;
3413 if (exp == 0 && mant == 0) {
3416 } else if (exp == 0 || exp == MAXEXPD
3417 #ifdef USE_X86LDOUBLE
3418 || (mant & (1LL << 63)) == 0
3421 /* NaNs, infinity, denormal */
/* 32-bit layout: each field occupies a dword */
3428 stl(ptr, env->fpuc);
3430 stl(ptr + 8, fptag);
3431 stl(ptr + 12, 0); /* fpip */
3432 stl(ptr + 16, 0); /* fpcs */
3433 stl(ptr + 20, 0); /* fpoo */
3434 stl(ptr + 24, 0); /* fpos */
/* 16-bit layout: packed words */
3437 stw(ptr, env->fpuc);
3439 stw(ptr + 4, fptag);
/* FLDENV: reload control/status/tag words from the environment image at
   'ptr' (16- or 32-bit layout per data32).  TOP is extracted from the
   status word, and each 2-bit architectural tag is collapsed to QEMU's
   boolean empty/non-empty flag (3 == empty). */
3447 void helper_fldenv(target_ulong ptr, int data32)
3452 env->fpuc = lduw(ptr);
3453 fpus = lduw(ptr + 4);
3454 fptag = lduw(ptr + 8);
3457 env->fpuc = lduw(ptr);
3458 fpus = lduw(ptr + 2);
3459 fptag = lduw(ptr + 4);
3461 env->fpstt = (fpus >> 11) & 7;
3462 env->fpus = fpus & ~0x3800;
3463 for(i = 0;i < 8; i++) {
3464 env->fptags[i] = ((fptag & 3) == 3);
3469 void helper_fsave(target_ulong ptr, int data32)
3474 helper_fstenv(ptr, data32);
3476 ptr += (14 << data32);
3477 for(i = 0;i < 8; i++) {
3479 helper_fstt(tmp, ptr);
3497 void helper_frstor(target_ulong ptr, int data32)
3502 helper_fldenv(ptr, data32);
3503 ptr += (14 << data32);
3505 for(i = 0;i < 8; i++) {
3506 tmp = helper_fldt(ptr);
/* FXSAVE: store the FPU/SSE state at 'ptr' in FXSAVE layout.  The tag
   word is stored inverted as a one-bit-per-register validity mask, each
   x87 register is written as an 80-bit value in a 16-byte slot, and —
   when CR4.OSFXSR is enabled — MXCSR and the XMM registers follow.
   data64 selects the 16-XMM-register x86-64 layout (8 otherwise). */
3512 void helper_fxsave(target_ulong ptr, int data64)
3514 int fpus, fptag, i, nb_xmm_regs;
3518 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3520 for(i = 0; i < 8; i++) {
3521 fptag |= (env->fptags[i] << i);
3523 stw(ptr, env->fpuc);
/* FXSAVE stores an abridged, inverted tag byte */
3525 stw(ptr + 4, fptag ^ 0xff);
3528 for(i = 0;i < 8; i++) {
3530 helper_fstt(tmp, addr);
3534 if (env->cr[4] & CR4_OSFXSR_MASK) {
3535 /* XXX: finish it */
3536 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3537 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3538 nb_xmm_regs = 8 << data64;
3540 for(i = 0; i < nb_xmm_regs; i++) {
3541 stq(addr, env->xmm_regs[i].XMM_Q(0));
3542 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
/* FXRSTOR: reload FPU/SSE state from the FXSAVE image at 'ptr' —
   mirror of helper_fxsave.  Tag bits come from the abridged one-bit
   mask; MXCSR and XMM registers are restored only when CR4.OSFXSR is
   set.  data64 selects the 16-XMM-register x86-64 layout. */
3548 void helper_fxrstor(target_ulong ptr, int data64)
3550 int i, fpus, fptag, nb_xmm_regs;
3554 env->fpuc = lduw(ptr);
3555 fpus = lduw(ptr + 2);
3556 fptag = lduw(ptr + 4);
3557 env->fpstt = (fpus >> 11) & 7;
3558 env->fpus = fpus & ~0x3800;
3560 for(i = 0;i < 8; i++) {
3561 env->fptags[i] = ((fptag >> i) & 1);
3565 for(i = 0;i < 8; i++) {
3566 tmp = helper_fldt(addr);
3571 if (env->cr[4] & CR4_OSFXSR_MASK) {
3572 /* XXX: finish it */
3573 env->mxcsr = ldl(ptr + 0x18);
3575 nb_xmm_regs = 8 << data64;
3577 for(i = 0; i < nb_xmm_regs; i++) {
3578 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3579 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3585 #ifndef USE_X86LDOUBLE
3587 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3589 CPU86_LDoubleU temp;
3594 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3595 /* exponent + sign */
3596 e = EXPD(temp) - EXPBIAS + 16383;
3597 e |= SIGND(temp) >> 16;
3601 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3603 CPU86_LDoubleU temp;
3607 /* XXX: handle overflow ? */
3608 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3609 e |= (upper >> 4) & 0x800; /* sign */
3610 ll = (mant >> 11) & ((1LL << 52) - 1);
3612 temp.l.upper = (e << 20) | (ll >> 32);
3615 temp.ll = ll | ((uint64_t)e << 52);
3622 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3624 CPU86_LDoubleU temp;
3627 *pmant = temp.l.lower;
3628 *pexp = temp.l.upper;
3631 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3633 CPU86_LDoubleU temp;
3635 temp.l.upper = upper;
3636 temp.l.lower = mant;
3641 #ifdef TARGET_X86_64
3643 //#define DEBUG_MULDIV
3645 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3654 static void neg128(uint64_t *plow, uint64_t *phigh)
3658 add128(plow, phigh, 1, 0);
3661 /* return TRUE if overflow */
/* return TRUE if overflow */
/* Unsigned 128/64 divide: divides *phigh:*plow by b using a bit-serial
   shift-and-subtract loop, leaving the quotient in *plow and the
   remainder in *phigh.  Returns TRUE when the quotient does not fit in
   64 bits. */
3662 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3664 uint64_t q, r, a1, a0;
3677 /* XXX: use a better algorithm */
3678 for(i = 0; i < 64; i++) {
3680 a1 = (a1 << 1) | (a0 >> 63);
3681 if (ab || a1 >= b) {
3687 a0 = (a0 << 1) | qb;
3689 #if defined(DEBUG_MULDIV)
3690 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3691 *phigh, *plow, b, a0, a1);
3699 /* return TRUE if overflow */
/* return TRUE if overflow */
/* Signed 128/64 divide built on div64: reduce to magnitudes, divide,
   then verify the quotient fits the signed 64-bit range (note the
   asymmetric bound when the result is negative) and re-apply signs. */
3700 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3703 sa = ((int64_t)*phigh < 0);
3705 neg128(plow, phigh);
3709 if (div64(plow, phigh, b) != 0)
3712 if (*plow > (1ULL << 63))
3716 if (*plow >= (1ULL << 63))
3724 void helper_mulq_EAX_T0(void)
3728 mulu64(&r0, &r1, EAX, T0);
3735 void helper_imulq_EAX_T0(void)
3739 muls64(&r0, &r1, EAX, T0);
3743 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3746 void helper_imulq_T0_T1(void)
3750 muls64(&r0, &r1, T0, T1);
3753 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3756 void helper_divq_EAX_T0(void)
3760 raise_exception(EXCP00_DIVZ);
3764 if (div64(&r0, &r1, T0))
3765 raise_exception(EXCP00_DIVZ);
3770 void helper_idivq_EAX_T0(void)
3774 raise_exception(EXCP00_DIVZ);
3778 if (idiv64(&r0, &r1, T0))
3779 raise_exception(EXCP00_DIVZ);
3784 void helper_bswapq_T0(void)
3790 void helper_hlt(void)
3792 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3793 env->hflags |= HF_HALTED_MASK;
3794 env->exception_index = EXCP_HLT;
3798 void helper_monitor(void)
3800 if ((uint32_t)ECX != 0)
3801 raise_exception(EXCP0D_GPF);
3802 /* XXX: store address ? */
3805 void helper_mwait(void)
3807 if ((uint32_t)ECX != 0)
3808 raise_exception(EXCP0D_GPF);
3809 /* XXX: not complete but not completely erroneous */
3810 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3811 /* more than one CPU: do not sleep because another CPU may
3818 float approx_rsqrt(float a)
3820 return 1.0 / sqrt(a);
3823 float approx_rcp(float a)
/* Propagate the x87 control word (FPUC) into softfloat's fp_status:
   the RC field selects the rounding mode and the PC field selects the
   extended-precision rounding width. */
3828 void update_fp_status(void)
3832 /* set rounding mode */
3833 switch(env->fpuc & RC_MASK) {
3836 rnd_type = float_round_nearest_even;
3839 rnd_type = float_round_down;
3842 rnd_type = float_round_up;
3845 rnd_type = float_round_to_zero;
3848 set_float_rounding_mode(rnd_type, &env->fp_status);
/* precision control (PC field) */
3850 switch((env->fpuc >> 8) & 3) {
3862 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3866 #if !defined(CONFIG_USER_ONLY)
3868 #define MMUSUFFIX _mmu
3870 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
3872 # define GETPC() (__builtin_return_address(0))
3876 #include "softmmu_template.h"
3879 #include "softmmu_template.h"
3882 #include "softmmu_template.h"
3885 #include "softmmu_template.h"
3889 /* try to fill the TLB and return an exception if error. If retaddr is
3890 NULL, it means that the function was called in C code (i.e. not
3891 from generated code or from helper.c) */
3892 /* XXX: fix it to restore all registers */
/* Handle a softmmu TLB miss: ask the x86 MMU to map 'addr'.  On a real
   fault, if retaddr points into translated code the CPU state is first
   rolled back to the faulting instruction before the exception is
   raised; otherwise the no-restore variant is used. */
3893 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3895 TranslationBlock *tb;
3898 CPUX86State *saved_env;
3900 /* XXX: hack to restore env in all cases, even if not called from
3903 env = cpu_single_env;
3905 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3908 /* now we have a real cpu fault */
3909 pc = (unsigned long)retaddr;
3910 tb = tb_find_pc(pc);
3912 /* the PC is inside the translated code. It means that we have
3913 a virtual CPU fault */
3914 cpu_restore_state(tb, env, pc, NULL);
3918 raise_exception_err(env->exception_index, env->error_code);
3920 raise_exception_err_norestore(env->exception_index, env->error_code);
3926 /* Secure Virtual Machine helpers */
3928 void helper_stgi(void)
3930 env->hflags |= HF_GIF_MASK;
3933 void helper_clgi(void)
3935 env->hflags &= ~HF_GIF_MASK;
3938 #if defined(CONFIG_USER_ONLY)
3940 void helper_vmrun(target_ulong addr) { }
3941 void helper_vmmcall(void) { }
3942 void helper_vmload(target_ulong addr) { }
3943 void helper_vmsave(target_ulong addr) { }
3944 void helper_skinit(void) { }
3945 void helper_invlpga(void) { }
3946 void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
3947 int svm_check_intercept_param(uint32_t type, uint64_t param)
/* Convert a VMCB segment attribute word (plus the segment base and
   limit) into the packed 32-bit flags layout QEMU keeps in
   SegmentCache.flags.  The VMCB stores the descriptor attribute bits
   compressed into 12 bits; the cpu flags keep them at their native
   descriptor positions together with base[31:16] and limit[19:16]. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t flags;

    flags  = (uint32_t)(vmcb_attrib & 0x00ff) << 8;  /* Type, S, DPL, P */
    flags |= (uint32_t)(vmcb_attrib & 0x0f00) << 12; /* AVL, L, DB, G */
    flags |= (vmcb_base >> 16) & 0xff;               /* Base 23-16 */
    flags |= vmcb_base & 0xff000000;                 /* Base 31-24 */
    flags |= vmcb_limit & 0xf0000;                   /* Limit 19-16 */
    return flags;
}
/* Inverse of vmcb2cpu_attrib: compress QEMU's 32-bit segment flags back
   into the 12-bit VMCB attribute word (base/limit pieces are stored in
   their own VMCB fields and are not part of the attribute). */
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    uint16_t type_s_dpl_p = (cpu_attrib >> 8) & 0xff;   /* Type, S, DPL, P */
    uint16_t avl_l_db_g = (cpu_attrib & 0xf00000) >> 12; /* AVL, L, DB, G */
    return type_s_dpl_p | avl_l_db_g;
}
3970 extern uint8_t *phys_ram_base;
/* VMRUN: enter the guest described by the VMCB at physical address
   'addr'.  Saves the host state into the hsave page, loads the
   intercept bitmaps and the full guest state from the VMCB, optionally
   flushes the TLB, and finally injects any pending event from
   control.event_inj. */
3971 void helper_vmrun(target_ulong addr)
3976 if (loglevel & CPU_LOG_TB_IN_ASM)
3977 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
3979 env->vm_vmcb = addr;
3982 /* save the current CPU state in the hsave page */
3983 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
3984 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
3986 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
3987 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
3989 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
3990 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
3991 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
3992 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
3993 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
3994 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
3995 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
3997 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
3998 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4000 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4001 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4002 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4003 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4005 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4006 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4007 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4009 /* load the interception bitmaps so we do not need to access the
4011 /* We shift all the intercept bits so we can OR them with the TB
4013 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4014 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4015 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4016 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4017 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4018 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
/* load guest descriptor tables and control registers from the VMCB */
4020 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4021 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4023 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4024 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4026 /* clear exit_info_2 so we behave like the real hardware */
4027 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4029 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4030 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4031 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4032 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4033 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4034 if (int_ctl & V_INTR_MASKING_MASK) {
4035 env->cr[8] = int_ctl & V_TPR_MASK;
4036 if (env->eflags & IF_MASK)
4037 env->hflags |= HF_HIF_MASK;
4040 #ifdef TARGET_X86_64
4041 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4042 env->hflags &= ~HF_LMA_MASK;
4043 if (env->efer & MSR_EFER_LMA)
4044 env->hflags |= HF_LMA_MASK;
4047 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4048 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4049 CC_OP = CC_OP_EFLAGS;
4050 CC_DST = 0xffffffff;
4052 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4053 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4054 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4055 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4057 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4059 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4060 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4061 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4062 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4063 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4065 /* FIXME: guest state consistency checks */
4067 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4068 case TLB_CONTROL_DO_NOTHING:
4070 case TLB_CONTROL_FLUSH_ALL_ASID:
4071 /* FIXME: this is not 100% correct but should work for now */
4080 /* maybe we need to inject an event */
4081 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4082 if (event_inj & SVM_EVTINJ_VALID) {
4083 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4084 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4085 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
/* hardware clears the VALID bit once the event is taken */
4086 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4088 if (loglevel & CPU_LOG_TB_IN_ASM)
4089 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4090 /* FIXME: need to implement valid_err */
4091 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4092 case SVM_EVTINJ_TYPE_INTR:
4093 env->exception_index = vector;
4094 env->error_code = event_inj_err;
4095 env->exception_is_int = 1;
4096 env->exception_next_eip = -1;
4097 if (loglevel & CPU_LOG_TB_IN_ASM)
4098 fprintf(logfile, "INTR");
4100 case SVM_EVTINJ_TYPE_NMI:
/* NOTE(review): exception_is_int is set to 1 here although an NMI is
   not a software interrupt — verify; later QEMU versions use 0. */
4101 env->exception_index = vector;
4102 env->error_code = event_inj_err;
4103 env->exception_is_int = 1;
4104 env->exception_next_eip = EIP;
4105 if (loglevel & CPU_LOG_TB_IN_ASM)
4106 fprintf(logfile, "NMI");
4108 case SVM_EVTINJ_TYPE_EXEPT:
4109 env->exception_index = vector;
4110 env->error_code = event_inj_err;
4111 env->exception_is_int = 0;
4112 env->exception_next_eip = -1;
4113 if (loglevel & CPU_LOG_TB_IN_ASM)
4114 fprintf(logfile, "EXEPT");
4116 case SVM_EVTINJ_TYPE_SOFT:
4117 env->exception_index = vector;
4118 env->error_code = event_inj_err;
4119 env->exception_is_int = 1;
4120 env->exception_next_eip = EIP;
4121 if (loglevel & CPU_LOG_TB_IN_ASM)
4122 fprintf(logfile, "SOFT");
4125 if (loglevel & CPU_LOG_TB_IN_ASM)
4126 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4128 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4129 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4135 void helper_vmmcall(void)
4137 if (loglevel & CPU_LOG_TB_IN_ASM)
4138 fprintf(logfile,"vmmcall!\n");
/* VMLOAD: load the "extra" guest state from the VMCB at 'addr' —
   FS/GS/TR/LDTR segment registers plus the SYSCALL/SYSENTER MSR
   shadow state (and kernel GS base on x86-64). */
4141 void helper_vmload(target_ulong addr)
4143 if (loglevel & CPU_LOG_TB_IN_ASM)
4144 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4145 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4146 env->segs[R_FS].base);
4148 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4149 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4150 SVM_LOAD_SEG2(addr, tr, tr);
4151 SVM_LOAD_SEG2(addr, ldt, ldtr);
4153 #ifdef TARGET_X86_64
4154 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4155 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4156 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4157 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4159 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4160 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4161 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4162 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
/* VMSAVE: mirror of helper_vmload — store FS/GS/TR/LDTR and the
   SYSCALL/SYSENTER MSR shadow state into the VMCB at 'addr'. */
4165 void helper_vmsave(target_ulong addr)
4167 if (loglevel & CPU_LOG_TB_IN_ASM)
4168 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4169 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4170 env->segs[R_FS].base);
4172 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4173 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4174 SVM_SAVE_SEG(addr, tr, tr);
4175 SVM_SAVE_SEG(addr, ldt, ldtr);
4177 #ifdef TARGET_X86_64
4178 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4179 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4180 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4181 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4183 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4184 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4185 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4186 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4189 void helper_skinit(void)
4191 if (loglevel & CPU_LOG_TB_IN_ASM)
4192 fprintf(logfile,"skinit!\n");
4195 void helper_invlpga(void)
/* Check whether the SVM intercept for 'type' is enabled in the active
   VMCB; if so, perform a #VMEXIT with exit_info_1 = param.  IOIO and
   MSR intercepts additionally consult the guest's permission bitmaps
   in physical memory.  Returns whether the type was recognised. */
4200 int svm_check_intercept_param(uint32_t type, uint64_t param)
4203 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4204 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4205 vmexit(type, param);
4209 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4210 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4211 vmexit(type, param);
4215 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4216 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4217 vmexit(type, param);
4221 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4222 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4223 vmexit(type, param);
4227 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4228 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4229 vmexit(type, param);
/* IOIO: look up the port bit in the I/O permission map */
4234 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4235 /* FIXME: this should be read in at vmrun (faster this way?) */
4236 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4237 uint16_t port = (uint16_t) (param >> 16);
4239 if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
4240 vmexit(type, param);
/* MSR: look up ECX in the three-region MSR permission map
   (each MSR owns two bits: read and write) */
4245 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4246 /* FIXME: this should be read in at vmrun (faster this way?) */
4247 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
4248 switch((uint32_t)ECX) {
4253 case 0xc0000000 ... 0xc0001fff:
4254 T0 = (8192 + ECX - 0xc0000000) * 2;
4258 case 0xc0010000 ... 0xc0011fff:
4259 T0 = (16384 + ECX - 0xc0010000) * 2;
4264 vmexit(type, param);
4267 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4268 vmexit(type, param);
/* all remaining exit codes map linearly onto the intercept word */
4273 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
4274 vmexit(type, param);
/* Perform a #VMEXIT: save the current guest CPU state into the guest VMCB
   (env->vm_vmcb), then restore the host state previously stashed in
   env->vm_hsave, and record exit_code/exit_info_1 for the host VMM.
   NOTE(review): this chunk is sparsely sampled — some interior lines
   (e.g. the #endif closing the TARGET_X86_64 section, and the function's
   closing brace) are not visible here. */
4282 void vmexit(uint64_t exit_code, uint64_t exit_info_1)
4286 if (loglevel & CPU_LOG_TB_IN_ASM)
4287 fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
4288 exit_code, exit_info_1,
4289 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
/* Record (and clear) the interrupt-shadow state in the VMCB so the VMM
   can see whether the guest was in an STI/MOV-SS shadow. */
4292 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
4293 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
4294 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4296 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4299 /* Save the VM state in the vmcb */
4300 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
4301 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
4302 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
4303 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
4305 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4306 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4308 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4309 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4311 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4312 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4314 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4315 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
/* With V_INTR_MASKING enabled, the guest's TPR lives in CR8: write it
   back into the VMCB's V_TPR field. int_ctl is reused further down to
   decide whether host CR8 must be restored. */
4317 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
4318 int_ctl &= ~V_TPR_MASK;
4319 int_ctl |= env->cr[8] & V_TPR_MASK;
4320 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
4323 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
4324 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4325 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4326 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4327 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4328 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4329 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
4331 /* Reload the host state from vm_hsave */
4332 env->hflags &= ~HF_HIF_MASK;
4334 env->intercept_exceptions = 0;
4335 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
4337 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
4338 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
4340 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
4341 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
/* CR0.PE is forced on per the SVM #VMEXIT rules (see also line 4391). */
4343 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
4344 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
4345 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
/* int_ctl still holds the guest VMCB value read above: only restore host
   CR8 if the guest was running with virtual interrupt masking. */
4346 if (int_ctl & V_INTR_MASKING_MASK)
4347 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
4348 /* we need to set the efer after the crs so the hidden flags get set properly */
4349 #ifdef TARGET_X86_64
4350 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
4351 env->hflags &= ~HF_LMA_MASK;
4352 if (env->efer & MSR_EFER_LMA)
4353 env->hflags |= HF_LMA_MASK;
/* NOTE(review): the matching #endif is in the sampled-out lines 4354-4356. */
4357 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
4358 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4359 CC_OP = CC_OP_EFLAGS;
4361 SVM_LOAD_SEG(env->vm_hsave, ES, es);
4362 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
4363 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
4364 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
4366 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4367 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4368 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4370 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4371 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* #VMEXIT always returns to the host VMM at CPL 0; record the 64-bit
   exit code (split high/low) and exit_info_1 in the guest VMCB. */
4374 cpu_x86_set_cpl(env, 0);
4375 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
4376 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
4377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
4380 /* FIXME: Resets the current ASID register to zero (host ASID). */
4382 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4384 /* Clears the TSC_OFFSET inside the processor. */
4386 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4387 from the page table indicated the host's CR3. If the PDPEs contain
4388 illegal state, the processor causes a shutdown. */
4390 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4391 env->cr[0] |= CR0_PE_MASK;
4392 env->eflags &= ~VM_MASK;
4394 /* Disables all breakpoints in the host DR7 register. */
4396 /* Checks the reloaded host state for consistency. */
4398 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4399 host's code segment or non-canonical (in the case of long mode), a
4400 #GP fault is delivered inside the host.) */
4402 /* remove any pending exception */
4403 env->exception_index = -1;
4404 env->error_code = 0;
4405 env->old_exception = -1;