/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif /* USE_CODE_COPY */
CPUX86State *cpu_x86_init(void)

    env = qemu_mallocz(sizeof(CPUX86State));

    /* init various static tables */
    optimize_flags_init();

    /* testing code for code copy case */
    struct modify_ldt_ldt_s ldt;

    ldt.base_addr = (unsigned long)env;
    ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
    ldt.read_exec_only = 0;
    ldt.limit_in_pages = 1;
    ldt.seg_not_present = 0;
    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

    asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
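    /* The selector loaded into %fs above is (1 << 3) | 7 = 0x0f:
       descriptor index 1, TI=1 (LDT), RPL=3 - i.e. the LDT entry
       installed by the modify_ldt() call just before. */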
    int family, model, stepping;

    env->cpuid_vendor1 = 0x68747541; /* "Auth" */
    env->cpuid_vendor2 = 0x69746e65; /* "enti" */
    env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */

    env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
    env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
    env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */

    env->cpuid_level = 2;
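    /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in bits 7:4,
       family in bits 11:8 - hence the shifts below. */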
    env->cpuid_version = (family << 8) | (model << 4) | stepping;
    env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                           CPUID_TSC | CPUID_MSR | CPUID_MCE |
                           CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                           CPUID_PAT);
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = CPUID_EXT_SSE3;
    env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
    env->cpuid_features |= CPUID_APIC;
    env->cpuid_xlevel = 0x80000006;

    const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;

    len = strlen(model_id);
    for(i = 0; i < 48; i++) {
        c = (i < len) ? model_id[i] : '\0';
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
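        /* The 48-character brand string is packed four ASCII bytes per
           32-bit word, little-endian, as reported by CPUID leaves
           0x80000002-0x80000004. */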
    /* currently not enabled for std i386 because not fully tested */
    env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
    env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
    env->cpuid_xlevel = 0x80000008;

    /* these features are needed for Win64 and aren't fully implemented */
    env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
    /* this feature is needed for Solaris and isn't fully implemented */
    env->cpuid_features |= CPUID_PSE36;
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
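    /* CR0 reset value 0x60000010: CD and NW caching bits and ET set,
       paging and protection disabled. */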
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
void cpu_x86_close(CPUX86State *env)

/***********************************************************/

static const char *cc_op_str[] = {
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)

    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    eflags = env->eflags;

    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",

        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",

        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    (uint32_t)env->ldt.base,
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    (uint32_t)env->tr.base,
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,

            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
    if (flags & X86_DUMP_FPU) {
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
#if defined(USE_X86LDOUBLE)
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
            cpu_fprintf(f, "\n");
        if (env->hflags & HF_CS64_MASK)
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            cpu_fprintf(f, "\n");
/***********************************************************/

/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        env->a20_mask = 0xffefffff | (a20_state << 20);
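        /* a20_mask is ANDed into guest physical addresses: clearing bit 20
           when A20 is disabled reproduces the 1 MB address wrap-around of
           the original PC. */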
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {

    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
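    /* CR0.MP, CR0.EM and CR0.TS occupy bits 1..3, so a single left shift
       lines them up with HF_MP_MASK..HF_TS_MASK in hflags. */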
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {

    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)

    /* user mode only emulation */
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)

#define PHYS_ADDR_MASK 0xfffff000
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)

    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
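            /* Long mode uses a four-level walk (PML4E, PDPE, PDE, PTE),
               consuming 9 bits of the virtual address per level, with
               4 KB or 2 MB leaf pages. */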
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->exception_index = EXCP0D_GPF;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {

            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;

            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {

            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;

            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {

            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {

        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;

        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {

            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;

            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            virt_addr = addr & ~(page_size - 1);

            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {

            /* combine pde and pte user and rw protections */
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            virt_addr = addr & ~0xfff;
    /* the page can be put in the TLB */
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (ptep & PG_RW_MASK)
            prot |= PAGE_WRITE;
        if (!(env->cr[0] & CR0_WP_MASK) ||

    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    error_code = PG_ERROR_P_MASK;
    error_code |= (is_write << PG_ERROR_W_BIT);
    error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
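/* Debug-only address translation: walks the page tables without raising
   faults or updating accessed/dirty bits. */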
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)

    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;

        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {

        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */

            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
    if (!(env->cr[0] & CR0_PG_MASK)) {

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;

    pte = pte & env->a20_mask;

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;

#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)

    uint8_t fpregs1[8 * 10];

void restore_native_fp_state(CPUState *env)

    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
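    /* The x87 TOP field lives in bits 13:11 of the status word; fpstt is
       kept separately in CPUState and merged back in here. */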
    for (i = 7; i >= 0; i--) {
        if (env->fptags[i]) {
            /* the FPU automatically computes it */

    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);

    /* frstor reloads the full x87 state from the memory image */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
void save_native_fp_state(CPUState *env)

    struct fpstate fp1, *fp = &fp1;

    /* fsave stores the full x87 state to memory and reinitializes the FPU */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;

    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
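        /* the tag word holds two bits per register; the value 3 marks an
           empty register, which is all fptags[] tracks */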
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);

    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
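    /* 0x037f = all exceptions masked, extended (64-bit) precision; bits 11:10
       preserve the guest's rounding-control setting. */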
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;