VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper2.c @9902

Last change on this file since 9902 was 6938, checked in by vboxsync, 17 years ago

recompiler: fix ptab calculation for 32bit code under x86_64 (from qemu)

  • Property svn:eol-style set to native
File size: 33.4 KB

/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#ifndef VBOX
#include <signal.h>
#include <assert.h>
#else
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

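/* glibc does not export a wrapper for modify_ldt, so the syscall is issued
   directly; func 1 writes an LDT entry (used by the code-copy test setup
   in cpu_x86_init below). */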
int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#ifdef VBOX
CPUX86State *cpu_x86_init(CPUX86State *env)
{
#else /* !VBOX */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
#endif /* !VBOX */
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#endif /* !VBOX */
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
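        /* Selector (1 << 3) | 7 decodes as index 1, TI = 1 (LDT), RPL = 3,
           so %fs now addresses the CPUState through the LDT entry written
           above. */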
    }
#endif
#ifndef VBOX /* cpuid_features is initialized by caller */
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
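            /* The loop below packs the string little-endian, four characters
               per 32-bit cpuid_model word, e.g. "QEMU" -> 0x554d4551. */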
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
#endif /* VBOX */
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

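    /* CS base 0xffff0000 with EIP 0xfff0 places the first instruction fetch
       at the architectural reset vector, linear address 0xfffffff0. */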
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

#ifndef VBOX
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
#endif

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
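/* These names mirror the CC_OP_* enumeration: the translator evaluates
   EFLAGS lazily, recording in cc_op which operation last set the flags and
   keeping its operands in cc_src/cc_dst, so the dump below can label them. */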

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
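        /* Bit 20 of the mask models the A20 gate: while it is masked off,
           physical addresses wrap at 1 MB as they did on the 8086. */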
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
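    /* The shift by HF_MP_SHIFT - 1 lines CR0.MP/EM/TS (bits 1-3) up with the
       HF_MP/HF_EM/HF_TS hflags bits, so all three transfer in one operation. */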
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
#if defined(DEBUG) && defined(VBOX)
    uint32_t pde;

    /* page directory entry */
    pde = remR3PhysReadU32(((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);

    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        printf("cpu_x86_flush_tlb: 4 MB page!!!!!\n");
    }
#endif
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
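/* Walk layout: in long mode the linear address supplies 9-bit table indices
   at bits 39 (PML4), 30 (PDPT), 21 (PD) and 12 (PT), each scaled by the
   8-byte entry size; legacy PAE keeps the lower three levels with a 4-entry
   PDPT, and non-PAE paging uses two levels of 10-bit indices with 4-byte
   entries. The shifts and masks below follow directly from that layout. */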
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
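            /* XOR-ing with PG_NX_MASK turns NX into an "executable" bit so
               it can be accumulated across levels with AND, just like the
               USER and RW bits; it is flipped back once the PTE is merged. */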
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

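        /* Two-level non-PAE walk: bits 31-22 of the address index the page
           directory and bits 21-12 the page table; (addr >> 20) & 0xffc is
           the directory index already scaled by the 4-byte entry size. */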
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
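    /* Build the x87 tag word, two bits per register: 11 marks an empty
       slot; non-empty slots are left as 00 (valid) for frstor to reload. */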
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif