VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/helper.c@ 14107

最後變更 在這個檔案從14107是 13652,由 vboxsync 提交於 16 年 前

Solaris and general 32-bit compilation fixes

  • 屬性 svn:eol-style 設為 native
檔案大小: 43.9 KB
 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#include <assert.h>
37#endif
38
39#include "cpu.h"
40#include "exec-all.h"
41#include "svm.h"
42#include "qemu-common.h"
43
44//#define DEBUG_MMU
45
46static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
47
/* Set the bit corresponding to the named CPUID feature flag in the
 * matching one of the four feature bitmaps: standard features (CPUID.1
 * EDX), ext_features (CPUID.1 ECX), ext2_features (CPUID.80000001 EDX)
 * and ext3_features (CPUID.80000001 ECX).  Tables are searched in that
 * order and the first match wins, so names present in several tables
 * (e.g. "fpu", "mtrr", "mmx") only ever set the bit in the standard
 * table.  Unknown names are reported on stderr and otherwise ignored.
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        /* "mtrr" was misspelled "mttr" here; note it is shadowed by the
         * identically named entry in feature_name above anyway. */
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* Use an unsigned shift throughout: "1 << 31" overflows a signed int,
     * which is undefined behaviour in C. */
    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1u << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1u << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1u << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1u << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
#ifndef VBOX
/* Allocate and initialize a CPU state for the given model string.
   Returns the new CPUX86State, or NULL on allocation/model failure. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#else
/* VBox variant: the caller owns and passes in the CPUX86State; only
   the common initialization below is performed here. */
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
{
    static int inited;
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    /* Resolve the model string into CPUID values; bail out on failure. */
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
136
/* Static description of a virtual CPU model: CPUID level, vendor id,
   family/model/stepping and the four feature bitmaps exposed through
   CPUID.  Instances live in the x86_defs[] table below. */
typedef struct x86_def_t {
    const char *name;                    /* model name selectable via -cpu */
    uint32_t level;                      /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* 12-char vendor id, 4 chars per word */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* highest extended CPUID leaf */
    char model_id[48];                   /* model-id string, packed into cpuid_model */
} x86_def_t;
148
#ifndef VBOX
/* Feature-set building blocks for the built-in CPU models below. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of built-in CPU models, looked up by name in
   cpu_x86_find_by_name(). */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
310
311static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
312{
313 unsigned int i;
314 x86_def_t *def;
315
316 char *s = strdup(cpu_model);
317 char *featurestr, *name = strtok(s, ",");
318 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
319 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
320 int family = -1, model = -1, stepping = -1;
321
322 def = NULL;
323 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
324 if (strcmp(name, x86_defs[i].name) == 0) {
325 def = &x86_defs[i];
326 break;
327 }
328 }
329 if (!def)
330 goto error;
331 memcpy(x86_cpu_def, def, sizeof(*def));
332
333 featurestr = strtok(NULL, ",");
334
335 while (featurestr) {
336 char *val;
337 if (featurestr[0] == '+') {
338 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
339 } else if (featurestr[0] == '-') {
340 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
341 } else if ((val = strchr(featurestr, '='))) {
342 *val = 0; val++;
343 if (!strcmp(featurestr, "family")) {
344 char *err;
345 family = strtol(val, &err, 10);
346 if (!*val || *err || family < 0) {
347 fprintf(stderr, "bad numerical value %s\n", val);
348 goto error;
349 }
350 x86_cpu_def->family = family;
351 } else if (!strcmp(featurestr, "model")) {
352 char *err;
353 model = strtol(val, &err, 10);
354 if (!*val || *err || model < 0 || model > 0xf) {
355 fprintf(stderr, "bad numerical value %s\n", val);
356 goto error;
357 }
358 x86_cpu_def->model = model;
359 } else if (!strcmp(featurestr, "stepping")) {
360 char *err;
361 stepping = strtol(val, &err, 10);
362 if (!*val || *err || stepping < 0 || stepping > 0xf) {
363 fprintf(stderr, "bad numerical value %s\n", val);
364 goto error;
365 }
366 x86_cpu_def->stepping = stepping;
367 } else if (!strcmp(featurestr, "vendor")) {
368 if (strlen(val) != 12) {
369 fprintf(stderr, "vendor string must be 12 chars long\n");
370 goto error;
371 }
372 x86_cpu_def->vendor1 = 0;
373 x86_cpu_def->vendor2 = 0;
374 x86_cpu_def->vendor3 = 0;
375 for(i = 0; i < 4; i++) {
376 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
377 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
378 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
379 }
380 } else if (!strcmp(featurestr, "model_id")) {
381 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
382 val);
383 } else {
384 fprintf(stderr, "unrecognized feature %s\n", featurestr);
385 goto error;
386 }
387 } else {
388 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
389 goto error;
390 }
391 featurestr = strtok(NULL, ",");
392 }
393 x86_cpu_def->features |= plus_features;
394 x86_cpu_def->ext_features |= plus_ext_features;
395 x86_cpu_def->ext2_features |= plus_ext2_features;
396 x86_cpu_def->ext3_features |= plus_ext3_features;
397 x86_cpu_def->features &= ~minus_features;
398 x86_cpu_def->ext_features &= ~minus_ext_features;
399 x86_cpu_def->ext2_features &= ~minus_ext2_features;
400 x86_cpu_def->ext3_features &= ~minus_ext3_features;
401 free(s);
402 return 0;
403
404error:
405 free(s);
406 return -1;
407}
408
409void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
410{
411 unsigned int i;
412
413 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
414 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
415}
416#endif /* !VBOX */
417
/* Fill in the env->cpuid_* fields from the model description named by
   cpu_model.  Returns 0 on success, -1 if the model is unknown.  In the
   VBox build the CPUID values come from elsewhere, so this is a no-op. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* Models without an explicit vendor id default to Intel's. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* family in bits 11..8, model in 7..4, stepping in 3..0 */
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the model-id string, NUL-padded to 48 bytes, into the
           twelve 32-bit cpuid_model words, little-endian byte order.
           NOTE(review): the |= accumulation assumes cpuid_model[] is
           still zeroed (freshly allocated env) - confirm callers. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif // !VBOX
    return 0;
}
459
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU back into its power-on state: real mode, reset vector
   CS:IP = F000:FFF0, default control registers, FPU and SSE state. */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* Wipe the guest-visible state but keep everything from the
       `breakpoints' member onwards (host-side bookkeeping) intact. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;  /* SVM global interrupt flag set at reset */

    cpu_x86_update_cr0(env, 0x60000010);  /* power-on CR0: CD | NW | ET */
    env->a20_mask = ~0x0;                 /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);   /* type 2 = LDT */
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);   /* type 11 = busy 32-bit TSS */

    /* Real-mode segments; CS gets base 0xffff0000 so that with
       EIP = 0xfff0 below execution starts at the 0xfffffff0 reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
#ifndef VBOX
    env->regs[R_EDX] = env->cpuid_version;
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2;  /* only the always-one reserved bit 1 set */

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;   /* tag 1 = register empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;      /* SSE default: all exceptions masked */
}
519
/* Release a CPU state created by cpu_x86_init().  In the VBox build the
   CPUX86State is owned by the caller, so nothing is freed here. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
526
527/***********************************************************/
528/* x86 debug */
529
/* Human-readable names for the lazy condition-code operations, indexed
   by env->cc_op in cpu_dump_state() below — the order of this table
   must stay in sync with the CC_OP_* enumeration. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
584
/* Pretty-print the guest CPU state (general registers, segments,
   control registers, and — depending on `flags' — the lazy condition
   code state and FPU/SSE registers) through cpu_fprintf.  The 64-bit
   and 32-bit layouts are selected by the current CS mode. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: dump all sixteen GPRs and RIP. */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit (or 16-bit) mode: only the low words are meaningful. */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* Segment registers, LDT/TR and descriptor tables; 64-bit bases
       when long mode is active. */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Lazy condition-code state; cc_op_str is indexed by cc_op. */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        /* FTW is stored inverted in fptags (1 = empty); rebuild the
           architectural tag word for display. */
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Split the 80-bit register into mantissa and sign/exponent
               via a union to avoid aliasing issues. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
791
792/***********************************************************/
793/* x86 mmu */
794/* XXX: add PGE support */
795
796void cpu_x86_set_a20(CPUX86State *env, int a20_state)
797{
798 a20_state = (a20_state != 0);
799 if (a20_state != ((env->a20_mask >> 20) & 1)) {
800#if defined(DEBUG_MMU)
801 printf("A20 update: a20=%d\n", a20_state);
802#endif
803 /* if the cpu is currently executing code, we must unlink it and
804 all the potentially executing TB */
805 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
806
807 /* when a20 is changed, all the MMU mappings are invalid, so
808 we must flush everything */
809 tlb_flush(env, 1);
810 env->a20_mask = (~0x100000) | (a20_state << 20);
811 }
812}
813
/* Load a new CR0 value and keep derived state consistent: flush the
   TLB when paging-related bits change, enter/leave long mode when PG
   toggles with EFER.LME/LMA set, and refresh the PE/ADDSEG/MP/EM/TS
   shadow copies kept in hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any change of PG/WP/PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;   /* ET is forced to 1 */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));

#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}
858
859/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
860 the PDPT */
861void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
862{
863 env->cr[3] = new_cr3;
864 if (env->cr[0] & CR0_PG_MASK) {
865#if defined(DEBUG_MMU)
866 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
867#endif
868 tlb_flush(env, 0);
869 }
870}
871
872void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
873{
874#if defined(DEBUG_MMU)
875 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
876#endif
877 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
878 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
879 tlb_flush(env, 1);
880 }
881 /* SSE handling */
882 if (!(env->cpuid_features & CPUID_SSE))
883 new_cr4 &= ~CR4_OSFXSR_MASK;
884 if (new_cr4 & CR4_OSFXSR_MASK)
885 env->hflags |= HF_OSFXSR_MASK;
886 else
887 env->hflags &= ~HF_OSFXSR_MASK;
888
889 env->cr[4] = new_cr4;
890#ifdef VBOX
891 remR3ChangeCpuMode(env);
892#endif
893}
894
/* XXX: also flush 4MB pages */
/* Invalidate the cached translation of a single guest page. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
900
901#if defined(CONFIG_USER_ONLY)
902
903int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
904 int is_write, int mmu_idx, int is_softmmu)
905{
906 /* user mode only emulation */
907 is_write &= 1;
908 env->cr[2] = addr;
909 env->error_code = (is_write << PG_ERROR_W_BIT);
910 env->error_code |= PG_ERROR_U_MASK;
911 env->exception_index = EXCP0E_PAGE;
912 return 1;
913}
914
/* User-mode emulation has no MMU: virtual and physical addresses are
   identical, so the debug lookup is the identity mapping. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
919
920#else
921
922/* XXX: This value should match the one returned by CPUID
923 * and in exec.c */
924#if defined(USE_KQEMU)
925#define PHYS_ADDR_MASK 0xfffff000LL
926#else
927# if defined(TARGET_X86_64)
928# define PHYS_ADDR_MASK 0xfffffff000LL
929# else
930# define PHYS_ADDR_MASK 0xffffff000LL
931# endif
932#endif
933
934/* return value:
935 -1 = cannot handle fault
936 0 = nothing more to do
937 1 = generate PF fault
938 2 = soft MMU activation required for this block
939*/
940int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
941 int is_write1, int mmu_idx, int is_softmmu)
942{
943 uint64_t ptep, pte;
944 target_ulong pde_addr, pte_addr;
945 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
946 target_phys_addr_t paddr;
947 uint32_t page_offset;
948 target_ulong vaddr, virt_addr;
949
950 is_user = mmu_idx == MMU_USER_IDX;
951#if defined(DEBUG_MMU)
952 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
953 addr, is_write1, is_user, env->eip);
954#endif
955 is_write = is_write1 & 1;
956
957 if (!(env->cr[0] & CR0_PG_MASK)) {
958 pte = addr;
959 virt_addr = addr & TARGET_PAGE_MASK;
960 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
961 page_size = 4096;
962 goto do_mapping;
963 }
964
965 if (env->cr[4] & CR4_PAE_MASK) {
966 uint64_t pde, pdpe;
967 target_ulong pdpe_addr;
968
969#ifdef TARGET_X86_64
970 if (env->hflags & HF_LMA_MASK) {
971 uint64_t pml4e_addr, pml4e;
972 int32_t sext;
973
974 /* test virtual address sign extension */
975 sext = (int64_t)addr >> 47;
976 if (sext != 0 && sext != -1) {
977 env->error_code = 0;
978 env->exception_index = EXCP0D_GPF;
979 return 1;
980 }
981
982 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
983 env->a20_mask;
984 pml4e = ldq_phys(pml4e_addr);
985 if (!(pml4e & PG_PRESENT_MASK)) {
986 error_code = 0;
987 goto do_fault;
988 }
989 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
990 error_code = PG_ERROR_RSVD_MASK;
991 goto do_fault;
992 }
993 if (!(pml4e & PG_ACCESSED_MASK)) {
994 pml4e |= PG_ACCESSED_MASK;
995 stl_phys_notdirty(pml4e_addr, pml4e);
996 }
997 ptep = pml4e ^ PG_NX_MASK;
998 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
999 env->a20_mask;
1000 pdpe = ldq_phys(pdpe_addr);
1001 if (!(pdpe & PG_PRESENT_MASK)) {
1002 error_code = 0;
1003 goto do_fault;
1004 }
1005 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1006 error_code = PG_ERROR_RSVD_MASK;
1007 goto do_fault;
1008 }
1009 ptep &= pdpe ^ PG_NX_MASK;
1010 if (!(pdpe & PG_ACCESSED_MASK)) {
1011 pdpe |= PG_ACCESSED_MASK;
1012 stl_phys_notdirty(pdpe_addr, pdpe);
1013 }
1014 } else
1015#endif
1016 {
1017 /* XXX: load them when cr3 is loaded ? */
1018 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1019 env->a20_mask;
1020 pdpe = ldq_phys(pdpe_addr);
1021 if (!(pdpe & PG_PRESENT_MASK)) {
1022 error_code = 0;
1023 goto do_fault;
1024 }
1025 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1026 }
1027
1028 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1029 env->a20_mask;
1030 pde = ldq_phys(pde_addr);
1031 if (!(pde & PG_PRESENT_MASK)) {
1032 error_code = 0;
1033 goto do_fault;
1034 }
1035 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1036 error_code = PG_ERROR_RSVD_MASK;
1037 goto do_fault;
1038 }
1039 ptep &= pde ^ PG_NX_MASK;
1040 if (pde & PG_PSE_MASK) {
1041 /* 2 MB page */
1042 page_size = 2048 * 1024;
1043 ptep ^= PG_NX_MASK;
1044 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1045 goto do_fault_protect;
1046 if (is_user) {
1047 if (!(ptep & PG_USER_MASK))
1048 goto do_fault_protect;
1049 if (is_write && !(ptep & PG_RW_MASK))
1050 goto do_fault_protect;
1051 } else {
1052 if ((env->cr[0] & CR0_WP_MASK) &&
1053 is_write && !(ptep & PG_RW_MASK))
1054 goto do_fault_protect;
1055 }
1056 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1057 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1058 pde |= PG_ACCESSED_MASK;
1059 if (is_dirty)
1060 pde |= PG_DIRTY_MASK;
1061 stl_phys_notdirty(pde_addr, pde);
1062 }
1063 /* align to page_size */
1064 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1065 virt_addr = addr & ~(page_size - 1);
1066 } else {
1067 /* 4 KB page */
1068 if (!(pde & PG_ACCESSED_MASK)) {
1069 pde |= PG_ACCESSED_MASK;
1070 stl_phys_notdirty(pde_addr, pde);
1071 }
1072 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1073 env->a20_mask;
1074 pte = ldq_phys(pte_addr);
1075 if (!(pte & PG_PRESENT_MASK)) {
1076 error_code = 0;
1077 goto do_fault;
1078 }
1079 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1080 error_code = PG_ERROR_RSVD_MASK;
1081 goto do_fault;
1082 }
1083 /* combine pde and pte nx, user and rw protections */
1084 ptep &= pte ^ PG_NX_MASK;
1085 ptep ^= PG_NX_MASK;
1086 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1087 goto do_fault_protect;
1088 if (is_user) {
1089 if (!(ptep & PG_USER_MASK))
1090 goto do_fault_protect;
1091 if (is_write && !(ptep & PG_RW_MASK))
1092 goto do_fault_protect;
1093 } else {
1094 if ((env->cr[0] & CR0_WP_MASK) &&
1095 is_write && !(ptep & PG_RW_MASK))
1096 goto do_fault_protect;
1097 }
1098 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1099 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1100 pte |= PG_ACCESSED_MASK;
1101 if (is_dirty)
1102 pte |= PG_DIRTY_MASK;
1103 stl_phys_notdirty(pte_addr, pte);
1104 }
1105 page_size = 4096;
1106 virt_addr = addr & ~0xfff;
1107 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1108 }
1109 } else {
1110 uint32_t pde;
1111
1112 /* page directory entry */
1113 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1114 env->a20_mask;
1115 pde = ldl_phys(pde_addr);
1116 if (!(pde & PG_PRESENT_MASK)) {
1117 error_code = 0;
1118 goto do_fault;
1119 }
1120 /* if PSE bit is set, then we use a 4MB page */
1121 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1122 page_size = 4096 * 1024;
1123 if (is_user) {
1124 if (!(pde & PG_USER_MASK))
1125 goto do_fault_protect;
1126 if (is_write && !(pde & PG_RW_MASK))
1127 goto do_fault_protect;
1128 } else {
1129 if ((env->cr[0] & CR0_WP_MASK) &&
1130 is_write && !(pde & PG_RW_MASK))
1131 goto do_fault_protect;
1132 }
1133 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1134 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1135 pde |= PG_ACCESSED_MASK;
1136 if (is_dirty)
1137 pde |= PG_DIRTY_MASK;
1138 stl_phys_notdirty(pde_addr, pde);
1139 }
1140
1141 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1142 ptep = pte;
1143 virt_addr = addr & ~(page_size - 1);
1144 } else {
1145 if (!(pde & PG_ACCESSED_MASK)) {
1146 pde |= PG_ACCESSED_MASK;
1147 stl_phys_notdirty(pde_addr, pde);
1148 }
1149
1150 /* page directory entry */
1151 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1152 env->a20_mask;
1153 pte = ldl_phys(pte_addr);
1154 if (!(pte & PG_PRESENT_MASK)) {
1155 error_code = 0;
1156 goto do_fault;
1157 }
1158 /* combine pde and pte user and rw protections */
1159 ptep = pte & pde;
1160 if (is_user) {
1161 if (!(ptep & PG_USER_MASK))
1162 goto do_fault_protect;
1163 if (is_write && !(ptep & PG_RW_MASK))
1164 goto do_fault_protect;
1165 } else {
1166 if ((env->cr[0] & CR0_WP_MASK) &&
1167 is_write && !(ptep & PG_RW_MASK))
1168 goto do_fault_protect;
1169 }
1170 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1171 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1172 pte |= PG_ACCESSED_MASK;
1173 if (is_dirty)
1174 pte |= PG_DIRTY_MASK;
1175 stl_phys_notdirty(pte_addr, pte);
1176 }
1177 page_size = 4096;
1178 virt_addr = addr & ~0xfff;
1179 }
1180 }
1181 /* the page can be put in the TLB */
1182 prot = PAGE_READ;
1183 if (!(ptep & PG_NX_MASK))
1184 prot |= PAGE_EXEC;
1185 if (pte & PG_DIRTY_MASK) {
1186 /* only set write access if already dirty... otherwise wait
1187 for dirty access */
1188 if (is_user) {
1189 if (ptep & PG_RW_MASK)
1190 prot |= PAGE_WRITE;
1191 } else {
1192 if (!(env->cr[0] & CR0_WP_MASK) ||
1193 (ptep & PG_RW_MASK))
1194 prot |= PAGE_WRITE;
1195 }
1196 }
1197 do_mapping:
1198 pte = pte & env->a20_mask;
1199
1200 /* Even if 4MB pages, we map only one 4KB page in the cache to
1201 avoid filling it too fast */
1202 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1203 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1204 vaddr = virt_addr + page_offset;
1205
1206 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1207 return ret;
1208 do_fault_protect:
1209 error_code = PG_ERROR_P_MASK;
1210 do_fault:
1211 error_code |= (is_write << PG_ERROR_W_BIT);
1212 if (is_user)
1213 error_code |= PG_ERROR_U_MASK;
1214 if (is_write1 == 2 &&
1215 (env->efer & MSR_EFER_NXE) &&
1216 (env->cr[4] & CR4_PAE_MASK))
1217 error_code |= PG_ERROR_I_D_MASK;
1218 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1219 /* cr2 is not modified in case of exceptions */
1220 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1221 addr);
1222 } else {
1223 env->cr[2] = addr;
1224 }
1225 env->error_code = error_code;
1226 env->exception_index = EXCP0E_PAGE;
1227 return 1;
1228}
1229
1230target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1231{
1232 target_ulong pde_addr, pte_addr;
1233 uint64_t pte;
1234 target_phys_addr_t paddr;
1235 uint32_t page_offset;
1236 int page_size;
1237
1238 if (env->cr[4] & CR4_PAE_MASK) {
1239 target_ulong pdpe_addr;
1240 uint64_t pde, pdpe;
1241
1242#ifdef TARGET_X86_64
1243 if (env->hflags & HF_LMA_MASK) {
1244 uint64_t pml4e_addr, pml4e;
1245 int32_t sext;
1246
1247 /* test virtual address sign extension */
1248 sext = (int64_t)addr >> 47;
1249 if (sext != 0 && sext != -1)
1250 return -1;
1251
1252 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1253 env->a20_mask;
1254 pml4e = ldq_phys(pml4e_addr);
1255 if (!(pml4e & PG_PRESENT_MASK))
1256 return -1;
1257
1258 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1259 env->a20_mask;
1260 pdpe = ldq_phys(pdpe_addr);
1261 if (!(pdpe & PG_PRESENT_MASK))
1262 return -1;
1263 } else
1264#endif
1265 {
1266 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1267 env->a20_mask;
1268 pdpe = ldq_phys(pdpe_addr);
1269 if (!(pdpe & PG_PRESENT_MASK))
1270 return -1;
1271 }
1272
1273 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1274 env->a20_mask;
1275 pde = ldq_phys(pde_addr);
1276 if (!(pde & PG_PRESENT_MASK)) {
1277 return -1;
1278 }
1279 if (pde & PG_PSE_MASK) {
1280 /* 2 MB page */
1281 page_size = 2048 * 1024;
1282 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1283 } else {
1284 /* 4 KB page */
1285 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1286 env->a20_mask;
1287 page_size = 4096;
1288 pte = ldq_phys(pte_addr);
1289 }
1290 if (!(pte & PG_PRESENT_MASK))
1291 return -1;
1292 } else {
1293 uint32_t pde;
1294
1295 if (!(env->cr[0] & CR0_PG_MASK)) {
1296 pte = addr;
1297 page_size = 4096;
1298 } else {
1299 /* page directory entry */
1300 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1301 pde = ldl_phys(pde_addr);
1302 if (!(pde & PG_PRESENT_MASK))
1303 return -1;
1304 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1305 pte = pde & ~0x003ff000; /* align to 4MB */
1306 page_size = 4096 * 1024;
1307 } else {
1308 /* page directory entry */
1309 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1310 pte = ldl_phys(pte_addr);
1311 if (!(pte & PG_PRESENT_MASK))
1312 return -1;
1313 page_size = 4096;
1314 }
1315 }
1316 pte = pte & env->a20_mask;
1317 }
1318
1319 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1320 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1321 return paddr;
1322}
1323#endif /* !CONFIG_USER_ONLY */
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette