VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 33656

Last change to this file since 33656 was 33656, committed by vboxsync 14 years ago

*: rebrand Sun (L)GPL disclaimers

  • Property svn:eol-style set to native
File size: 43.9 KB
 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include <stdarg.h>
31#include <stdlib.h>
32#include <stdio.h>
33#include <string.h>
34#ifndef VBOX
35#include <inttypes.h>
36#include <signal.h>
37#include <assert.h>
38#endif
39
40#include "cpu.h"
41#include "exec-all.h"
42#include "svm.h"
43#include "qemu-common.h"
44
45//#define DEBUG_MMU
46
47static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
48
49#ifndef VBOX
/* Scan one 32-entry CPUID feature-name table for @flagname; on a match,
 * set the corresponding bit in *pval and report success.  NULL table slots
 * are reserved/unnamed bits and never match. */
static int lookup_feature(uint32_t *pval, const char *flagname,
                          const char **featureset)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (featureset[i] && !strcmp(flagname, featureset[i])) {
            *pval |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Translate a CPUID feature-flag name (as used on the -cpu command line)
 * into a bit in one of the four feature words:
 *   @features      - CPUID.1:EDX
 *   @ext_features  - CPUID.1:ECX
 *   @ext2_features - CPUID.80000001:EDX
 *   @ext3_features - CPUID.80000001:ECX
 * The tables are searched in that order and only the first match is set,
 * so names shared between leaves (e.g. "fpu") land in the base word.
 * Unknown names are reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        /* bit 12 was misspelled "mttr" upstream; correct name is "mtrr" */
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    if (lookup_feature(features, flagname, feature_name))
        return;
    if (lookup_feature(ext_features, flagname, ext_feature_name))
        return;
    if (lookup_feature(ext2_features, flagname, ext2_feature_name))
        return;
    if (lookup_feature(ext3_features, flagname, ext3_feature_name))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
106#endif /* !VBOX */
#ifndef VBOX
/* Allocate and initialize a new x86 CPU state for the given model string.
 * Returns NULL on allocation or model-lookup failure. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#else
/* VBox variant: the caller owns and supplies the CPUX86State; this only
 * initializes it.  Note the different signature under #ifdef VBOX. */
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
{
    static int inited;
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables (translator state); done once per process */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    /* resolve the model string into CPUID values; on failure release env
     * (cpu_x86_close is a no-op for VBox, where the caller owns env) */
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
139
/* Static description of a CPU model: everything needed to fill in the
 * CPUID-related fields of CPUX86State.  Instances are copied wholesale
 * with memcpy, so the layout must stay in sync with its users. */
typedef struct x86_def_t {
    const char *name;                   /* model name matched against -cpu */
    uint32_t level;                     /* max standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* CPUID.0 EBX/EDX/ECX vendor id */
    int family;
    int model;
    int stepping;
    /* CPUID.1 EDX/ECX and CPUID.80000001 EDX/ECX feature words */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* max extended CPUID leaf */
    char model_id[48];                  /* brand string, CPUID.80000002-4 */
} x86_def_t;
151
152#ifndef VBOX
/* Cumulative CPUID.1:EDX feature sets for the historical Intel models;
 * each builds on the previous generation. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Table of the built-in CPU models selectable with -cpu.  Entries with a
 * zero vendor get the Intel vendor id by default in cpu_x86_register(). */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* 0x0183F3FF masks PPRO_FEATURES down to the bits that also exist
         * in the extended (0x80000001) EDX leaf */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
313
314static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
315{
316 unsigned int i;
317 x86_def_t *def;
318
319 char *s = strdup(cpu_model);
320 char *featurestr, *name = strtok(s, ",");
321 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
322 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
323 int family = -1, model = -1, stepping = -1;
324
325 def = NULL;
326 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
327 if (strcmp(name, x86_defs[i].name) == 0) {
328 def = &x86_defs[i];
329 break;
330 }
331 }
332 if (!def)
333 goto error;
334 memcpy(x86_cpu_def, def, sizeof(*def));
335
336 featurestr = strtok(NULL, ",");
337
338 while (featurestr) {
339 char *val;
340 if (featurestr[0] == '+') {
341 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
342 } else if (featurestr[0] == '-') {
343 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
344 } else if ((val = strchr(featurestr, '='))) {
345 *val = 0; val++;
346 if (!strcmp(featurestr, "family")) {
347 char *err;
348 family = strtol(val, &err, 10);
349 if (!*val || *err || family < 0) {
350 fprintf(stderr, "bad numerical value %s\n", val);
351 goto error;
352 }
353 x86_cpu_def->family = family;
354 } else if (!strcmp(featurestr, "model")) {
355 char *err;
356 model = strtol(val, &err, 10);
357 if (!*val || *err || model < 0 || model > 0xf) {
358 fprintf(stderr, "bad numerical value %s\n", val);
359 goto error;
360 }
361 x86_cpu_def->model = model;
362 } else if (!strcmp(featurestr, "stepping")) {
363 char *err;
364 stepping = strtol(val, &err, 10);
365 if (!*val || *err || stepping < 0 || stepping > 0xf) {
366 fprintf(stderr, "bad numerical value %s\n", val);
367 goto error;
368 }
369 x86_cpu_def->stepping = stepping;
370 } else if (!strcmp(featurestr, "vendor")) {
371 if (strlen(val) != 12) {
372 fprintf(stderr, "vendor string must be 12 chars long\n");
373 goto error;
374 }
375 x86_cpu_def->vendor1 = 0;
376 x86_cpu_def->vendor2 = 0;
377 x86_cpu_def->vendor3 = 0;
378 for(i = 0; i < 4; i++) {
379 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
380 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
381 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
382 }
383 } else if (!strcmp(featurestr, "model_id")) {
384 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
385 val);
386 } else {
387 fprintf(stderr, "unrecognized feature %s\n", featurestr);
388 goto error;
389 }
390 } else {
391 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
392 goto error;
393 }
394 featurestr = strtok(NULL, ",");
395 }
396 x86_cpu_def->features |= plus_features;
397 x86_cpu_def->ext_features |= plus_ext_features;
398 x86_cpu_def->ext2_features |= plus_ext2_features;
399 x86_cpu_def->ext3_features |= plus_ext3_features;
400 x86_cpu_def->features &= ~minus_features;
401 x86_cpu_def->ext_features &= ~minus_ext_features;
402 x86_cpu_def->ext2_features &= ~minus_ext2_features;
403 x86_cpu_def->ext3_features &= ~minus_ext3_features;
404 free(s);
405 return 0;
406
407error:
408 free(s);
409 return -1;
410}
411
412void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
413{
414 unsigned int i;
415
416 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
417 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
418}
419#endif /* !VBOX */
420
/* Resolve @cpu_model and copy the resulting CPUID description into @env.
 * Returns 0 on success, -1 if the model string could not be parsed.
 * Under VBox this is a no-op (CPUID values are supplied by the VMM). */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    /* models without an explicit vendor default to GenuineIntel */
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* CPUID.1:EAX layout: family in bits 11:8, model 7:4, stepping 3:0 */
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    /* power-on default for the IA32_PAT MSR */
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string into the 12 32-bit words returned by
         * CPUID.80000002-80000004, NUL-padded to 48 bytes */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif // !VBOX
    return 0;
}
462
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/reset state: real mode,
 * CS:IP = F000:FFF0, all caches/flags cleared. */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* clear everything up to (but not including) the breakpoint state,
     * which survives reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;       /* SVM global interrupt flag on */

    cpu_x86_update_cr0(env, 0x60000010); /* CD and NW set, PE clear */
    env->a20_mask = ~0x0;               /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);  /* LDT type */
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);  /* busy TSS type */

    /* real-mode segments; CS base reflects the reset vector alias */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
#ifndef VBOX
    /* architecturally EDX holds the CPU signature after reset */
    env->regs[R_EDX] = env->cpuid_version;
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2;                  /* only the reserved bit set */

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;                /* SSE default: all exceptions masked */
}
522
/* Release a CPU state created by cpu_x86_init().  Under VBox the state is
 * owned by the caller, so this is a no-op there. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
529
530/***********************************************************/
531/* x86 debug */
532
/* Printable names for the lazy condition-code operations, indexed by
 * env->cc_op (used by cpu_dump_state).  The order must exactly match the
 * CC_OP_* enumeration: DYNAMIC, EFLAGS, then one row of B/W/L/Q variants
 * per arithmetic group. */
static const char *cc_op_str[] = {
    "DYNAMIC", "EFLAGS",

    "MULB",   "MULW",   "MULL",   "MULQ",
    "ADDB",   "ADDW",   "ADDL",   "ADDQ",
    "ADCB",   "ADCW",   "ADCL",   "ADCQ",
    "SUBB",   "SUBW",   "SUBL",   "SUBQ",
    "SBBB",   "SBBW",   "SBBL",   "SBBQ",
    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",
    "INCB",   "INCW",   "INCL",   "INCQ",
    "DECB",   "DECW",   "DECL",   "DECQ",
    "SHLB",   "SHLW",   "SHLL",   "SHLQ",
    "SARB",   "SARW",   "SARL",   "SARQ",
};
587
/* Dump the full architectural CPU state (GPRs, flags, segments, control
 * registers, and optionally CC/FPU/SSE state) through the fprintf-style
 * callback.  @flags is a mask of X86_DUMP_CCOP / X86_DUMP_FPU. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    /* 64-bit code segment: dump the full RAX..R15 register file */
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit (or 16-bit) mode: EAX..ESP only */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    /* long-mode segment dump uses 64-bit bases */
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* show the lazy condition-code state; fall back to the raw number
         * if cc_op is out of range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the packed 8-bit tag word from the per-register tags */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* print the raw 80-bit extended-precision bits via a union */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
794
795/***********************************************************/
796/* x86 mmu */
797/* XXX: add PGE support */
798
799void cpu_x86_set_a20(CPUX86State *env, int a20_state)
800{
801 a20_state = (a20_state != 0);
802 if (a20_state != ((env->a20_mask >> 20) & 1)) {
803#if defined(DEBUG_MMU)
804 printf("A20 update: a20=%d\n", a20_state);
805#endif
806 /* if the cpu is currently executing code, we must unlink it and
807 all the potentially executing TB */
808 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
809
810 /* when a20 is changed, all the MMU mappings are invalid, so
811 we must flush everything */
812 tlb_flush(env, 1);
813 env->a20_mask = (~0x100000) | (a20_state << 20);
814 }
815}
816
/* Install a new CR0 value, handling the side effects: TLB flush on
 * paging/protection changes, long-mode entry/exit, and the derived
 * hflags (PE/ADDSEG/MP/EM/TS). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG/WP/PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* enabling paging with EFER.LME set activates long mode */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is architecturally hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy CR0.MP/EM/TS into the hflags cache */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));

#ifdef VBOX
    /* let the VBox recompiler glue re-evaluate the CPU mode */
    remR3ChangeCpuMode(env);
#endif
}
861
862/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
863 the PDPT */
864void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
865{
866 env->cr[3] = new_cr3;
867 if (env->cr[0] & CR0_PG_MASK) {
868#if defined(DEBUG_MMU)
869 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
870#endif
871 tlb_flush(env, 0);
872 }
873}
874
875void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
876{
877#if defined(DEBUG_MMU)
878 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
879#endif
880 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
881 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
882 tlb_flush(env, 1);
883 }
884 /* SSE handling */
885 if (!(env->cpuid_features & CPUID_SSE))
886 new_cr4 &= ~CR4_OSFXSR_MASK;
887 if (new_cr4 & CR4_OSFXSR_MASK)
888 env->hflags |= HF_OSFXSR_MASK;
889 else
890 env->hflags &= ~HF_OSFXSR_MASK;
891
892 env->cr[4] = new_cr4;
893#ifdef VBOX
894 remR3ChangeCpuMode(env);
895#endif
896}
897
898/* XXX: also flush 4MB pages */
/* Invalidate the translation for a single page (INVLPG semantics).
 * Thin wrapper around tlb_flush_page(). */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
903
904#if defined(CONFIG_USER_ONLY)
905
/* User-mode-only fault handler: every miss is a page fault delivered to
 * the guest.  Records the fault address in CR2 and a user-mode error code,
 * then returns 1 (generate #PF). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
917
/* User-mode emulation has no MMU: virtual and physical addresses are
 * identical, so debug translation is the identity mapping. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
922
923#else
924
925/* XXX: This value should match the one returned by CPUID
926 * and in exec.c */
927#if defined(USE_KQEMU)
928#define PHYS_ADDR_MASK 0xfffff000LL
929#else
930# if defined(TARGET_X86_64)
931# define PHYS_ADDR_MASK 0xfffffff000LL
932# else
933# define PHYS_ADDR_MASK 0xffffff000LL
934# endif
935#endif
936
937/* return value:
938 -1 = cannot handle fault
939 0 = nothing more to do
940 1 = generate PF fault
941 2 = soft MMU activation required for this block
942*/
943int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
944 int is_write1, int mmu_idx, int is_softmmu)
945{
946 uint64_t ptep, pte;
947 target_ulong pde_addr, pte_addr;
948 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
949 target_phys_addr_t paddr;
950 uint32_t page_offset;
951 target_ulong vaddr, virt_addr;
952
953 is_user = mmu_idx == MMU_USER_IDX;
954#if defined(DEBUG_MMU)
955 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
956 addr, is_write1, is_user, env->eip);
957#endif
958 is_write = is_write1 & 1;
959
960 if (!(env->cr[0] & CR0_PG_MASK)) {
961 pte = addr;
962 virt_addr = addr & TARGET_PAGE_MASK;
963 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
964 page_size = 4096;
965 goto do_mapping;
966 }
967
968 if (env->cr[4] & CR4_PAE_MASK) {
969 uint64_t pde, pdpe;
970 target_ulong pdpe_addr;
971
972#ifdef TARGET_X86_64
973 if (env->hflags & HF_LMA_MASK) {
974 uint64_t pml4e_addr, pml4e;
975 int32_t sext;
976
977 /* test virtual address sign extension */
978 sext = (int64_t)addr >> 47;
979 if (sext != 0 && sext != -1) {
980 env->error_code = 0;
981 env->exception_index = EXCP0D_GPF;
982 return 1;
983 }
984
985 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
986 env->a20_mask;
987 pml4e = ldq_phys(pml4e_addr);
988 if (!(pml4e & PG_PRESENT_MASK)) {
989 error_code = 0;
990 goto do_fault;
991 }
992 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
993 error_code = PG_ERROR_RSVD_MASK;
994 goto do_fault;
995 }
996 if (!(pml4e & PG_ACCESSED_MASK)) {
997 pml4e |= PG_ACCESSED_MASK;
998 stl_phys_notdirty(pml4e_addr, pml4e);
999 }
1000 ptep = pml4e ^ PG_NX_MASK;
1001 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1002 env->a20_mask;
1003 pdpe = ldq_phys(pdpe_addr);
1004 if (!(pdpe & PG_PRESENT_MASK)) {
1005 error_code = 0;
1006 goto do_fault;
1007 }
1008 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1009 error_code = PG_ERROR_RSVD_MASK;
1010 goto do_fault;
1011 }
1012 ptep &= pdpe ^ PG_NX_MASK;
1013 if (!(pdpe & PG_ACCESSED_MASK)) {
1014 pdpe |= PG_ACCESSED_MASK;
1015 stl_phys_notdirty(pdpe_addr, pdpe);
1016 }
1017 } else
1018#endif
1019 {
1020 /* XXX: load them when cr3 is loaded ? */
1021 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1022 env->a20_mask;
1023 pdpe = ldq_phys(pdpe_addr);
1024 if (!(pdpe & PG_PRESENT_MASK)) {
1025 error_code = 0;
1026 goto do_fault;
1027 }
1028 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1029 }
1030
1031 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1032 env->a20_mask;
1033 pde = ldq_phys(pde_addr);
1034 if (!(pde & PG_PRESENT_MASK)) {
1035 error_code = 0;
1036 goto do_fault;
1037 }
1038 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1039 error_code = PG_ERROR_RSVD_MASK;
1040 goto do_fault;
1041 }
1042 ptep &= pde ^ PG_NX_MASK;
1043 if (pde & PG_PSE_MASK) {
1044 /* 2 MB page */
1045 page_size = 2048 * 1024;
1046 ptep ^= PG_NX_MASK;
1047 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1048 goto do_fault_protect;
1049 if (is_user) {
1050 if (!(ptep & PG_USER_MASK))
1051 goto do_fault_protect;
1052 if (is_write && !(ptep & PG_RW_MASK))
1053 goto do_fault_protect;
1054 } else {
1055 if ((env->cr[0] & CR0_WP_MASK) &&
1056 is_write && !(ptep & PG_RW_MASK))
1057 goto do_fault_protect;
1058 }
1059 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1060 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1061 pde |= PG_ACCESSED_MASK;
1062 if (is_dirty)
1063 pde |= PG_DIRTY_MASK;
1064 stl_phys_notdirty(pde_addr, pde);
1065 }
1066 /* align to page_size */
1067 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1068 virt_addr = addr & ~(page_size - 1);
1069 } else {
1070 /* 4 KB page */
1071 if (!(pde & PG_ACCESSED_MASK)) {
1072 pde |= PG_ACCESSED_MASK;
1073 stl_phys_notdirty(pde_addr, pde);
1074 }
1075 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1076 env->a20_mask;
1077 pte = ldq_phys(pte_addr);
1078 if (!(pte & PG_PRESENT_MASK)) {
1079 error_code = 0;
1080 goto do_fault;
1081 }
1082 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1083 error_code = PG_ERROR_RSVD_MASK;
1084 goto do_fault;
1085 }
1086 /* combine pde and pte nx, user and rw protections */
1087 ptep &= pte ^ PG_NX_MASK;
1088 ptep ^= PG_NX_MASK;
1089 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1090 goto do_fault_protect;
1091 if (is_user) {
1092 if (!(ptep & PG_USER_MASK))
1093 goto do_fault_protect;
1094 if (is_write && !(ptep & PG_RW_MASK))
1095 goto do_fault_protect;
1096 } else {
1097 if ((env->cr[0] & CR0_WP_MASK) &&
1098 is_write && !(ptep & PG_RW_MASK))
1099 goto do_fault_protect;
1100 }
1101 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1102 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1103 pte |= PG_ACCESSED_MASK;
1104 if (is_dirty)
1105 pte |= PG_DIRTY_MASK;
1106 stl_phys_notdirty(pte_addr, pte);
1107 }
1108 page_size = 4096;
1109 virt_addr = addr & ~0xfff;
1110 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1111 }
1112 } else {
1113 uint32_t pde;
1114
1115 /* page directory entry */
1116 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1117 env->a20_mask;
1118 pde = ldl_phys(pde_addr);
1119 if (!(pde & PG_PRESENT_MASK)) {
1120 error_code = 0;
1121 goto do_fault;
1122 }
1123 /* if PSE bit is set, then we use a 4MB page */
1124 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1125 page_size = 4096 * 1024;
1126 if (is_user) {
1127 if (!(pde & PG_USER_MASK))
1128 goto do_fault_protect;
1129 if (is_write && !(pde & PG_RW_MASK))
1130 goto do_fault_protect;
1131 } else {
1132 if ((env->cr[0] & CR0_WP_MASK) &&
1133 is_write && !(pde & PG_RW_MASK))
1134 goto do_fault_protect;
1135 }
1136 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1137 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1138 pde |= PG_ACCESSED_MASK;
1139 if (is_dirty)
1140 pde |= PG_DIRTY_MASK;
1141 stl_phys_notdirty(pde_addr, pde);
1142 }
1143
1144 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1145 ptep = pte;
1146 virt_addr = addr & ~(page_size - 1);
1147 } else {
1148 if (!(pde & PG_ACCESSED_MASK)) {
1149 pde |= PG_ACCESSED_MASK;
1150 stl_phys_notdirty(pde_addr, pde);
1151 }
1152
1153 /* page directory entry */
1154 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1155 env->a20_mask;
1156 pte = ldl_phys(pte_addr);
1157 if (!(pte & PG_PRESENT_MASK)) {
1158 error_code = 0;
1159 goto do_fault;
1160 }
1161 /* combine pde and pte user and rw protections */
1162 ptep = pte & pde;
1163 if (is_user) {
1164 if (!(ptep & PG_USER_MASK))
1165 goto do_fault_protect;
1166 if (is_write && !(ptep & PG_RW_MASK))
1167 goto do_fault_protect;
1168 } else {
1169 if ((env->cr[0] & CR0_WP_MASK) &&
1170 is_write && !(ptep & PG_RW_MASK))
1171 goto do_fault_protect;
1172 }
1173 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1174 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1175 pte |= PG_ACCESSED_MASK;
1176 if (is_dirty)
1177 pte |= PG_DIRTY_MASK;
1178 stl_phys_notdirty(pte_addr, pte);
1179 }
1180 page_size = 4096;
1181 virt_addr = addr & ~0xfff;
1182 }
1183 }
1184 /* the page can be put in the TLB */
1185 prot = PAGE_READ;
1186 if (!(ptep & PG_NX_MASK))
1187 prot |= PAGE_EXEC;
1188 if (pte & PG_DIRTY_MASK) {
1189 /* only set write access if already dirty... otherwise wait
1190 for dirty access */
1191 if (is_user) {
1192 if (ptep & PG_RW_MASK)
1193 prot |= PAGE_WRITE;
1194 } else {
1195 if (!(env->cr[0] & CR0_WP_MASK) ||
1196 (ptep & PG_RW_MASK))
1197 prot |= PAGE_WRITE;
1198 }
1199 }
1200 do_mapping:
1201 pte = pte & env->a20_mask;
1202
1203 /* Even if 4MB pages, we map only one 4KB page in the cache to
1204 avoid filling it too fast */
1205 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1206 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1207 vaddr = virt_addr + page_offset;
1208
1209 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1210 return ret;
1211 do_fault_protect:
1212 error_code = PG_ERROR_P_MASK;
1213 do_fault:
1214 error_code |= (is_write << PG_ERROR_W_BIT);
1215 if (is_user)
1216 error_code |= PG_ERROR_U_MASK;
1217 if (is_write1 == 2 &&
1218 (env->efer & MSR_EFER_NXE) &&
1219 (env->cr[4] & CR4_PAE_MASK))
1220 error_code |= PG_ERROR_I_D_MASK;
1221 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1222 /* cr2 is not modified in case of exceptions */
1223 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1224 addr);
1225 } else {
1226 env->cr[2] = addr;
1227 }
1228 env->error_code = error_code;
1229 env->exception_index = EXCP0E_PAGE;
1230 return 1;
1231}
1232
/* Translate a guest-virtual address to a guest-physical address for
   debugger use.  Unlike the MMU fault path this walk is strictly
   read-only: it raises no exceptions, does not set Accessed/Dirty
   bits, and does not fill the TLB.

   Returns the physical address of the byte at 'addr', or -1 (all
   bits set) if the mapping is not present or the address is
   non-canonical in long mode. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE paging: 64-bit entries. */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* Long mode: 4-level walk PML4 -> PDPT -> PD -> PT. */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;      /* non-canonical address */

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            /* NOTE(review): the fault path masks table pointers with
               PHYS_ADDR_MASK, while this debug walk uses ~0xfff and so
               keeps any high attribute/reserved bits (e.g. NX) in the
               next-level address — verify against handle_mmu_fault. */
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* Legacy PAE: CR3 points to the 4-entry PDPT (32-byte
               aligned); bits 31:30 of the address select the entry. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        /* Present check also covers the 2 MB case (pte == pde there). */
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* Legacy 32-bit (non-PAE) paging, or paging disabled. */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* Paging disabled: identity mapping. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE): the PDE itself maps the page. */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        /* Apply the A20 gate to the final frame address. */
        pte = pte & env->a20_mask;
    }

    /* Combine the page frame with the sub-page offset.  TARGET_PAGE_MASK
       keeps the low bits below the 4 KB granularity out of 'pte'. */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1326#endif /* !CONFIG_USER_ONLY */
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette