VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 36171

最後變更 在這個檔案從36171是 36171,由 vboxsync 提交於 14 年 前

rem: Merged in changes from the branches/stable_0_10 (r7249).

  • 屬性 svn:eol-style 設為 native
檔案大小: 55.6 KB
 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include <stdarg.h>
31#include <stdlib.h>
32#include <stdio.h>
33#include <string.h>
34#ifndef VBOX
35#include <inttypes.h>
36#include <signal.h>
37#include <assert.h>
38#endif /* !VBOX */
39
40#include "cpu.h"
41#include "exec-all.h"
42#include "qemu-common.h"
43#include "kvm.h"
44
45//#define DEBUG_MMU
46
47#ifndef VBOX
/* Scan the 32-entry NAMES table for FLAGNAME; on a match set the
 * corresponding bit in *FEATURES and return 1, otherwise return 0.
 * Unused slots in NAMES are NULL and are skipped. */
static int lookup_feature_bit(const char *flagname, const char **names,
                              uint32_t *features)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (names[i] && !strcmp(flagname, names[i])) {
            /* use an unsigned constant: 1 << 31 would overflow int */
            *features |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Map the CPUID feature-flag name FLAGNAME to its bit in one of the four
 * feature words and set that bit.  Unknown names are reported on stderr
 * and otherwise ignored. */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* the words are searched in the same order as the original copy-pasted
       loops; the first table containing the name wins (e.g. "fpu" sets the
       bit in *features, not in *ext2_features) */
    if (lookup_feature_bit(flagname, feature_name, features))
        return;
    if (lookup_feature_bit(flagname, ext_feature_name, ext_features))
        return;
    if (lookup_feature_bit(flagname, ext2_feature_name, ext2_features))
        return;
    if (lookup_feature_bit(flagname, ext3_feature_name, ext3_features))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
104#endif /* !VBOX */
105
/* Definition of one built-in CPU model: everything needed to program the
   CPUID-related fields of the CPU state (see cpu_x86_register). */
typedef struct x86_def_t {
    const char *name;                   /* model name used on the command line */
    uint32_t level;                     /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* CPUID.0 vendor id words; 0 = use default */
    int family;                         /* CPUID family (base + extended) */
    int model;                          /* CPUID model */
    int stepping;                       /* CPUID stepping */
    uint32_t features, ext_features, ext2_features, ext3_features; /* CPUID.1 EDX/ECX, 0x80000001 EDX/ECX */
    uint32_t xlevel;                    /* highest extended (0x8000xxxx) CPUID leaf */
    char model_id[48];                  /* brand string (CPUID 0x80000002..4) */
} x86_def_t;
117
118#ifndef VBOX
119#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
120#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
121 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
122#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
123 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
124 CPUID_PSE36 | CPUID_FXSR)
125#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
126#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
127 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
128 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
129 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of built-in CPU models, looked up by name in
   cpu_x86_find_by_name() and consumed by cpu_x86_register().
   Entries without an explicit vendor get the GenuineIntel default. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
307
308static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
309{
310 unsigned int i;
311 x86_def_t *def;
312
313 char *s = strdup(cpu_model);
314 char *featurestr, *name = strtok(s, ",");
315 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
316 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
317 int family = -1, model = -1, stepping = -1;
318
319 def = NULL;
320 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
321 if (strcmp(name, x86_defs[i].name) == 0) {
322 def = &x86_defs[i];
323 break;
324 }
325 }
326 if (!def)
327 goto error;
328 memcpy(x86_cpu_def, def, sizeof(*def));
329
330 featurestr = strtok(NULL, ",");
331
332 while (featurestr) {
333 char *val;
334 if (featurestr[0] == '+') {
335 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
336 } else if (featurestr[0] == '-') {
337 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
338 } else if ((val = strchr(featurestr, '='))) {
339 *val = 0; val++;
340 if (!strcmp(featurestr, "family")) {
341 char *err;
342 family = strtol(val, &err, 10);
343 if (!*val || *err || family < 0) {
344 fprintf(stderr, "bad numerical value %s\n", val);
345 goto error;
346 }
347 x86_cpu_def->family = family;
348 } else if (!strcmp(featurestr, "model")) {
349 char *err;
350 model = strtol(val, &err, 10);
351 if (!*val || *err || model < 0 || model > 0xff) {
352 fprintf(stderr, "bad numerical value %s\n", val);
353 goto error;
354 }
355 x86_cpu_def->model = model;
356 } else if (!strcmp(featurestr, "stepping")) {
357 char *err;
358 stepping = strtol(val, &err, 10);
359 if (!*val || *err || stepping < 0 || stepping > 0xf) {
360 fprintf(stderr, "bad numerical value %s\n", val);
361 goto error;
362 }
363 x86_cpu_def->stepping = stepping;
364 } else if (!strcmp(featurestr, "vendor")) {
365 if (strlen(val) != 12) {
366 fprintf(stderr, "vendor string must be 12 chars long\n");
367 goto error;
368 }
369 x86_cpu_def->vendor1 = 0;
370 x86_cpu_def->vendor2 = 0;
371 x86_cpu_def->vendor3 = 0;
372 for(i = 0; i < 4; i++) {
373 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
374 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
375 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
376 }
377 } else if (!strcmp(featurestr, "model_id")) {
378 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
379 val);
380 } else {
381 fprintf(stderr, "unrecognized feature %s\n", featurestr);
382 goto error;
383 }
384 } else {
385 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
386 goto error;
387 }
388 featurestr = strtok(NULL, ",");
389 }
390 x86_cpu_def->features |= plus_features;
391 x86_cpu_def->ext_features |= plus_ext_features;
392 x86_cpu_def->ext2_features |= plus_ext2_features;
393 x86_cpu_def->ext3_features |= plus_ext3_features;
394 x86_cpu_def->features &= ~minus_features;
395 x86_cpu_def->ext_features &= ~minus_ext_features;
396 x86_cpu_def->ext2_features &= ~minus_ext2_features;
397 x86_cpu_def->ext3_features &= ~minus_ext3_features;
398 free(s);
399 return 0;
400
401error:
402 free(s);
403 return -1;
404}
405
406void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
407{
408 unsigned int i;
409
410 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
411 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
412}
413#endif /* !VBOX */
414
/* Look CPU_MODEL up and program the CPUID-related fields of ENV
   (vendor words, version, feature words, brand string).  Returns 0 on
   success, -1 if the model string is invalid.  In the VBox build CPUID
   values are managed outside the recompiler, so this compiles to a
   no-op that reports success. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* table entry carries no vendor -> default to GenuineIntel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* families above 0x0f go into the extended-family field (bits 20-27)
       with the base family forced to 0xf, per the CPUID version encoding */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* low model nibble in bits 4-7, extended model in bits 16-19 */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL; /* architectural reset value of the PAT MSR */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string into the cpuid_model words, four chars
           per 32-bit word, NUL-padded out to the full 48 bytes
           (NOTE(review): relies on cpuid_model having been zeroed, e.g.
           by the memset in cpu_reset, since bits are only OR-ed in) */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif /* !VBOX */
    return 0;
}
461
/* NOTE: must be called outside the CPU execute loop */
/* Reset ENV to the architectural power-on state: real mode with
   CS:IP = F000:FFF0, 64K segment limits, FPU/SSE control words at their
   documented defaults, and debug registers cleared. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* zero everything up to (but excluding) the breakpoint lists, which
       must survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010); /* CD | NW | ET: caches off, real mode */
    env->a20_mask = ~0x0; /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS is based at 0xffff0000 so that CS:IP = F000:FFF0 executes the
       reset vector just below 4G; data segments start at base 0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
#ifndef VBOX
    env->regs[R_EDX] = env->cpuid_version; /* EDX holds the CPU signature after reset */
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2; /* only the always-set reserved bit 1 */

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1; /* tag 1 = register empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80; /* all SSE exceptions masked */

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
532
/* Release the CPU state.  In the VBox build the CPUX86State is owned
   elsewhere and must not be freed here, so this is a no-op. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
539
540/***********************************************************/
541/* x86 debug */
542
/* Printable names for the lazy condition-code states, indexed by
   env->cc_op (used by cpu_dump_state below); the order must stay in
   sync with the CC_OP_* enumeration. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
597
/* Dump the CPU state to F in human-readable form: general-purpose
   registers, eflags, segment registers, descriptor tables and
   control/debug registers, plus -- selected by FLAGS -- the lazy
   condition-code state (X86_DUMP_CCOP) and the FPU/SSE state
   (X86_DUMP_FPU).  64-bit formats are used when the CPU is currently in
   long/64-bit mode. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env); /* refresh env from the kernel first */

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full R* register set */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* segment registers, LDT/TR, descriptor tables, CR and DR registers;
       bases are 64-bit wide when long mode is active */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* show the lazy flags state; fall back to the raw number for an
           out-of-range cc_op value */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* collapse the per-register empty tags into one byte (bit set =
           register valid) for display */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* pick apart the 80-bit long double into mantissa and
               sign/exponent halves for printing */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* only 8 XMM registers are architecturally visible outside
           64-bit mode */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
814
815/***********************************************************/
816/* x86 mmu */
817/* XXX: add PGE support */
818
819void cpu_x86_set_a20(CPUX86State *env, int a20_state)
820{
821 a20_state = (a20_state != 0);
822 if (a20_state != ((env->a20_mask >> 20) & 1)) {
823#if defined(DEBUG_MMU)
824 printf("A20 update: a20=%d\n", a20_state);
825#endif
826 /* if the cpu is currently executing code, we must unlink it and
827 all the potentially executing TB */
828 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
829
830 /* when a20 is changed, all the MMU mappings are invalid, so
831 we must flush everything */
832 tlb_flush(env, 1);
833 env->a20_mask = (~0x100000) | (a20_state << 20);
834 }
835}
836
/* Install NEW_CR0, flushing the TLB when paging/protection bits change,
   handling long-mode entry/exit, and recomputing the cached hflags
   (PE/ADDSEG and the FPU MP/EM/TS bits). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK; /* ET is hardwired to 1 */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS (bits 1-3) map onto the adjacent
       HF_MP/EM/TS hflag bits via a single shift */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX

    /* tell REM that the CPU execution mode may have changed */
    remR3ChangeCpuMode(env);
#endif
}
881
882/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
883 the PDPT */
884void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
885{
886 env->cr[3] = new_cr3;
887 if (env->cr[0] & CR0_PG_MASK) {
888#if defined(DEBUG_MMU)
889 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
890#endif
891 tlb_flush(env, 0);
892 }
893}
894
895void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
896{
897#if defined(DEBUG_MMU)
898 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
899#endif
900 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
901 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
902 tlb_flush(env, 1);
903 }
904 /* SSE handling */
905 if (!(env->cpuid_features & CPUID_SSE))
906 new_cr4 &= ~CR4_OSFXSR_MASK;
907 if (new_cr4 & CR4_OSFXSR_MASK)
908 env->hflags |= HF_OSFXSR_MASK;
909 else
910 env->hflags &= ~HF_OSFXSR_MASK;
911
912 env->cr[4] = new_cr4;
913#ifdef VBOX
914 remR3ChangeCpuMode(env);
915#endif
916}
917
918#if defined(CONFIG_USER_ONLY)
919
920int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
921 int is_write, int mmu_idx, int is_softmmu)
922{
923 /* user mode only emulation */
924 is_write &= 1;
925 env->cr[2] = addr;
926 env->error_code = (is_write << PG_ERROR_W_BIT);
927 env->error_code |= PG_ERROR_U_MASK;
928 env->exception_index = EXCP0E_PAGE;
929 return 1;
930}
931
/* User-mode-only emulation has no MMU, so guest virtual addresses map
   1:1 to "physical" addresses. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
936
937#else
938
939/* XXX: This value should match the one returned by CPUID
940 * and in exec.c */
941#if defined(USE_KQEMU)
942#define PHYS_ADDR_MASK 0xfffff000LL
943#else
944# if defined(TARGET_X86_64)
945# define PHYS_ADDR_MASK 0xfffffff000LL
946# else
947# define PHYS_ADDR_MASK 0xffffff000LL
948# endif
949#endif
950
951/* return value:
952 -1 = cannot handle fault
953 0 = nothing more to do
954 1 = generate PF fault
955 2 = soft MMU activation required for this block
956*/
957int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
958 int is_write1, int mmu_idx, int is_softmmu)
959{
960 uint64_t ptep, pte;
961 target_ulong pde_addr, pte_addr;
962 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
963 target_phys_addr_t paddr;
964 uint32_t page_offset;
965 target_ulong vaddr, virt_addr;
966
967 is_user = mmu_idx == MMU_USER_IDX;
968#if defined(DEBUG_MMU)
969 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
970 addr, is_write1, is_user, env->eip);
971#endif
972 is_write = is_write1 & 1;
973
974 if (!(env->cr[0] & CR0_PG_MASK)) {
975 pte = addr;
976 virt_addr = addr & TARGET_PAGE_MASK;
977 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
978 page_size = 4096;
979 goto do_mapping;
980 }
981
982 if (env->cr[4] & CR4_PAE_MASK) {
983 uint64_t pde, pdpe;
984 target_ulong pdpe_addr;
985
986#ifdef TARGET_X86_64
987 if (env->hflags & HF_LMA_MASK) {
988 uint64_t pml4e_addr, pml4e;
989 int32_t sext;
990
991 /* test virtual address sign extension */
992 sext = (int64_t)addr >> 47;
993 if (sext != 0 && sext != -1) {
994 env->error_code = 0;
995 env->exception_index = EXCP0D_GPF;
996 return 1;
997 }
998
999 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1000 env->a20_mask;
1001 pml4e = ldq_phys(pml4e_addr);
1002 if (!(pml4e & PG_PRESENT_MASK)) {
1003 error_code = 0;
1004 goto do_fault;
1005 }
1006 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1007 error_code = PG_ERROR_RSVD_MASK;
1008 goto do_fault;
1009 }
1010 if (!(pml4e & PG_ACCESSED_MASK)) {
1011 pml4e |= PG_ACCESSED_MASK;
1012 stl_phys_notdirty(pml4e_addr, pml4e);
1013 }
1014 ptep = pml4e ^ PG_NX_MASK;
1015 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1016 env->a20_mask;
1017 pdpe = ldq_phys(pdpe_addr);
1018 if (!(pdpe & PG_PRESENT_MASK)) {
1019 error_code = 0;
1020 goto do_fault;
1021 }
1022 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1023 error_code = PG_ERROR_RSVD_MASK;
1024 goto do_fault;
1025 }
1026 ptep &= pdpe ^ PG_NX_MASK;
1027 if (!(pdpe & PG_ACCESSED_MASK)) {
1028 pdpe |= PG_ACCESSED_MASK;
1029 stl_phys_notdirty(pdpe_addr, pdpe);
1030 }
1031 } else
1032#endif
1033 {
1034 /* XXX: load them when cr3 is loaded ? */
1035 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1036 env->a20_mask;
1037 pdpe = ldq_phys(pdpe_addr);
1038 if (!(pdpe & PG_PRESENT_MASK)) {
1039 error_code = 0;
1040 goto do_fault;
1041 }
1042 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1043 }
1044
1045 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1046 env->a20_mask;
1047 pde = ldq_phys(pde_addr);
1048 if (!(pde & PG_PRESENT_MASK)) {
1049 error_code = 0;
1050 goto do_fault;
1051 }
1052 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1053 error_code = PG_ERROR_RSVD_MASK;
1054 goto do_fault;
1055 }
1056 ptep &= pde ^ PG_NX_MASK;
1057 if (pde & PG_PSE_MASK) {
1058 /* 2 MB page */
1059 page_size = 2048 * 1024;
1060 ptep ^= PG_NX_MASK;
1061 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1062 goto do_fault_protect;
1063 if (is_user) {
1064 if (!(ptep & PG_USER_MASK))
1065 goto do_fault_protect;
1066 if (is_write && !(ptep & PG_RW_MASK))
1067 goto do_fault_protect;
1068 } else {
1069 if ((env->cr[0] & CR0_WP_MASK) &&
1070 is_write && !(ptep & PG_RW_MASK))
1071 goto do_fault_protect;
1072 }
1073 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1074 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1075 pde |= PG_ACCESSED_MASK;
1076 if (is_dirty)
1077 pde |= PG_DIRTY_MASK;
1078 stl_phys_notdirty(pde_addr, pde);
1079 }
1080 /* align to page_size */
1081 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1082 virt_addr = addr & ~(page_size - 1);
1083 } else {
1084 /* 4 KB page */
1085 if (!(pde & PG_ACCESSED_MASK)) {
1086 pde |= PG_ACCESSED_MASK;
1087 stl_phys_notdirty(pde_addr, pde);
1088 }
1089 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1090 env->a20_mask;
1091 pte = ldq_phys(pte_addr);
1092 if (!(pte & PG_PRESENT_MASK)) {
1093 error_code = 0;
1094 goto do_fault;
1095 }
1096 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1097 error_code = PG_ERROR_RSVD_MASK;
1098 goto do_fault;
1099 }
1100 /* combine pde and pte nx, user and rw protections */
1101 ptep &= pte ^ PG_NX_MASK;
1102 ptep ^= PG_NX_MASK;
1103 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1104 goto do_fault_protect;
1105 if (is_user) {
1106 if (!(ptep & PG_USER_MASK))
1107 goto do_fault_protect;
1108 if (is_write && !(ptep & PG_RW_MASK))
1109 goto do_fault_protect;
1110 } else {
1111 if ((env->cr[0] & CR0_WP_MASK) &&
1112 is_write && !(ptep & PG_RW_MASK))
1113 goto do_fault_protect;
1114 }
1115 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1116 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1117 pte |= PG_ACCESSED_MASK;
1118 if (is_dirty)
1119 pte |= PG_DIRTY_MASK;
1120 stl_phys_notdirty(pte_addr, pte);
1121 }
1122 page_size = 4096;
1123 virt_addr = addr & ~0xfff;
1124 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1125 }
1126 } else {
1127 uint32_t pde;
1128
1129 /* page directory entry */
1130 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1131 env->a20_mask;
1132 pde = ldl_phys(pde_addr);
1133 if (!(pde & PG_PRESENT_MASK)) {
1134 error_code = 0;
1135 goto do_fault;
1136 }
1137 /* if PSE bit is set, then we use a 4MB page */
1138 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1139 page_size = 4096 * 1024;
1140 if (is_user) {
1141 if (!(pde & PG_USER_MASK))
1142 goto do_fault_protect;
1143 if (is_write && !(pde & PG_RW_MASK))
1144 goto do_fault_protect;
1145 } else {
1146 if ((env->cr[0] & CR0_WP_MASK) &&
1147 is_write && !(pde & PG_RW_MASK))
1148 goto do_fault_protect;
1149 }
1150 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1151 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1152 pde |= PG_ACCESSED_MASK;
1153 if (is_dirty)
1154 pde |= PG_DIRTY_MASK;
1155 stl_phys_notdirty(pde_addr, pde);
1156 }
1157
1158 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1159 ptep = pte;
1160 virt_addr = addr & ~(page_size - 1);
1161 } else {
1162 if (!(pde & PG_ACCESSED_MASK)) {
1163 pde |= PG_ACCESSED_MASK;
1164 stl_phys_notdirty(pde_addr, pde);
1165 }
1166
1167 /* page directory entry */
1168 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1169 env->a20_mask;
1170 pte = ldl_phys(pte_addr);
1171 if (!(pte & PG_PRESENT_MASK)) {
1172 error_code = 0;
1173 goto do_fault;
1174 }
1175 /* combine pde and pte user and rw protections */
1176 ptep = pte & pde;
1177 if (is_user) {
1178 if (!(ptep & PG_USER_MASK))
1179 goto do_fault_protect;
1180 if (is_write && !(ptep & PG_RW_MASK))
1181 goto do_fault_protect;
1182 } else {
1183 if ((env->cr[0] & CR0_WP_MASK) &&
1184 is_write && !(ptep & PG_RW_MASK))
1185 goto do_fault_protect;
1186 }
1187 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1188 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1189 pte |= PG_ACCESSED_MASK;
1190 if (is_dirty)
1191 pte |= PG_DIRTY_MASK;
1192 stl_phys_notdirty(pte_addr, pte);
1193 }
1194 page_size = 4096;
1195 virt_addr = addr & ~0xfff;
1196 }
1197 }
1198 /* the page can be put in the TLB */
1199 prot = PAGE_READ;
1200 if (!(ptep & PG_NX_MASK))
1201 prot |= PAGE_EXEC;
1202 if (pte & PG_DIRTY_MASK) {
1203 /* only set write access if already dirty... otherwise wait
1204 for dirty access */
1205 if (is_user) {
1206 if (ptep & PG_RW_MASK)
1207 prot |= PAGE_WRITE;
1208 } else {
1209 if (!(env->cr[0] & CR0_WP_MASK) ||
1210 (ptep & PG_RW_MASK))
1211 prot |= PAGE_WRITE;
1212 }
1213 }
1214 do_mapping:
1215 pte = pte & env->a20_mask;
1216
1217 /* Even if 4MB pages, we map only one 4KB page in the cache to
1218 avoid filling it too fast */
1219 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1220 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1221 vaddr = virt_addr + page_offset;
1222
1223 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1224 return ret;
1225 do_fault_protect:
1226 error_code = PG_ERROR_P_MASK;
1227 do_fault:
1228 error_code |= (is_write << PG_ERROR_W_BIT);
1229 if (is_user)
1230 error_code |= PG_ERROR_U_MASK;
1231 if (is_write1 == 2 &&
1232 (env->efer & MSR_EFER_NXE) &&
1233 (env->cr[4] & CR4_PAE_MASK))
1234 error_code |= PG_ERROR_I_D_MASK;
1235 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1236 /* cr2 is not modified in case of exceptions */
1237 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1238 addr);
1239 } else {
1240 env->cr[2] = addr;
1241 }
1242 env->error_code = error_code;
1243 env->exception_index = EXCP0E_PAGE;
1244 return 1;
1245}
1246
/**
 * Translate a guest-virtual address to a guest-physical address by walking
 * the guest page tables in software (debugger/monitor use only).
 *
 * Unlike the MMU fault path, this walk is side-effect free: it does not
 * touch the TLB, does not set accessed/dirty bits, and ignores all
 * protection (user/rw/nx) checks.
 *
 * @param env  CPU state providing CR0/CR3/CR4, EFER hflags and the A20 mask
 * @param addr guest-virtual address to translate
 * @return the guest-physical address, or -1 if the address is unmapped
 *         (non-present entry, or non-canonical address in long mode)
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (3-level) or long-mode (4-level) paging: 64-bit entries. */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: bits 63..47 must all be
               equal, otherwise the address is non-canonical */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT, selected by
               address bits 31:30 (the 0x18 mask keeps an 8-byte index) */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 32-bit (non-PAE) paging, or paging disabled */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: virtual == physical */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* combine the frame base with the in-page offset of the request */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1340
1341void hw_breakpoint_insert(CPUState *env, int index)
1342{
1343 int type, err = 0;
1344
1345 switch (hw_breakpoint_type(env->dr[7], index)) {
1346 case 0:
1347 if (hw_breakpoint_enabled(env->dr[7], index))
1348 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1349 &env->cpu_breakpoint[index]);
1350 break;
1351 case 1:
1352 type = BP_CPU | BP_MEM_WRITE;
1353 goto insert_wp;
1354 case 2:
1355 /* No support for I/O watchpoints yet */
1356 break;
1357 case 3:
1358 type = BP_CPU | BP_MEM_ACCESS;
1359 insert_wp:
1360 err = cpu_watchpoint_insert(env, env->dr[index],
1361 hw_breakpoint_len(env->dr[7], index),
1362 type, &env->cpu_watchpoint[index]);
1363 break;
1364 }
1365 if (err)
1366 env->cpu_breakpoint[index] = NULL;
1367}
1368
1369void hw_breakpoint_remove(CPUState *env, int index)
1370{
1371 if (!env->cpu_breakpoint[index])
1372 return;
1373 switch (hw_breakpoint_type(env->dr[7], index)) {
1374 case 0:
1375 if (hw_breakpoint_enabled(env->dr[7], index))
1376 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1377 break;
1378 case 1:
1379 case 3:
1380 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1381 break;
1382 case 2:
1383 /* No support for I/O watchpoints yet */
1384 break;
1385 }
1386}
1387
1388int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1389{
1390 target_ulong dr6;
1391 int reg, type;
1392 int hit_enabled = 0;
1393
1394 dr6 = env->dr[6] & ~0xf;
1395 for (reg = 0; reg < 4; reg++) {
1396 type = hw_breakpoint_type(env->dr[7], reg);
1397 if ((type == 0 && env->dr[reg] == env->eip) ||
1398 ((type & 1) && env->cpu_watchpoint[reg] &&
1399 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1400 dr6 |= 1 << reg;
1401 if (hw_breakpoint_enabled(env->dr[7], reg))
1402 hit_enabled = 1;
1403 }
1404 }
1405 if (hit_enabled || force_dr6_update)
1406 env->dr[6] = dr6;
1407 return hit_enabled;
1408}
1409
1410static CPUDebugExcpHandler *prev_debug_excp_handler;
1411
1412void raise_exception(int exception_index);
1413
1414static void breakpoint_handler(CPUState *env)
1415{
1416 CPUBreakpoint *bp;
1417
1418 if (env->watchpoint_hit) {
1419 if (env->watchpoint_hit->flags & BP_CPU) {
1420 env->watchpoint_hit = NULL;
1421 if (check_hw_breakpoints(env, 0))
1422 raise_exception(EXCP01_DB);
1423 else
1424 cpu_resume_from_signal(env, NULL);
1425 }
1426 } else {
1427 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1428 if (bp->pc == env->eip) {
1429 if (bp->flags & BP_CPU) {
1430 check_hw_breakpoints(env, 1);
1431 raise_exception(EXCP01_DB);
1432 }
1433 break;
1434 }
1435 }
1436 if (prev_debug_excp_handler)
1437 prev_debug_excp_handler(env);
1438}
1439#endif /* !CONFIG_USER_ONLY */
1440
1441#ifndef VBOX
/**
 * Execute the CPUID instruction on the *host* CPU and return its raw
 * register values.
 *
 * Only does real work when CONFIG_KVM is defined; otherwise the body is
 * compiled out and the output parameters are left untouched (all callers
 * guard the call with kvm_enabled()).
 *
 * @param function value loaded into EAX (CPUID leaf)
 * @param count    value loaded into ECX (sub-leaf)
 * @param eax,ebx,ecx,edx  optional result slots; NULL pointers are skipped
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit variant: results are stored through %2 (vec) inside a
       pusha/popa pair, presumably so EBX (the PIC base register) is never
       named as an output/clobber — NOTE(review): confirm PIC rationale. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* Copy out only the registers the caller asked for. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1476
/**
 * Implement the guest-visible CPUID instruction: fill EAX/EBX/ECX/EDX for
 * the requested leaf (index) and sub-leaf (count) from the emulated CPU
 * model in 'env', with a few host pass-throughs when running under KVM.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached; out-of-range leaves fall back to the
       highest *basic* leaf, matching real-hardware behavior */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and highest basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual host CPU vendor, and say goodbye to
         * migration between different vendors if you use compatibility
         * mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in 8-byte quadwords (8 -> 64 bytes); Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility; count selects the
           cache level being described */
        switch (count) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* highest extended leaf plus vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1677#endif /* !VBOX */
1678
/**
 * Create and initialize an x86 CPU state for the given model string.
 *
 * Upstream (non-VBOX) build: allocates the CPUX86State itself and returns
 * it.  VBOX build: the caller supplies a pre-allocated 'env' instead.
 * Returns NULL if the model name is unknown (cpu_x86_register fails).
 */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables (and the debug-exception hook) exactly
       once, on the first CPU created */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        /* NOTE(review): in the VBOX build 'env' is caller-owned; confirm
           cpu_x86_close() does not free it on this error path. */
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette