VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 36887

最後變更（在這個檔案，從 36887 起）是 36175，由 vboxsync 於 14 年前提交

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

  • 屬性 svn:eol-style 設為 native
檔案大小: 61.5 KB
 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#endif /* !VBOX */
37
38#include "cpu.h"
39#include "exec-all.h"
40#include "qemu-common.h"
41#include "kvm.h"
42
43//#define DEBUG_MMU
44
45#ifndef VBOX
46/* feature flags taken from "Intel Processor Identification and the CPUID
47 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
48 * about feature names, the Linux name is used. */
49static const char *feature_name[] = {
50 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
51 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
52 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
53 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
54};
55static const char *ext_feature_name[] = {
56 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
57 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
58 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
59 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
60};
61static const char *ext2_feature_name[] = {
62 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
63 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
64 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
65 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
66};
67static const char *ext3_feature_name[] = {
68 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
69 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
71 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72};
73
74static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
75 uint32_t *ext_features,
76 uint32_t *ext2_features,
77 uint32_t *ext3_features)
78{
79 int i;
80 int found = 0;
81
82 for ( i = 0 ; i < 32 ; i++ )
83 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
84 *features |= 1 << i;
85 found = 1;
86 }
87 for ( i = 0 ; i < 32 ; i++ )
88 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
89 *ext_features |= 1 << i;
90 found = 1;
91 }
92 for ( i = 0 ; i < 32 ; i++ )
93 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
94 *ext2_features |= 1 << i;
95 found = 1;
96 }
97 for ( i = 0 ; i < 32 ; i++ )
98 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
99 *ext3_features |= 1 << i;
100 found = 1;
101 }
102 if (!found) {
103 fprintf(stderr, "CPU feature %s not found\n", flagname);
104 }
105}
106#endif /* !VBOX */
107
108typedef struct x86_def_t {
109 const char *name;
110 uint32_t level;
111 uint32_t vendor1, vendor2, vendor3;
112 int family;
113 int model;
114 int stepping;
115 uint32_t features, ext_features, ext2_features, ext3_features;
116 uint32_t xlevel;
117 char model_id[48];
118 int vendor_override;
119} x86_def_t;
120
121#ifndef VBOX
122#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
123#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
124 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
125#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
126 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
127 CPUID_PSE36 | CPUID_FXSR)
128#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
129#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
130 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
131 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
132 CPUID_PAE | CPUID_SEP | CPUID_APIC)
133static x86_def_t x86_defs[] = {
134#ifdef TARGET_X86_64
135 {
136 .name = "qemu64",
137 .level = 2,
138 .vendor1 = CPUID_VENDOR_AMD_1,
139 .vendor2 = CPUID_VENDOR_AMD_2,
140 .vendor3 = CPUID_VENDOR_AMD_3,
141 .family = 6,
142 .model = 2,
143 .stepping = 3,
144 .features = PPRO_FEATURES |
145 /* these features are needed for Win64 and aren't fully implemented */
146 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
147 /* this feature is needed for Solaris and isn't fully implemented */
148 CPUID_PSE36,
149 .ext_features = CPUID_EXT_SSE3,
150 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
151 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
152 .ext3_features = CPUID_EXT3_SVM,
153 .xlevel = 0x8000000A,
154 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
155 },
156 {
157 .name = "phenom",
158 .level = 5,
159 .vendor1 = CPUID_VENDOR_AMD_1,
160 .vendor2 = CPUID_VENDOR_AMD_2,
161 .vendor3 = CPUID_VENDOR_AMD_3,
162 .family = 16,
163 .model = 2,
164 .stepping = 3,
165 /* Missing: CPUID_VME, CPUID_HT */
166 .features = PPRO_FEATURES |
167 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
168 CPUID_PSE36,
169 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
170 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
171 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
172 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
173 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
174 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
175 CPUID_EXT2_FFXSR,
176 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
177 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
178 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
179 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
180 .ext3_features = CPUID_EXT3_SVM,
181 .xlevel = 0x8000001A,
182 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
183 },
184 {
185 .name = "core2duo",
186 .level = 10,
187 .family = 6,
188 .model = 15,
189 .stepping = 11,
190 /* The original CPU also implements these features:
191 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
192 CPUID_TM, CPUID_PBE */
193 .features = PPRO_FEATURES |
194 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
195 CPUID_PSE36,
196 /* The original CPU also implements these ext features:
197 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
198 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
199 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
200 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
201 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
202 .xlevel = 0x80000008,
203 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
204 },
205#endif
206 {
207 .name = "qemu32",
208 .level = 2,
209 .family = 6,
210 .model = 3,
211 .stepping = 3,
212 .features = PPRO_FEATURES,
213 .ext_features = CPUID_EXT_SSE3,
214 .xlevel = 0,
215 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
216 },
217 {
218 .name = "coreduo",
219 .level = 10,
220 .family = 6,
221 .model = 14,
222 .stepping = 8,
223 /* The original CPU also implements these features:
224 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
225 CPUID_TM, CPUID_PBE */
226 .features = PPRO_FEATURES | CPUID_VME |
227 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
228 /* The original CPU also implements these ext features:
229 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
230 CPUID_EXT_PDCM */
231 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
232 .ext2_features = CPUID_EXT2_NX,
233 .xlevel = 0x80000008,
234 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
235 },
236 {
237 .name = "486",
238 .level = 0,
239 .family = 4,
240 .model = 0,
241 .stepping = 0,
242 .features = I486_FEATURES,
243 .xlevel = 0,
244 },
245 {
246 .name = "pentium",
247 .level = 1,
248 .family = 5,
249 .model = 4,
250 .stepping = 3,
251 .features = PENTIUM_FEATURES,
252 .xlevel = 0,
253 },
254 {
255 .name = "pentium2",
256 .level = 2,
257 .family = 6,
258 .model = 5,
259 .stepping = 2,
260 .features = PENTIUM2_FEATURES,
261 .xlevel = 0,
262 },
263 {
264 .name = "pentium3",
265 .level = 2,
266 .family = 6,
267 .model = 7,
268 .stepping = 3,
269 .features = PENTIUM3_FEATURES,
270 .xlevel = 0,
271 },
272 {
273 .name = "athlon",
274 .level = 2,
275 .vendor1 = CPUID_VENDOR_AMD_1,
276 .vendor2 = CPUID_VENDOR_AMD_2,
277 .vendor3 = CPUID_VENDOR_AMD_3,
278 .family = 6,
279 .model = 2,
280 .stepping = 3,
281 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
282 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
283 .xlevel = 0x80000008,
284 /* XXX: put another string ? */
285 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
286 },
287 {
288 .name = "n270",
289 /* original is on level 10 */
290 .level = 5,
291 .family = 6,
292 .model = 28,
293 .stepping = 2,
294 .features = PPRO_FEATURES |
295 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
296 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
297 * CPUID_HT | CPUID_TM | CPUID_PBE */
298 /* Some CPUs got no CPUID_SEP */
299 .ext_features = CPUID_EXT_MONITOR |
300 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
301 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
302 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
303 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
304 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
305 .xlevel = 0x8000000A,
306 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
307 },
308};
309
310static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
311 uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
312
313static int cpu_x86_fill_model_id(char *str)
314{
315 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
316 int i;
317
318 for (i = 0; i < 3; i++) {
319 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
320 memcpy(str + i * 16 + 0, &eax, 4);
321 memcpy(str + i * 16 + 4, &ebx, 4);
322 memcpy(str + i * 16 + 8, &ecx, 4);
323 memcpy(str + i * 16 + 12, &edx, 4);
324 }
325 return 0;
326}
327
328static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
329{
330 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
331
332 x86_cpu_def->name = "host";
333 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
334 x86_cpu_def->level = eax;
335 x86_cpu_def->vendor1 = ebx;
336 x86_cpu_def->vendor2 = edx;
337 x86_cpu_def->vendor3 = ecx;
338
339 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
340 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
341 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
342 x86_cpu_def->stepping = eax & 0x0F;
343 x86_cpu_def->ext_features = ecx;
344 x86_cpu_def->features = edx;
345
346 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
347 x86_cpu_def->xlevel = eax;
348
349 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
350 x86_cpu_def->ext2_features = edx;
351 x86_cpu_def->ext3_features = ecx;
352 cpu_x86_fill_model_id(x86_cpu_def->model_id);
353 x86_cpu_def->vendor_override = 0;
354
355 return 0;
356}
357
358static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
359{
360 unsigned int i;
361 x86_def_t *def;
362
363 char *s = strdup(cpu_model);
364 char *featurestr, *name = strtok(s, ",");
365 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
366 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
367 int family = -1, model = -1, stepping = -1;
368
369 def = NULL;
370 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
371 if (strcmp(name, x86_defs[i].name) == 0) {
372 def = &x86_defs[i];
373 break;
374 }
375 }
376 if (kvm_enabled() && strcmp(name, "host") == 0) {
377 cpu_x86_fill_host(x86_cpu_def);
378 } else if (!def) {
379 goto error;
380 } else {
381 memcpy(x86_cpu_def, def, sizeof(*def));
382 }
383
384 add_flagname_to_bitmaps("hypervisor", &plus_features,
385 &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
386
387 featurestr = strtok(NULL, ",");
388
389 while (featurestr) {
390 char *val;
391 if (featurestr[0] == '+') {
392 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
393 } else if (featurestr[0] == '-') {
394 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
395 } else if ((val = strchr(featurestr, '='))) {
396 *val = 0; val++;
397 if (!strcmp(featurestr, "family")) {
398 char *err;
399 family = strtol(val, &err, 10);
400 if (!*val || *err || family < 0) {
401 fprintf(stderr, "bad numerical value %s\n", val);
402 goto error;
403 }
404 x86_cpu_def->family = family;
405 } else if (!strcmp(featurestr, "model")) {
406 char *err;
407 model = strtol(val, &err, 10);
408 if (!*val || *err || model < 0 || model > 0xff) {
409 fprintf(stderr, "bad numerical value %s\n", val);
410 goto error;
411 }
412 x86_cpu_def->model = model;
413 } else if (!strcmp(featurestr, "stepping")) {
414 char *err;
415 stepping = strtol(val, &err, 10);
416 if (!*val || *err || stepping < 0 || stepping > 0xf) {
417 fprintf(stderr, "bad numerical value %s\n", val);
418 goto error;
419 }
420 x86_cpu_def->stepping = stepping;
421 } else if (!strcmp(featurestr, "vendor")) {
422 if (strlen(val) != 12) {
423 fprintf(stderr, "vendor string must be 12 chars long\n");
424 goto error;
425 }
426 x86_cpu_def->vendor1 = 0;
427 x86_cpu_def->vendor2 = 0;
428 x86_cpu_def->vendor3 = 0;
429 for(i = 0; i < 4; i++) {
430 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
431 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
432 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
433 }
434 x86_cpu_def->vendor_override = 1;
435 } else if (!strcmp(featurestr, "model_id")) {
436 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
437 val);
438 } else {
439 fprintf(stderr, "unrecognized feature %s\n", featurestr);
440 goto error;
441 }
442 } else {
443 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
444 goto error;
445 }
446 featurestr = strtok(NULL, ",");
447 }
448 x86_cpu_def->features |= plus_features;
449 x86_cpu_def->ext_features |= plus_ext_features;
450 x86_cpu_def->ext2_features |= plus_ext2_features;
451 x86_cpu_def->ext3_features |= plus_ext3_features;
452 x86_cpu_def->features &= ~minus_features;
453 x86_cpu_def->ext_features &= ~minus_ext_features;
454 x86_cpu_def->ext2_features &= ~minus_ext2_features;
455 x86_cpu_def->ext3_features &= ~minus_ext3_features;
456 free(s);
457 return 0;
458
459error:
460 free(s);
461 return -1;
462}
463
464void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
465{
466 unsigned int i;
467
468 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
469 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
470}
471#endif /* !VBOX */
472
473static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
474{
475#ifndef VBOX
476 x86_def_t def1, *def = &def1;
477
478 if (cpu_x86_find_by_name(def, cpu_model) < 0)
479 return -1;
480 if (def->vendor1) {
481 env->cpuid_vendor1 = def->vendor1;
482 env->cpuid_vendor2 = def->vendor2;
483 env->cpuid_vendor3 = def->vendor3;
484 } else {
485 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
486 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
487 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
488 }
489 env->cpuid_vendor_override = def->vendor_override;
490 env->cpuid_level = def->level;
491 if (def->family > 0x0f)
492 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
493 else
494 env->cpuid_version = def->family << 8;
495 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
496 env->cpuid_version |= def->stepping;
497 env->cpuid_features = def->features;
498 env->pat = 0x0007040600070406ULL;
499 env->cpuid_ext_features = def->ext_features;
500 env->cpuid_ext2_features = def->ext2_features;
501 env->cpuid_xlevel = def->xlevel;
502 env->cpuid_ext3_features = def->ext3_features;
503 {
504 const char *model_id = def->model_id;
505 int c, len, i;
506 if (!model_id)
507 model_id = "";
508 len = strlen(model_id);
509 for(i = 0; i < 48; i++) {
510 if (i >= len)
511 c = '\0';
512 else
513 c = (uint8_t)model_id[i];
514 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
515 }
516 }
517#endif /* !VBOX */
518 return 0;
519}
520
521/* NOTE: must be called outside the CPU execute loop */
522void cpu_reset(CPUX86State *env)
523{
524 int i;
525
526 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
527 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
528 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
529 }
530
531 memset(env, 0, offsetof(CPUX86State, breakpoints));
532
533 tlb_flush(env, 1);
534
535 env->old_exception = -1;
536
537 /* init to reset state */
538
539#ifdef CONFIG_SOFTMMU
540 env->hflags |= HF_SOFTMMU_MASK;
541#endif
542 env->hflags2 |= HF2_GIF_MASK;
543
544 cpu_x86_update_cr0(env, 0x60000010);
545 env->a20_mask = ~0x0;
546 env->smbase = 0x30000;
547
548 env->idt.limit = 0xffff;
549 env->gdt.limit = 0xffff;
550 env->ldt.limit = 0xffff;
551 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
552 env->tr.limit = 0xffff;
553 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
554
555 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
556 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
557 DESC_R_MASK | DESC_A_MASK);
558 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
559 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
560 DESC_A_MASK);
561 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
562 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
563 DESC_A_MASK);
564 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
565 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
566 DESC_A_MASK);
567 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
568 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
569 DESC_A_MASK);
570 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
571 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
572 DESC_A_MASK);
573
574 env->eip = 0xfff0;
575#ifndef VBOX
576 env->regs[R_EDX] = env->cpuid_version;
577#else
578 /** @todo: is it right? */
579 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
580#endif
581
582 env->eflags = 0x2;
583
584 /* FPU init */
585 for(i = 0;i < 8; i++)
586 env->fptags[i] = 1;
587 env->fpuc = 0x37f;
588
589 env->mxcsr = 0x1f80;
590
591 memset(env->dr, 0, sizeof(env->dr));
592 env->dr[6] = DR6_FIXED_1;
593 env->dr[7] = DR7_FIXED_1;
594 cpu_breakpoint_remove_all(env, BP_CPU);
595 cpu_watchpoint_remove_all(env, BP_CPU);
596}
597
598void cpu_x86_close(CPUX86State *env)
599{
600#ifndef VBOX
601 qemu_free(env);
602#endif
603}
604
605/***********************************************************/
606/* x86 debug */
607
608static const char *cc_op_str[] = {
609 "DYNAMIC",
610 "EFLAGS",
611
612 "MULB",
613 "MULW",
614 "MULL",
615 "MULQ",
616
617 "ADDB",
618 "ADDW",
619 "ADDL",
620 "ADDQ",
621
622 "ADCB",
623 "ADCW",
624 "ADCL",
625 "ADCQ",
626
627 "SUBB",
628 "SUBW",
629 "SUBL",
630 "SUBQ",
631
632 "SBBB",
633 "SBBW",
634 "SBBL",
635 "SBBQ",
636
637 "LOGICB",
638 "LOGICW",
639 "LOGICL",
640 "LOGICQ",
641
642 "INCB",
643 "INCW",
644 "INCL",
645 "INCQ",
646
647 "DECB",
648 "DECW",
649 "DECL",
650 "DECQ",
651
652 "SHLB",
653 "SHLW",
654 "SHLL",
655 "SHLQ",
656
657 "SARB",
658 "SARW",
659 "SARL",
660 "SARQ",
661};
662
663static void
664cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
665 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
666 const char *name, struct SegmentCache *sc)
667{
668#ifdef TARGET_X86_64
669 if (env->hflags & HF_CS64_MASK) {
670 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
671 sc->selector, sc->base, sc->limit, sc->flags);
672 } else
673#endif
674 {
675 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
676 (uint32_t)sc->base, sc->limit, sc->flags);
677 }
678
679 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
680 goto done;
681
682 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
683 if (sc->flags & DESC_S_MASK) {
684 if (sc->flags & DESC_CS_MASK) {
685 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
686 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
687 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
688 (sc->flags & DESC_R_MASK) ? 'R' : '-');
689 } else {
690 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
691 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
692 (sc->flags & DESC_W_MASK) ? 'W' : '-');
693 }
694 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
695 } else {
696 static const char *sys_type_name[2][16] = {
697 { /* 32 bit mode */
698 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
699 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
700 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
701 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
702 },
703 { /* 64 bit mode */
704 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
705 "Reserved", "Reserved", "Reserved", "Reserved",
706 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
707 "Reserved", "IntGate64", "TrapGate64"
708 }
709 };
710 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
711 [(sc->flags & DESC_TYPE_MASK)
712 >> DESC_TYPE_SHIFT]);
713 }
714done:
715 cpu_fprintf(f, "\n");
716}
717
718void cpu_dump_state(CPUState *env, FILE *f,
719 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
720 int flags)
721{
722 int eflags, i, nb;
723 char cc_op_name[32];
724 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
725
726 if (kvm_enabled())
727 kvm_arch_get_registers(env);
728
729 eflags = env->eflags;
730#ifdef TARGET_X86_64
731 if (env->hflags & HF_CS64_MASK) {
732 cpu_fprintf(f,
733 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
734 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
735 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
736 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
737 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
738 env->regs[R_EAX],
739 env->regs[R_EBX],
740 env->regs[R_ECX],
741 env->regs[R_EDX],
742 env->regs[R_ESI],
743 env->regs[R_EDI],
744 env->regs[R_EBP],
745 env->regs[R_ESP],
746 env->regs[8],
747 env->regs[9],
748 env->regs[10],
749 env->regs[11],
750 env->regs[12],
751 env->regs[13],
752 env->regs[14],
753 env->regs[15],
754 env->eip, eflags,
755 eflags & DF_MASK ? 'D' : '-',
756 eflags & CC_O ? 'O' : '-',
757 eflags & CC_S ? 'S' : '-',
758 eflags & CC_Z ? 'Z' : '-',
759 eflags & CC_A ? 'A' : '-',
760 eflags & CC_P ? 'P' : '-',
761 eflags & CC_C ? 'C' : '-',
762 env->hflags & HF_CPL_MASK,
763 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
764 (int)(env->a20_mask >> 20) & 1,
765 (env->hflags >> HF_SMM_SHIFT) & 1,
766 env->halted);
767 } else
768#endif
769 {
770 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
771 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
772 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
773 (uint32_t)env->regs[R_EAX],
774 (uint32_t)env->regs[R_EBX],
775 (uint32_t)env->regs[R_ECX],
776 (uint32_t)env->regs[R_EDX],
777 (uint32_t)env->regs[R_ESI],
778 (uint32_t)env->regs[R_EDI],
779 (uint32_t)env->regs[R_EBP],
780 (uint32_t)env->regs[R_ESP],
781 (uint32_t)env->eip, eflags,
782 eflags & DF_MASK ? 'D' : '-',
783 eflags & CC_O ? 'O' : '-',
784 eflags & CC_S ? 'S' : '-',
785 eflags & CC_Z ? 'Z' : '-',
786 eflags & CC_A ? 'A' : '-',
787 eflags & CC_P ? 'P' : '-',
788 eflags & CC_C ? 'C' : '-',
789 env->hflags & HF_CPL_MASK,
790 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
791 (int)(env->a20_mask >> 20) & 1,
792 (env->hflags >> HF_SMM_SHIFT) & 1,
793 env->halted);
794 }
795
796 for(i = 0; i < 6; i++) {
797 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
798 &env->segs[i]);
799 }
800 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
801 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
802
803#ifdef TARGET_X86_64
804 if (env->hflags & HF_LMA_MASK) {
805 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
806 env->gdt.base, env->gdt.limit);
807 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
808 env->idt.base, env->idt.limit);
809 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
810 (uint32_t)env->cr[0],
811 env->cr[2],
812 env->cr[3],
813 (uint32_t)env->cr[4]);
814 for(i = 0; i < 4; i++)
815 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
816 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
817 env->dr[6], env->dr[7]);
818 } else
819#endif
820 {
821 cpu_fprintf(f, "GDT= %08x %08x\n",
822 (uint32_t)env->gdt.base, env->gdt.limit);
823 cpu_fprintf(f, "IDT= %08x %08x\n",
824 (uint32_t)env->idt.base, env->idt.limit);
825 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
826 (uint32_t)env->cr[0],
827 (uint32_t)env->cr[2],
828 (uint32_t)env->cr[3],
829 (uint32_t)env->cr[4]);
830 for(i = 0; i < 4; i++)
831 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
832 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
833 }
834 if (flags & X86_DUMP_CCOP) {
835 if ((unsigned)env->cc_op < CC_OP_NB)
836 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
837 else
838 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
839#ifdef TARGET_X86_64
840 if (env->hflags & HF_CS64_MASK) {
841 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
842 env->cc_src, env->cc_dst,
843 cc_op_name);
844 } else
845#endif
846 {
847 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
848 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
849 cc_op_name);
850 }
851 }
852 if (flags & X86_DUMP_FPU) {
853 int fptag;
854 fptag = 0;
855 for(i = 0; i < 8; i++) {
856 fptag |= ((!env->fptags[i]) << i);
857 }
858 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
859 env->fpuc,
860 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
861 env->fpstt,
862 fptag,
863 env->mxcsr);
864 for(i=0;i<8;i++) {
865#if defined(USE_X86LDOUBLE)
866 union {
867 long double d;
868 struct {
869 uint64_t lower;
870 uint16_t upper;
871 } l;
872 } tmp;
873 tmp.d = env->fpregs[i].d;
874 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
875 i, tmp.l.lower, tmp.l.upper);
876#else
877 cpu_fprintf(f, "FPR%d=%016" PRIx64,
878 i, env->fpregs[i].mmx.q);
879#endif
880 if ((i & 1) == 1)
881 cpu_fprintf(f, "\n");
882 else
883 cpu_fprintf(f, " ");
884 }
885 if (env->hflags & HF_CS64_MASK)
886 nb = 16;
887 else
888 nb = 8;
889 for(i=0;i<nb;i++) {
890 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
891 i,
892 env->xmm_regs[i].XMM_L(3),
893 env->xmm_regs[i].XMM_L(2),
894 env->xmm_regs[i].XMM_L(1),
895 env->xmm_regs[i].XMM_L(0));
896 if ((i & 1) == 1)
897 cpu_fprintf(f, "\n");
898 else
899 cpu_fprintf(f, " ");
900 }
901 }
902}
903
904/***********************************************************/
905/* x86 mmu */
906/* XXX: add PGE support */
907
908void cpu_x86_set_a20(CPUX86State *env, int a20_state)
909{
910 a20_state = (a20_state != 0);
911 if (a20_state != ((env->a20_mask >> 20) & 1)) {
912#if defined(DEBUG_MMU)
913 printf("A20 update: a20=%d\n", a20_state);
914#endif
915 /* if the cpu is currently executing code, we must unlink it and
916 all the potentially executing TB */
917 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
918
919 /* when a20 is changed, all the MMU mappings are invalid, so
920 we must flush everything */
921 tlb_flush(env, 1);
922 env->a20_mask = (~0x100000) | (a20_state << 20);
923 }
924}
925
926void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
927{
928 int pe_state;
929
930#if defined(DEBUG_MMU)
931 printf("CR0 update: CR0=0x%08x\n", new_cr0);
932#endif
933 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
934 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
935 tlb_flush(env, 1);
936 }
937
938#ifdef TARGET_X86_64
939 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
940 (env->efer & MSR_EFER_LME)) {
941 /* enter in long mode */
942 /* XXX: generate an exception */
943 if (!(env->cr[4] & CR4_PAE_MASK))
944 return;
945 env->efer |= MSR_EFER_LMA;
946 env->hflags |= HF_LMA_MASK;
947 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
948 (env->efer & MSR_EFER_LMA)) {
949 /* exit long mode */
950 env->efer &= ~MSR_EFER_LMA;
951 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
952 env->eip &= 0xffffffff;
953 }
954#endif
955 env->cr[0] = new_cr0 | CR0_ET_MASK;
956
957 /* update PE flag in hidden flags */
958 pe_state = (env->cr[0] & CR0_PE_MASK);
959 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
960 /* ensure that ADDSEG is always set in real mode */
961 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
962 /* update FPU flags */
963 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
964 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
965#ifdef VBOX
966
967 remR3ChangeCpuMode(env);
968#endif
969}
970
971/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
972 the PDPT */
973void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
974{
975 env->cr[3] = new_cr3;
976 if (env->cr[0] & CR0_PG_MASK) {
977#if defined(DEBUG_MMU)
978 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
979#endif
980 tlb_flush(env, 0);
981 }
982}
983
984void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
985{
986#if defined(DEBUG_MMU)
987 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
988#endif
989 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
990 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
991 tlb_flush(env, 1);
992 }
993 /* SSE handling */
994 if (!(env->cpuid_features & CPUID_SSE))
995 new_cr4 &= ~CR4_OSFXSR_MASK;
996 if (new_cr4 & CR4_OSFXSR_MASK)
997 env->hflags |= HF_OSFXSR_MASK;
998 else
999 env->hflags &= ~HF_OSFXSR_MASK;
1000
1001 env->cr[4] = new_cr4;
1002#ifdef VBOX
1003 remR3ChangeCpuMode(env);
1004#endif
1005}
1006
1007#if defined(CONFIG_USER_ONLY)
1008
1009int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
1010 int is_write, int mmu_idx, int is_softmmu)
1011{
1012 /* user mode only emulation */
1013 is_write &= 1;
1014 env->cr[2] = addr;
1015 env->error_code = (is_write << PG_ERROR_W_BIT);
1016 env->error_code |= PG_ERROR_U_MASK;
1017 env->exception_index = EXCP0E_PAGE;
1018 return 1;
1019}
1020
1021target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1022{
1023 return addr;
1024}
1025
1026#else
1027
1028/* XXX: This value should match the one returned by CPUID
1029 * and in exec.c */
1030#if defined(CONFIG_KQEMU)
1031#define PHYS_ADDR_MASK 0xfffff000LL
1032#else
1033# if defined(TARGET_X86_64)
1034# define PHYS_ADDR_MASK 0xfffffff000LL
1035# else
1036# define PHYS_ADDR_MASK 0xffffff000LL
1037# endif
1038#endif
1039
1040/* return value:
1041 -1 = cannot handle fault
1042 0 = nothing more to do
1043 1 = generate PF fault
1044 2 = soft MMU activation required for this block
1045*/
/* Walk the guest page tables for @addr, update Accessed/Dirty bits as a
 * real CPU would, and install the resulting mapping in the TLB, or set up
 * a #PF / #GP to be delivered.
 *
 * @is_write1: 0 = read, 1 = write, 2 = instruction fetch
 * @mmu_idx:   MMU_USER_IDX means the access is made at CPL 3
 * Return value: see the comment above (-1/0/1/2 from tlb_set_page_exec,
 * or 1 when a fault was prepared in env->error_code/exception_index).
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* fold "instruction fetch" (2) into "read" for the R/W checks */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity map, all access rights granted */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates the combined protection of all levels.
               The NX bit is kept INVERTED so that AND-combining the
               levels effectively ORs their NX bits; it is flipped back
               before the final check below. */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by bits 31:30 */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* legacy PDPTEs carry no protection bits: start fully open */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* un-invert NX: from here a set NX bit means "no execute" */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour R/W only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* classic 32-bit 2-level paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access type and privilege */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* SVM intercept: cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1335
/* Debugger-side address translation: walk the page tables for @addr and
 * return the physical address, or -1 if the page is not mapped.
 * Unlike cpu_x86_handle_mmu_fault() this performs NO permission checks,
 * sets NO Accessed/Dirty bits and raises NO exceptions.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1; /* non-canonical */

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1429
1430void hw_breakpoint_insert(CPUState *env, int index)
1431{
1432 int type, err = 0;
1433
1434 switch (hw_breakpoint_type(env->dr[7], index)) {
1435 case 0:
1436 if (hw_breakpoint_enabled(env->dr[7], index))
1437 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1438 &env->cpu_breakpoint[index]);
1439 break;
1440 case 1:
1441 type = BP_CPU | BP_MEM_WRITE;
1442 goto insert_wp;
1443 case 2:
1444 /* No support for I/O watchpoints yet */
1445 break;
1446 case 3:
1447 type = BP_CPU | BP_MEM_ACCESS;
1448 insert_wp:
1449 err = cpu_watchpoint_insert(env, env->dr[index],
1450 hw_breakpoint_len(env->dr[7], index),
1451 type, &env->cpu_watchpoint[index]);
1452 break;
1453 }
1454 if (err)
1455 env->cpu_breakpoint[index] = NULL;
1456}
1457
1458void hw_breakpoint_remove(CPUState *env, int index)
1459{
1460 if (!env->cpu_breakpoint[index])
1461 return;
1462 switch (hw_breakpoint_type(env->dr[7], index)) {
1463 case 0:
1464 if (hw_breakpoint_enabled(env->dr[7], index))
1465 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1466 break;
1467 case 1:
1468 case 3:
1469 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1470 break;
1471 case 2:
1472 /* No support for I/O watchpoints yet */
1473 break;
1474 }
1475}
1476
1477int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1478{
1479 target_ulong dr6;
1480 int reg, type;
1481 int hit_enabled = 0;
1482
1483 dr6 = env->dr[6] & ~0xf;
1484 for (reg = 0; reg < 4; reg++) {
1485 type = hw_breakpoint_type(env->dr[7], reg);
1486 if ((type == 0 && env->dr[reg] == env->eip) ||
1487 ((type & 1) && env->cpu_watchpoint[reg] &&
1488 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1489 dr6 |= 1 << reg;
1490 if (hw_breakpoint_enabled(env->dr[7], reg))
1491 hit_enabled = 1;
1492 }
1493 }
1494 if (hit_enabled || force_dr6_update)
1495 env->dr[6] = dr6;
1496 return hit_enabled;
1497}
1498
1499static CPUDebugExcpHandler *prev_debug_excp_handler;
1500
1501void raise_exception(int exception_index);
1502
/* Debug exception hook: decide whether a pending watchpoint/breakpoint
 * stop belongs to the guest's debug registers (BP_CPU) and, if so,
 * reflect it into the guest as #DB; otherwise chain to the previously
 * installed handler (e.g. the gdbstub's).
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                /* NOTE(review): raise_exception() presumably does not
                   return (longjmp into the exception path) — confirm */
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* force DR6 update before delivering #DB */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the handler we displaced in cpu_x86_init() */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1528
1529
1530#ifndef VBOX
1531/* This should come from sysemu.h - if we could include it here... */
1532void qemu_system_reset_request(void);
1533
/* Inject a machine-check event into bank @bank of @cenv.
 *
 * @status/@mcg_status/@addr/@misc are the values for the bank's
 * MCi_STATUS, the global MCG_STATUS, MCi_ADDR and MCi_MISC registers.
 * Uncorrected errors (MCI_STATUS_UC) raise #MC via CPU_INTERRUPT_MCE;
 * corrected errors are only logged into the bank registers.
 */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff; /* low byte = number of banks */
    uint64_t *banks = cenv->mce_banks;  /* 4 x uint64_t per bank */

    /* ignore out-of-range banks and events without the VALID bit */
    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank; /* banks[0..3] = CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        /* a second #MC while one is in progress (MCIP set), or with
           CR4.MCE clear, is a shutdown condition: triple fault */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* overwrite of a still-valid event sets the OVERFLOW bit */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: log it, overwriting a previous corrected one */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* bank still holds an unprocessed UC event: only mark overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1584#endif /* !VBOX */
1585#endif /* !CONFIG_USER_ONLY */
1586
1587#ifndef VBOX
1588
1589static void mce_init(CPUX86State *cenv)
1590{
1591 unsigned int bank, bank_num;
1592
1593 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1594 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1595 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1596 cenv->mcg_ctl = ~(uint64_t)0;
1597 bank_num = cenv->mcg_cap & 0xff;
1598 cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
1599 for (bank = 0; bank < bank_num; bank++)
1600 cenv->mce_banks[bank*4] = ~(uint64_t)0;
1601 }
1602}
1603
/* Execute CPUID on the HOST cpu with the given leaf/subleaf and return
 * the register values through the (individually optional) out pointers.
 *
 * NOTE(review): the whole body is compiled only under CONFIG_KVM; without
 * it the output parameters are left untouched — callers must tolerate that.
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: save/restore all registers with pusha/popa and store the
       results through %esi — presumably to avoid declaring %ebx clobbered
       (it may be the PIC base register); confirm before changing. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* each output pointer may be NULL when the caller is uninterested */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1638
/* Emulate the CPUID instruction for the guest: fill eax/ebx/ecx/edx for
 * leaf @index (and subleaf @count where applicable) from the properties
 * stored in @env. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached; an out-of-range extended leaf
       falls back to the highest *basic* leaf's data, matching real
       CPU behavior */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string + maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD. and syscall
         * isn't supported in compatibility mode on Intel. so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* version, feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capabilities */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1818
1819int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1820 target_ulong *base, unsigned int *limit,
1821 unsigned int *flags)
1822{
1823 SegmentCache *dt;
1824 target_ulong ptr;
1825 uint32_t e1, e2;
1826 int index;
1827
1828 if (selector & 0x4)
1829 dt = &env->ldt;
1830 else
1831 dt = &env->gdt;
1832 index = selector & ~7;
1833 ptr = dt->base + index;
1834 if ((index + 7) > dt->limit
1835 || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1836 || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1837 return 0;
1838
1839 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1840 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1841 if (e2 & DESC_G_MASK)
1842 *limit = (*limit << 12) | 0xfff;
1843 *flags = e2;
1844
1845 return 1;
1846}
1847
1848#endif /* !VBOX */
1849
/* Create (upstream) or initialize (VBox: caller-allocated @env) an x86 CPU
 * state for @cpu_model. Returns NULL when the model name is not accepted
 * by cpu_x86_register(). */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    /* one-time process-wide initialization guard */
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* chain our debug-exception handler ahead of any existing one
           (it forwards via prev_debug_excp_handler) */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        /* model registration failed */
        cpu_x86_close(env);
        return NULL;
    }
#ifndef VBOX
    mce_init(env);
#endif
    cpu_reset(env);
#ifdef CONFIG_KQEMU
    kqemu_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}
1892
1893#ifndef VBOX
1894#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU while preserving any pending SIPI
 * request so the AP can subsequently be started. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Handle a SIPI: delegate to the APIC model to start the CPU. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation: INIT/SIPI cannot occur — empty stubs. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif
1915#endif /* !VBOX */
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette