VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 48065

Last change on this file since 48065 was 48065, checked in by vboxsync, 11 years ago

load_segment: on second thought, just remove the bogus log statement.

  • Property svn:eol-style set to native
File size: 201.0 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
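/* Illustrative sketch (not part of the upstream file): the table above can be
   reproduced at runtime. CC_P is set for bytes containing an even number of
   1 bits, which is how the x86 PF flag is defined. */
#if 0
static void build_parity_table(uint8_t *tbl)
{
    int i;
    for (i = 0; i < 256; i++) {
        int v = i, ones = 0;
        while (v) {                       /* count the set bits of the byte */
            ones += v & 1;
            v >>= 1;
        }
        tbl[i] = (ones & 1) ? 0 : CC_P;   /* even number of bits -> PF set */
    }
}
#endif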
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
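/* Illustrative sketch (not part of the upstream file): both tables simply
   precompute the rotate count modulo 17 (16-bit RCL: 16 data bits plus CF)
   and modulo 9 (8-bit RCL: 8 data bits plus CF). */
#if 0
static void check_rcl_tables(void)
{
    int i;
    for (i = 0; i < 32; i++) {
        assert(rclw_table[i] == i % 17);
        assert(rclb_table[i] == i % 9);
    }
}
#endif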
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232 if (selector & 0x4)
233 dt = &env->ldt;
234 else
235 dt = &env->gdt;
236 index = selector & ~7;
237 if ((index + 7) > dt->limit)
238 return -1;
239 ptr = dt->base + index;
240 *e1_ptr = ldl_kernel(ptr);
241 *e2_ptr = ldl_kernel(ptr + 4);
242 return 0;
243}
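/* Worked example (illustrative, not in the upstream file): selector 0x000f has
   index 1 (bits 3..15), TI=1 (bit 2, so the LDT is used) and RPL=3 (bits 0..1);
   load_segment() therefore reads the 8-byte descriptor at env->ldt.base + 8
   into e1 (low dword) and e2 (high dword). */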
244
245static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
246{
247 unsigned int limit;
248 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
249 if (e2 & DESC_G_MASK)
250 limit = (limit << 12) | 0xfff;
251 return limit;
252}
253
254static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
255{
256 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
257}
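/* Illustrative check (not part of the upstream file): decoding a typical flat
   4GiB data descriptor; the descriptor words below are an example, not taken
   from this source. */
#if 0
static void seg_decode_example(void)
{
    uint32_t e1 = 0x0000ffff;   /* limit 15:0 = 0xffff, base 15:0 = 0 */
    uint32_t e2 = 0x00cf9300;   /* G=1, D/B=1, P=1, S=1, type = data RW accessed */
    assert(get_seg_limit(e1, e2) == 0xffffffff); /* G=1 scales 0xfffff pages to 4GiB - 1 */
    assert(get_seg_base(e1, e2) == 0);
}
#endif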
258
259static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
260{
261 sc->base = get_seg_base(e1, e2);
262 sc->limit = get_seg_limit(e1, e2);
263#ifndef VBOX
264 sc->flags = e2;
265#else
266 sc->flags = e2 & DESC_RAW_FLAG_BITS;
267 sc->newselector = 0;
268 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
269#endif
270}
271
272/* init the segment cache in vm86 mode. */
273static inline void load_seg_vm(int seg, int selector)
274{
275 selector &= 0xffff;
276#ifdef VBOX
277 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
278 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
279 flags |= (3 << DESC_DPL_SHIFT);
280
281 cpu_x86_load_seg_cache(env, seg, selector,
282 (selector << 4), 0xffff, flags);
283#else /* VBOX */
284 cpu_x86_load_seg_cache(env, seg, selector,
285 (selector << 4), 0xffff, 0);
286#endif /* VBOX */
287}
288
289static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
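/* Layout note (illustrative, not in the upstream file): in a 32-bit TSS the
   ESP0/SS0, ESP1/SS1 and ESP2/SS2 pairs live at offsets 4/8, 12/16 and 20/24,
   which is what index = (dpl * 4 + 2) << shift computes with shift = 1; a
   16-bit TSS packs 2-byte SP/SS pairs starting at offset 2, hence shift = 0. */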
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0; /* gcc warning? */
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif /* VBOX */
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390# endif
391#endif /* VBOX */
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419
420 /* if task gate, we read the TSS segment and we load it */
421 if (type == 5) {
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424 tss_selector = e1 >> 16;
425 if (tss_selector & 4)
426 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
427 if (load_segment(&e1, &e2, tss_selector) != 0)
428 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
429 if (e2 & DESC_S_MASK)
430 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
431 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
432 if ((type & 7) != 1)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 }
435
436 if (!(e2 & DESC_P_MASK))
437 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
438
439 if (type & 8)
440 tss_limit_max = 103;
441 else
442 tss_limit_max = 43;
443 tss_limit = get_seg_limit(e1, e2);
444 tss_base = get_seg_base(e1, e2);
445 if ((tss_selector & 4) != 0 ||
446 tss_limit < tss_limit_max)
447 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
448 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
449 if (old_type & 8)
450 old_tss_limit_max = 103;
451 else
452 old_tss_limit_max = 43;
453
454#ifndef VBOX /* The old TSS is written first... */
455 /* read all the registers from the new TSS */
456 if (type & 8) {
457 /* 32 bit */
458 new_cr3 = ldl_kernel(tss_base + 0x1c);
459 new_eip = ldl_kernel(tss_base + 0x20);
460 new_eflags = ldl_kernel(tss_base + 0x24);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
463 for(i = 0; i < 6; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x60);
466 new_trap = ldl_kernel(tss_base + 0x64);
467 } else {
468 /* 16 bit */
469 new_cr3 = 0;
470 new_eip = lduw_kernel(tss_base + 0x0e);
471 new_eflags = lduw_kernel(tss_base + 0x10);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
474 for(i = 0; i < 4; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
476 new_ldt = lduw_kernel(tss_base + 0x2a);
477 new_segs[R_FS] = 0;
478 new_segs[R_GS] = 0;
479 new_trap = 0;
480 }
481#endif
482
483 /* NOTE: we must avoid memory exceptions during the task switch,
484 so we make dummy accesses beforehand */
485 /* XXX: it can still fail in some cases, so a bigger hack is
486 necessary to validate the TLB after having done the accesses */
487
488 v1 = ldub_kernel(env->tr.base);
489 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
490 stb_kernel(env->tr.base, v1);
491 stb_kernel(env->tr.base + old_tss_limit_max, v2);
492
493 /* clear busy bit (it is restartable) */
494 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
495 target_ulong ptr;
496 uint32_t e2;
497 ptr = env->gdt.base + (env->tr.selector & ~7);
498 e2 = ldl_kernel(ptr + 4);
499 e2 &= ~DESC_TSS_BUSY_MASK;
500 stl_kernel(ptr + 4, e2);
501 }
502 old_eflags = compute_eflags();
503 if (source == SWITCH_TSS_IRET)
504 old_eflags &= ~NT_MASK;
505
506 /* save the current state in the old TSS */
507 if (type & 8) {
508 /* 32 bit */
509 stl_kernel(env->tr.base + 0x20, next_eip);
510 stl_kernel(env->tr.base + 0x24, old_eflags);
511 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
512 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
513 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
514 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
515 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
516 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
517 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
518 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
519 for(i = 0; i < 6; i++)
520 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
521#if defined(VBOX) && defined(DEBUG)
522 printf("TSS 32 bits switch\n");
523 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
524#endif
525 } else {
526 /* 16 bit */
527 stw_kernel(env->tr.base + 0x0e, next_eip);
528 stw_kernel(env->tr.base + 0x10, old_eflags);
529 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
530 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
531 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
532 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
533 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
534 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
535 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
536 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
537 for(i = 0; i < 4; i++)
538 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
539 }
540
541#ifdef VBOX
542 /* read all the registers from the new TSS - may be the same as the old one */
543 if (type & 8) {
544 /* 32 bit */
545 new_cr3 = ldl_kernel(tss_base + 0x1c);
546 new_eip = ldl_kernel(tss_base + 0x20);
547 new_eflags = ldl_kernel(tss_base + 0x24);
548 for(i = 0; i < 8; i++)
549 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
550 for(i = 0; i < 6; i++)
551 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
552 new_ldt = lduw_kernel(tss_base + 0x60);
553 new_trap = ldl_kernel(tss_base + 0x64);
554 } else {
555 /* 16 bit */
556 new_cr3 = 0;
557 new_eip = lduw_kernel(tss_base + 0x0e);
558 new_eflags = lduw_kernel(tss_base + 0x10);
559 for(i = 0; i < 8; i++)
560 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
561 for(i = 0; i < 4; i++)
562 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
563 new_ldt = lduw_kernel(tss_base + 0x2a);
564 new_segs[R_FS] = 0;
565 new_segs[R_GS] = 0;
566 new_trap = 0;
567 }
568#endif
569
570 /* now if an exception occurs, it will occur in the next task
571 context */
572
573 if (source == SWITCH_TSS_CALL) {
574 stw_kernel(tss_base, env->tr.selector);
575 new_eflags |= NT_MASK;
576 }
577
578 /* set busy bit */
579 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
580 target_ulong ptr;
581 uint32_t e2;
582 ptr = env->gdt.base + (tss_selector & ~7);
583 e2 = ldl_kernel(ptr + 4);
584 e2 |= DESC_TSS_BUSY_MASK;
585 stl_kernel(ptr + 4, e2);
586 }
587
588 /* set the new CPU state */
589 /* from this point, any exception which occurs can give problems */
590 env->cr[0] |= CR0_TS_MASK;
591 env->hflags |= HF_TS_MASK;
592 env->tr.selector = tss_selector;
593 env->tr.base = tss_base;
594 env->tr.limit = tss_limit;
595#ifndef VBOX
596 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
597#else
598 env->tr.flags = e2 & (DESC_RAW_FLAG_BITS & ~(DESC_TSS_BUSY_MASK)); /** @todo stop clearing the busy bit, VT-x and AMD-V seems to set it in the hidden bits. */
599 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
600 env->tr.newselector = 0;
601#endif
602
603 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
604 cpu_x86_update_cr3(env, new_cr3);
605 }
606
607 /* load all registers without an exception, then reload them with
608 possible exception */
609 env->eip = new_eip;
610 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
611 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
612 if (!(type & 8))
613 eflags_mask &= 0xffff;
614 load_eflags(new_eflags, eflags_mask);
615 /* XXX: what to do in 16 bit case ? */
616 EAX = new_regs[0];
617 ECX = new_regs[1];
618 EDX = new_regs[2];
619 EBX = new_regs[3];
620 ESP = new_regs[4];
621 EBP = new_regs[5];
622 ESI = new_regs[6];
623 EDI = new_regs[7];
624 if (new_eflags & VM_MASK) {
625 for(i = 0; i < 6; i++)
626 load_seg_vm(i, new_segs[i]);
627 /* in vm86, CPL is always 3 */
628 cpu_x86_set_cpl(env, 3);
629 } else {
630 /* CPL is set to the RPL of CS */
631 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
632 /* first just selectors as the rest may trigger exceptions */
633 for(i = 0; i < 6; i++)
634 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
635 }
636
637 env->ldt.selector = new_ldt & ~4;
638 env->ldt.base = 0;
639 env->ldt.limit = 0;
640 env->ldt.flags = 0;
641#ifdef VBOX
642 env->ldt.flags = DESC_INTEL_UNUSABLE;
643 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
644 env->ldt.newselector = 0;
645#endif
646
647 /* load the LDT */
648 if (new_ldt & 4)
649 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
650
651 if ((new_ldt & 0xfffc) != 0) {
652 dt = &env->gdt;
653 index = new_ldt & ~7;
654 if ((index + 7) > dt->limit)
655 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
656 ptr = dt->base + index;
657 e1 = ldl_kernel(ptr);
658 e2 = ldl_kernel(ptr + 4);
659 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
660 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
661 if (!(e2 & DESC_P_MASK))
662 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
663 load_seg_cache_raw_dt(&env->ldt, e1, e2);
664 }
665
666 /* load the segments */
667 if (!(new_eflags & VM_MASK)) {
668 tss_load_seg(R_CS, new_segs[R_CS]);
669 tss_load_seg(R_SS, new_segs[R_SS]);
670 tss_load_seg(R_ES, new_segs[R_ES]);
671 tss_load_seg(R_DS, new_segs[R_DS]);
672 tss_load_seg(R_FS, new_segs[R_FS]);
673 tss_load_seg(R_GS, new_segs[R_GS]);
674 }
675
676 /* check that EIP is in the CS segment limits */
677 if (new_eip > env->segs[R_CS].limit) {
678 /* XXX: different exception if CALL ? */
679 raise_exception_err(EXCP0D_GPF, 0);
680 }
681
682#ifndef CONFIG_USER_ONLY
683 /* reset local breakpoints */
684 if (env->dr[7] & 0x55) {
685 for (i = 0; i < 4; i++) {
686 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
687 hw_breakpoint_remove(env, i);
688 }
689 env->dr[7] &= ~0x55;
690 }
691#endif
692}
693
694/* check if Port I/O is allowed in TSS */
695static inline void check_io(int addr, int size)
696{
697#ifndef VBOX
698 int io_offset, val, mask;
699#else
700 int val, mask;
701 unsigned int io_offset;
702#endif /* VBOX */
703
704 /* TSS must be a valid 32 bit one */
705 if (!(env->tr.flags & DESC_P_MASK) ||
706 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
707 env->tr.limit < 103)
708 goto fail;
709 io_offset = lduw_kernel(env->tr.base + 0x66);
710 io_offset += (addr >> 3);
711 /* Note: the check needs two bytes */
712 if ((io_offset + 1) > env->tr.limit)
713 goto fail;
714 val = lduw_kernel(env->tr.base + io_offset);
715 val >>= (addr & 7);
716 mask = (1 << size) - 1;
717 /* all bits must be zero to allow the I/O */
718 if ((val & mask) != 0) {
719 fail:
720 raise_exception_err(EXCP0D_GPF, 0);
721 }
722}
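/* Worked example (illustrative, not in the upstream file): an OUT to port
   0x3f8 with size 1 looks at bit (0x3f8 & 7) = 0 of the word fetched from
   env->tr.base + io_offset + (0x3f8 >> 3), i.e. 0x7f bytes past the I/O
   permission bitmap base read from TSS offset 0x66; the access is allowed
   only if every bit covered by the access is clear. */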
723
724#ifdef VBOX
725
726/* Keep in sync with gen_check_external_event() */
727void helper_check_external_event()
728{
729 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
730 | CPU_INTERRUPT_EXTERNAL_EXIT
731 | CPU_INTERRUPT_EXTERNAL_TIMER
732 | CPU_INTERRUPT_EXTERNAL_DMA))
733 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
734 && (env->eflags & IF_MASK)
735 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
736 {
737 helper_external_event();
738 }
739
740}
741
742void helper_sync_seg(uint32_t reg)
743{
744 if (env->segs[reg].newselector)
745 sync_seg(env, reg, env->segs[reg].newselector);
746}
747
748#endif /* VBOX */
749
750void helper_check_iob(uint32_t t0)
751{
752 check_io(t0, 1);
753}
754
755void helper_check_iow(uint32_t t0)
756{
757 check_io(t0, 2);
758}
759
760void helper_check_iol(uint32_t t0)
761{
762 check_io(t0, 4);
763}
764
765void helper_outb(uint32_t port, uint32_t data)
766{
767#ifndef VBOX
768 cpu_outb(port, data & 0xff);
769#else
770 cpu_outb(env, port, data & 0xff);
771#endif
772}
773
774target_ulong helper_inb(uint32_t port)
775{
776#ifndef VBOX
777 return cpu_inb(port);
778#else
779 return cpu_inb(env, port);
780#endif
781}
782
783void helper_outw(uint32_t port, uint32_t data)
784{
785#ifndef VBOX
786 cpu_outw(port, data & 0xffff);
787#else
788 cpu_outw(env, port, data & 0xffff);
789#endif
790}
791
792target_ulong helper_inw(uint32_t port)
793{
794#ifndef VBOX
795 return cpu_inw(port);
796#else
797 return cpu_inw(env, port);
798#endif
799}
800
801void helper_outl(uint32_t port, uint32_t data)
802{
803#ifndef VBOX
804 cpu_outl(port, data);
805#else
806 cpu_outl(env, port, data);
807#endif
808}
809
810target_ulong helper_inl(uint32_t port)
811{
812#ifndef VBOX
813 return cpu_inl(port);
814#else
815 return cpu_inl(env, port);
816#endif
817}
818
819static inline unsigned int get_sp_mask(unsigned int e2)
820{
821 if (e2 & DESC_B_MASK)
822 return 0xffffffff;
823 else
824 return 0xffff;
825}
826
827static int exeption_has_error_code(int intno)
828{
829 switch(intno) {
830 case 8:
831 case 10:
832 case 11:
833 case 12:
834 case 13:
835 case 14:
836 case 17:
837 return 1;
838 }
839 return 0;
840}
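/* For reference (not in the upstream file): these vectors are #DF(8), #TS(10),
   #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17), the protected-mode exceptions
   that push an error code. */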
841
842#ifdef TARGET_X86_64
843#define SET_ESP(val, sp_mask)\
844do {\
845 if ((sp_mask) == 0xffff)\
846 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
847 else if ((sp_mask) == 0xffffffffLL)\
848 ESP = (uint32_t)(val);\
849 else\
850 ESP = (val);\
851} while (0)
852#else
853#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
854#endif
855
856/* in 64-bit machines, this can overflow. So this segment addition macro
857 * can be used to trim the value to 32-bit whenever needed */
858#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
859
860/* XXX: add a is_user flag to have proper security support */
861#define PUSHW(ssp, sp, sp_mask, val)\
862{\
863 sp -= 2;\
864 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
865}
866
867#define PUSHL(ssp, sp, sp_mask, val)\
868{\
869 sp -= 4;\
870 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
871}
872
873#define POPW(ssp, sp, sp_mask, val)\
874{\
875 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
876 sp += 2;\
877}
878
879#define POPL(ssp, sp, sp_mask, val)\
880{\
881 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
882 sp += 4;\
883}
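/* Illustrative sketch (not part of the upstream file): typical use of the
   push macros together with get_sp_mask()/SET_ESP(), mirroring what the
   interrupt and call helpers below do. */
#if 0
static void push_cs_example(void)
{
    target_ulong ssp = env->segs[R_SS].base;
    target_ulong sp = ESP;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);

    /* the macro decrements 'sp'; the mask keeps the offset inside a 16-bit stack */
    PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
    SET_ESP(sp, sp_mask);   /* write back only the masked part of ESP */
}
#endif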
884
885/* protected mode interrupt */
886static void do_interrupt_protected(int intno, int is_int, int error_code,
887 unsigned int next_eip, int is_hw)
888{
889 SegmentCache *dt;
890 target_ulong ptr, ssp;
891 int type, dpl, selector, ss_dpl, cpl;
892 int has_error_code, new_stack, shift;
893 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
894 uint32_t old_eip, sp_mask;
895
896#ifdef VBOX
897 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
898 cpu_loop_exit();
899#endif
900
901 has_error_code = 0;
902 if (!is_int && !is_hw)
903 has_error_code = exeption_has_error_code(intno);
904 if (is_int)
905 old_eip = next_eip;
906 else
907 old_eip = env->eip;
908
909 dt = &env->idt;
910#ifndef VBOX
911 if (intno * 8 + 7 > dt->limit)
912#else
913 if ((unsigned)intno * 8 + 7 > dt->limit)
914#endif
915 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
916 ptr = dt->base + intno * 8;
917 e1 = ldl_kernel(ptr);
918 e2 = ldl_kernel(ptr + 4);
919 /* check gate type */
920 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
921 switch(type) {
922 case 5: /* task gate */
923#ifdef VBOX
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 cpl = env->hflags & HF_CPL_MASK;
926 /* check privilege if software int */
927 if (is_int && dpl < cpl)
928 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
929#endif
930 /* must do that check here to return the correct error code */
931 if (!(e2 & DESC_P_MASK))
932 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
933 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
934 if (has_error_code) {
935 int type;
936 uint32_t mask;
937 /* push the error code */
938 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
939 shift = type >> 3;
940 if (env->segs[R_SS].flags & DESC_B_MASK)
941 mask = 0xffffffff;
942 else
943 mask = 0xffff;
944 esp = (ESP - (2 << shift)) & mask;
945 ssp = env->segs[R_SS].base + esp;
946 if (shift)
947 stl_kernel(ssp, error_code);
948 else
949 stw_kernel(ssp, error_code);
950 SET_ESP(esp, mask);
951 }
952 return;
953 case 6: /* 286 interrupt gate */
954 case 7: /* 286 trap gate */
955 case 14: /* 386 interrupt gate */
956 case 15: /* 386 trap gate */
957 break;
958 default:
959 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
960 break;
961 }
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 cpl = env->hflags & HF_CPL_MASK;
964 /* check privilege if software int */
965 if (is_int && dpl < cpl)
966 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
967 /* check valid bit */
968 if (!(e2 & DESC_P_MASK))
969 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
970 selector = e1 >> 16;
971 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
972 if ((selector & 0xfffc) == 0)
973 raise_exception_err(EXCP0D_GPF, 0);
974
975 if (load_segment(&e1, &e2, selector) != 0)
976 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
977#ifdef VBOX /** @todo figure out when this is done one day... */
978 if (!(e2 & DESC_A_MASK))
979 e2 = set_segment_accessed(selector, e2);
980#endif
981 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
982 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
983 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
984 if (dpl > cpl)
985 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
986 if (!(e2 & DESC_P_MASK))
987 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
988 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
989 /* to inner privilege */
990 get_ss_esp_from_tss(&ss, &esp, dpl);
991 if ((ss & 0xfffc) == 0)
992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
993 if ((ss & 3) != dpl)
994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
995 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
996 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
997#ifdef VBOX /** @todo figure out when this is done one day... */
998 if (!(ss_e2 & DESC_A_MASK))
999 ss_e2 = set_segment_accessed(ss, ss_e2);
1000#endif
1001 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1002 if (ss_dpl != dpl)
1003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1004 if (!(ss_e2 & DESC_S_MASK) ||
1005 (ss_e2 & DESC_CS_MASK) ||
1006 !(ss_e2 & DESC_W_MASK))
1007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1008 if (!(ss_e2 & DESC_P_MASK))
1009#ifdef VBOX /* See page 3-477 of 253666.pdf */
1010 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1011#else
1012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1013#endif
1014 new_stack = 1;
1015 sp_mask = get_sp_mask(ss_e2);
1016 ssp = get_seg_base(ss_e1, ss_e2);
1017#if defined(VBOX) && defined(DEBUG)
1018 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1019#endif
1020 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1021 /* to same privilege */
1022 if (env->eflags & VM_MASK)
1023 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1024 new_stack = 0;
1025 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1026 ssp = env->segs[R_SS].base;
1027 esp = ESP;
1028 dpl = cpl;
1029 } else {
1030 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1031 new_stack = 0; /* avoid warning */
1032 sp_mask = 0; /* avoid warning */
1033 ssp = 0; /* avoid warning */
1034 esp = 0; /* avoid warning */
1035 }
1036
1037 shift = type >> 3;
1038
1039#if 0
1040 /* XXX: check that enough room is available */
1041 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1042 if (env->eflags & VM_MASK)
1043 push_size += 8;
1044 push_size <<= shift;
1045#endif
1046 if (shift == 1) {
1047 if (new_stack) {
1048 if (env->eflags & VM_MASK) {
1049 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1050 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1051 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1052 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1053 }
1054 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1055 PUSHL(ssp, esp, sp_mask, ESP);
1056 }
1057 PUSHL(ssp, esp, sp_mask, compute_eflags());
1058 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1059 PUSHL(ssp, esp, sp_mask, old_eip);
1060 if (has_error_code) {
1061 PUSHL(ssp, esp, sp_mask, error_code);
1062 }
1063 } else {
1064 if (new_stack) {
1065 if (env->eflags & VM_MASK) {
1066 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1067 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1068 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1069 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1070 }
1071 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1072 PUSHW(ssp, esp, sp_mask, ESP);
1073 }
1074 PUSHW(ssp, esp, sp_mask, compute_eflags());
1075 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1076 PUSHW(ssp, esp, sp_mask, old_eip);
1077 if (has_error_code) {
1078 PUSHW(ssp, esp, sp_mask, error_code);
1079 }
1080 }
1081
1082 if (new_stack) {
1083 if (env->eflags & VM_MASK) {
1084 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1085 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1086 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1087 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1088 }
1089 ss = (ss & ~3) | dpl;
1090 cpu_x86_load_seg_cache(env, R_SS, ss,
1091 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1092 }
1093 SET_ESP(esp, sp_mask);
1094
1095 selector = (selector & ~3) | dpl;
1096 cpu_x86_load_seg_cache(env, R_CS, selector,
1097 get_seg_base(e1, e2),
1098 get_seg_limit(e1, e2),
1099 e2);
1100 cpu_x86_set_cpl(env, dpl);
1101 env->eip = offset;
1102
1103 /* an interrupt gate clears the IF mask */
1104 if ((type & 1) == 0) {
1105 env->eflags &= ~IF_MASK;
1106 }
1107#ifndef VBOX
1108 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1109#else
1110 /*
1111 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1112 * gets confused by seemingly changed EFLAGS. See #3491 and
1113 * public bug #2341.
1114 */
1115 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1116#endif
1117}
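/* Frame summary (illustrative, not in the upstream file): after a stack switch
   the handler's new stack holds, from the new ESP upwards: [error code,] EIP,
   CS, EFLAGS, old ESP, old SS, and, when the interrupt was taken from v86
   mode, the ES, DS, FS and GS selectors. */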
1118
1119#ifdef VBOX
1120
1121/* check if VME interrupt redirection is enabled in TSS */
1122DECLINLINE(bool) is_vme_irq_redirected(int intno)
1123{
1124 unsigned int io_offset, intredir_offset;
1125 unsigned char val, mask;
1126
1127 /* TSS must be a valid 32 bit one */
1128 if (!(env->tr.flags & DESC_P_MASK) ||
1129 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1130 env->tr.limit < 103)
1131 goto fail;
1132 io_offset = lduw_kernel(env->tr.base + 0x66);
1133 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1134 if (io_offset < 0x68 + 0x20)
1135 io_offset = 0x68 + 0x20;
1136 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1137 intredir_offset = io_offset - 0x20;
1138
1139 intredir_offset += (intno >> 3);
1140 if ((intredir_offset) > env->tr.limit)
1141 goto fail;
1142
1143 val = ldub_kernel(env->tr.base + intredir_offset);
1144 mask = 1 << (unsigned char)(intno & 7);
1145
1146 /* bit set means no redirection. */
1147 if ((val & mask) != 0) {
1148 return false;
1149 }
1150 return true;
1151
1152fail:
1153 raise_exception_err(EXCP0D_GPF, 0);
1154 return true;
1155}
1156
1157/* V86 mode software interrupt with CR4.VME=1 */
1158static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1159{
1160 target_ulong ptr, ssp;
1161 int selector;
1162 uint32_t offset, esp;
1163 uint32_t old_cs, old_eflags;
1164 uint32_t iopl;
1165
1166 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1167
1168 if (!is_vme_irq_redirected(intno))
1169 {
1170 if (iopl == 3)
1171 {
1172 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1173 return;
1174 }
1175 else
1176 raise_exception_err(EXCP0D_GPF, 0);
1177 }
1178
1179 /* virtual mode idt is at linear address 0 */
1180 ptr = 0 + intno * 4;
1181 offset = lduw_kernel(ptr);
1182 selector = lduw_kernel(ptr + 2);
1183 esp = ESP;
1184 ssp = env->segs[R_SS].base;
1185 old_cs = env->segs[R_CS].selector;
1186
1187 old_eflags = compute_eflags();
1188 if (iopl < 3)
1189 {
1190 /* copy VIF into IF and set IOPL to 3 */
1191 if (env->eflags & VIF_MASK)
1192 old_eflags |= IF_MASK;
1193 else
1194 old_eflags &= ~IF_MASK;
1195
1196 old_eflags |= (3 << IOPL_SHIFT);
1197 }
1198
1199 /* XXX: use SS segment size ? */
1200 PUSHW(ssp, esp, 0xffff, old_eflags);
1201 PUSHW(ssp, esp, 0xffff, old_cs);
1202 PUSHW(ssp, esp, 0xffff, next_eip);
1203
1204 /* update processor state */
1205 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1206 env->eip = offset;
1207 env->segs[R_CS].selector = selector;
1208 env->segs[R_CS].base = (selector << 4);
1209 env->eflags &= ~(TF_MASK | RF_MASK);
1210
1211 if (iopl < 3)
1212 env->eflags &= ~VIF_MASK;
1213 else
1214 env->eflags &= ~IF_MASK;
1215}
1216
1217#endif /* VBOX */
1218
1219#ifdef TARGET_X86_64
1220
1221#define PUSHQ(sp, val)\
1222{\
1223 sp -= 8;\
1224 stq_kernel(sp, (val));\
1225}
1226
1227#define POPQ(sp, val)\
1228{\
1229 val = ldq_kernel(sp);\
1230 sp += 8;\
1231}
1232
1233static inline target_ulong get_rsp_from_tss(int level)
1234{
1235 int index;
1236
1237#if 0
1238 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1239 env->tr.base, env->tr.limit);
1240#endif
1241
1242 if (!(env->tr.flags & DESC_P_MASK))
1243 cpu_abort(env, "invalid tss");
1244 index = 8 * level + 4;
1245 if ((index + 7) > env->tr.limit)
1246 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1247 return ldq_kernel(env->tr.base + index);
1248}
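/* Layout note (illustrative, not in the upstream file): in the 64-bit TSS,
   RSP0..RSP2 are stored at offsets 4, 12 and 20 and IST1..IST7 at offsets
   36..84, so callers pass either the target DPL (0..2) or ist + 3 as 'level'
   and index = 8 * level + 4 lands on the right 8-byte slot. */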
1249
1250/* 64 bit interrupt */
1251static void do_interrupt64(int intno, int is_int, int error_code,
1252 target_ulong next_eip, int is_hw)
1253{
1254 SegmentCache *dt;
1255 target_ulong ptr;
1256 int type, dpl, selector, cpl, ist;
1257 int has_error_code, new_stack;
1258 uint32_t e1, e2, e3, ss;
1259 target_ulong old_eip, esp, offset;
1260
1261#ifdef VBOX
1262 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1263 cpu_loop_exit();
1264#endif
1265
1266 has_error_code = 0;
1267 if (!is_int && !is_hw)
1268 has_error_code = exeption_has_error_code(intno);
1269 if (is_int)
1270 old_eip = next_eip;
1271 else
1272 old_eip = env->eip;
1273
1274 dt = &env->idt;
1275 if (intno * 16 + 15 > dt->limit)
1276 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1277 ptr = dt->base + intno * 16;
1278 e1 = ldl_kernel(ptr);
1279 e2 = ldl_kernel(ptr + 4);
1280 e3 = ldl_kernel(ptr + 8);
1281 /* check gate type */
1282 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1283 switch(type) {
1284 case 14: /* 386 interrupt gate */
1285 case 15: /* 386 trap gate */
1286 break;
1287 default:
1288 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1289 break;
1290 }
1291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1292 cpl = env->hflags & HF_CPL_MASK;
1293 /* check privilege if software int */
1294 if (is_int && dpl < cpl)
1295 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1296 /* check valid bit */
1297 if (!(e2 & DESC_P_MASK))
1298 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1299 selector = e1 >> 16;
1300 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1301 ist = e2 & 7;
1302 if ((selector & 0xfffc) == 0)
1303 raise_exception_err(EXCP0D_GPF, 0);
1304
1305 if (load_segment(&e1, &e2, selector) != 0)
1306 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1307 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1308 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1310 if (dpl > cpl)
1311 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1312 if (!(e2 & DESC_P_MASK))
1313 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1314 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1315 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1316 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1317 /* to inner privilege */
1318 if (ist != 0)
1319 esp = get_rsp_from_tss(ist + 3);
1320 else
1321 esp = get_rsp_from_tss(dpl);
1322 esp &= ~0xfLL; /* align stack */
1323 ss = 0;
1324 new_stack = 1;
1325 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1326 /* to same privilege */
1327 if (env->eflags & VM_MASK)
1328 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1329 new_stack = 0;
1330 if (ist != 0)
1331 esp = get_rsp_from_tss(ist + 3);
1332 else
1333 esp = ESP;
1334 esp &= ~0xfLL; /* align stack */
1335 dpl = cpl;
1336 } else {
1337 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1338 new_stack = 0; /* avoid warning */
1339 esp = 0; /* avoid warning */
1340 }
1341
1342 PUSHQ(esp, env->segs[R_SS].selector);
1343 PUSHQ(esp, ESP);
1344 PUSHQ(esp, compute_eflags());
1345 PUSHQ(esp, env->segs[R_CS].selector);
1346 PUSHQ(esp, old_eip);
1347 if (has_error_code) {
1348 PUSHQ(esp, error_code);
1349 }
1350
1351 if (new_stack) {
1352 ss = 0 | dpl;
1353#ifndef VBOX
1354 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1355#else
1356 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1357#endif
1358 }
1359 ESP = esp;
1360
1361 selector = (selector & ~3) | dpl;
1362 cpu_x86_load_seg_cache(env, R_CS, selector,
1363 get_seg_base(e1, e2),
1364 get_seg_limit(e1, e2),
1365 e2);
1366 cpu_x86_set_cpl(env, dpl);
1367 env->eip = offset;
1368
1369 /* an interrupt gate clears the IF mask */
1370 if ((type & 1) == 0) {
1371 env->eflags &= ~IF_MASK;
1372 }
1373#ifndef VBOX
1374 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1375#else /* VBOX */
1376 /*
1377 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1378 * gets confused by seemingly changed EFLAGS. See #3491 and
1379 * public bug #2341.
1380 */
1381 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1382#endif /* VBOX */
1383}
1384#endif
1385
1386#ifdef TARGET_X86_64
1387#if defined(CONFIG_USER_ONLY)
1388void helper_syscall(int next_eip_addend)
1389{
1390 env->exception_index = EXCP_SYSCALL;
1391 env->exception_next_eip = env->eip + next_eip_addend;
1392 cpu_loop_exit();
1393}
1394#else
1395void helper_syscall(int next_eip_addend)
1396{
1397 int selector;
1398
1399 if (!(env->efer & MSR_EFER_SCE)) {
1400 raise_exception_err(EXCP06_ILLOP, 0);
1401 }
1402 selector = (env->star >> 32) & 0xffff;
1403 if (env->hflags & HF_LMA_MASK) {
1404 int code64;
1405
1406 ECX = env->eip + next_eip_addend;
1407 env->regs[11] = compute_eflags();
1408
1409 code64 = env->hflags & HF_CS64_MASK;
1410
1411 cpu_x86_set_cpl(env, 0);
1412 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1413 0, 0xffffffff,
1414 DESC_G_MASK | DESC_P_MASK |
1415 DESC_S_MASK |
1416 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1417 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK |
1421 DESC_W_MASK | DESC_A_MASK);
1422 env->eflags &= ~env->fmask;
1423 load_eflags(env->eflags, 0);
1424 if (code64)
1425 env->eip = env->lstar;
1426 else
1427 env->eip = env->cstar;
1428 } else {
1429 ECX = (uint32_t)(env->eip + next_eip_addend);
1430
1431 cpu_x86_set_cpl(env, 0);
1432 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK |
1436 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1437 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1438 0, 0xffffffff,
1439 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1440 DESC_S_MASK |
1441 DESC_W_MASK | DESC_A_MASK);
1442 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1443 env->eip = (uint32_t)env->star;
1444 }
1445}
1446#endif
1447#endif
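/* MSR note (illustrative, not in the upstream file): SYSCALL above takes its
   CS selector from STAR[47:32] (with SS implied at CS + 8) and jumps to LSTAR
   in 64-bit code or CSTAR in compatibility mode, while SYSRET below derives
   the ring-3 CS/SS from STAR[63:48]. In long mode the SFMASK MSR (env->fmask)
   selects which RFLAGS bits are cleared on entry. */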
1448
1449#ifdef TARGET_X86_64
1450void helper_sysret(int dflag)
1451{
1452 int cpl, selector;
1453
1454 if (!(env->efer & MSR_EFER_SCE)) {
1455 raise_exception_err(EXCP06_ILLOP, 0);
1456 }
1457 cpl = env->hflags & HF_CPL_MASK;
1458 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1459 raise_exception_err(EXCP0D_GPF, 0);
1460 }
1461 selector = (env->star >> 48) & 0xffff;
1462 if (env->hflags & HF_LMA_MASK) {
1463 if (dflag == 2) {
1464 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1465 0, 0xffffffff,
1466 DESC_G_MASK | DESC_P_MASK |
1467 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1468 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1469 DESC_L_MASK);
1470 env->eip = ECX;
1471 } else {
1472 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1473 0, 0xffffffff,
1474 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1475 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1476 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1477 env->eip = (uint32_t)ECX;
1478 }
1479 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1480 0, 0xffffffff,
1481 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1482 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1483 DESC_W_MASK | DESC_A_MASK);
1484 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1485 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1486 cpu_x86_set_cpl(env, 3);
1487 } else {
1488 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1489 0, 0xffffffff,
1490 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1491 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1492 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1493 env->eip = (uint32_t)ECX;
1494 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1495 0, 0xffffffff,
1496 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1497 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1498 DESC_W_MASK | DESC_A_MASK);
1499 env->eflags |= IF_MASK;
1500 cpu_x86_set_cpl(env, 3);
1501 }
1502}
1503#endif
1504
1505#ifdef VBOX
1506
1507/**
1508 * Checks and processes external VMM events.
1509 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1510 */
1511void helper_external_event(void)
1512{
1513# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1514 uintptr_t uSP;
1515# ifdef RT_ARCH_AMD64
1516 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1517# else
1518 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1519# endif
1520 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1521# endif
1522 /* Keep in sync with flags checked by gen_check_external_event() */
1523 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1524 {
1525 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1526 ~CPU_INTERRUPT_EXTERNAL_HARD);
1527 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1528 }
1529 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1530 {
1531 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1532 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1533 cpu_exit(env);
1534 }
1535 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1536 {
1537 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1538 ~CPU_INTERRUPT_EXTERNAL_DMA);
1539 remR3DmaRun(env);
1540 }
1541 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1542 {
1543 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1544 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1545 remR3TimersRun(env);
1546 }
1547 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1548 {
1549 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1550 ~CPU_INTERRUPT_EXTERNAL_HARD);
1551 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1552 }
1553}
1554
1555/* helper for recording call instruction addresses for later scanning */
1556void helper_record_call()
1557{
1558 if ( !(env->state & CPU_RAW_RING0)
1559 && (env->cr[0] & CR0_PG_MASK)
1560 && !(env->eflags & X86_EFL_IF))
1561 remR3RecordCall(env);
1562}
1563
1564#endif /* VBOX */
1565
1566/* real mode interrupt */
1567static void do_interrupt_real(int intno, int is_int, int error_code,
1568 unsigned int next_eip)
1569{
1570 SegmentCache *dt;
1571 target_ulong ptr, ssp;
1572 int selector;
1573 uint32_t offset, esp;
1574 uint32_t old_cs, old_eip;
1575
1576 /* real mode (simpler !) */
1577 dt = &env->idt;
1578#ifndef VBOX
1579 if (intno * 4 + 3 > dt->limit)
1580#else
1581 if ((unsigned)intno * 4 + 3 > dt->limit)
1582#endif
1583 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1584 ptr = dt->base + intno * 4;
1585 offset = lduw_kernel(ptr);
1586 selector = lduw_kernel(ptr + 2);
1587 esp = ESP;
1588 ssp = env->segs[R_SS].base;
1589 if (is_int)
1590 old_eip = next_eip;
1591 else
1592 old_eip = env->eip;
1593 old_cs = env->segs[R_CS].selector;
1594 /* XXX: use SS segment size ? */
1595 PUSHW(ssp, esp, 0xffff, compute_eflags());
1596 PUSHW(ssp, esp, 0xffff, old_cs);
1597 PUSHW(ssp, esp, 0xffff, old_eip);
1598
1599 /* update processor state */
1600 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1601 env->eip = offset;
1602 env->segs[R_CS].selector = selector;
1603 env->segs[R_CS].base = (selector << 4);
1604 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1605}
1606
1607/* fake user mode interrupt */
1608void do_interrupt_user(int intno, int is_int, int error_code,
1609 target_ulong next_eip)
1610{
1611 SegmentCache *dt;
1612 target_ulong ptr;
1613 int dpl, cpl, shift;
1614 uint32_t e2;
1615
1616 dt = &env->idt;
1617 if (env->hflags & HF_LMA_MASK) {
1618 shift = 4;
1619 } else {
1620 shift = 3;
1621 }
1622 ptr = dt->base + (intno << shift);
1623 e2 = ldl_kernel(ptr + 4);
1624
1625 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1626 cpl = env->hflags & HF_CPL_MASK;
1627 /* check privilege if software int */
1628 if (is_int && dpl < cpl)
1629 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1630
1631 /* Since we emulate only user space, we cannot do more than
1632 exit the emulation with a suitable exception and error
1633 code */
1634 if (is_int)
1635 EIP = next_eip;
1636}
1637
1638#if !defined(CONFIG_USER_ONLY)
1639static void handle_even_inj(int intno, int is_int, int error_code,
1640 int is_hw, int rm)
1641{
1642 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1643 if (!(event_inj & SVM_EVTINJ_VALID)) {
1644 int type;
1645 if (is_int)
1646 type = SVM_EVTINJ_TYPE_SOFT;
1647 else
1648 type = SVM_EVTINJ_TYPE_EXEPT;
1649 event_inj = intno | type | SVM_EVTINJ_VALID;
1650 if (!rm && exeption_has_error_code(intno)) {
1651 event_inj |= SVM_EVTINJ_VALID_ERR;
1652 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1653 }
1654 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1655 }
1656}
1657#endif
1658
1659/*
1660 * Begin execution of an interrupt. is_int is TRUE if coming from
1661 * the int instruction. next_eip is the EIP value AFTER the interrupt
1662 * instruction. It is only relevant if is_int is TRUE.
1663 */
1664void do_interrupt(int intno, int is_int, int error_code,
1665 target_ulong next_eip, int is_hw)
1666{
1667 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1668 if ((env->cr[0] & CR0_PE_MASK)) {
1669 static int count;
1670 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1671 count, intno, error_code, is_int,
1672 env->hflags & HF_CPL_MASK,
1673 env->segs[R_CS].selector, EIP,
1674 (int)env->segs[R_CS].base + EIP,
1675 env->segs[R_SS].selector, ESP);
1676 if (intno == 0x0e) {
1677 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1678 } else {
1679 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1680 }
1681 qemu_log("\n");
1682 log_cpu_state(env, X86_DUMP_CCOP);
1683#if 0
1684 {
1685 int i;
1686 uint8_t *ptr;
1687 qemu_log(" code=");
1688 ptr = env->segs[R_CS].base + env->eip;
1689 for(i = 0; i < 16; i++) {
1690 qemu_log(" %02x", ldub(ptr + i));
1691 }
1692 qemu_log("\n");
1693 }
1694#endif
1695 count++;
1696 }
1697 }
1698#ifdef VBOX
1699 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1700 if (is_int) {
1701 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1702 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1703 } else {
1704 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1705 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1706 }
1707 }
1708#endif
1709 if (env->cr[0] & CR0_PE_MASK) {
1710#if !defined(CONFIG_USER_ONLY)
1711 if (env->hflags & HF_SVMI_MASK)
1712 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1713#endif
1714#ifdef TARGET_X86_64
1715 if (env->hflags & HF_LMA_MASK) {
1716 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1717 } else
1718#endif
1719 {
1720#ifdef VBOX
1721 /* int xx *, v86 code and VME enabled? */
1722 if ( (env->eflags & VM_MASK)
1723 && (env->cr[4] & CR4_VME_MASK)
1724 && is_int
1725 && !is_hw
1726 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1727 )
1728 do_soft_interrupt_vme(intno, error_code, next_eip);
1729 else
1730#endif /* VBOX */
1731 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1732 }
1733 } else {
1734#if !defined(CONFIG_USER_ONLY)
1735 if (env->hflags & HF_SVMI_MASK)
1736 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1737#endif
1738 do_interrupt_real(intno, is_int, error_code, next_eip);
1739 }
1740
1741#if !defined(CONFIG_USER_ONLY)
1742 if (env->hflags & HF_SVMI_MASK) {
1743 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1744 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1745 }
1746#endif
1747}
1748
1749/* This should come from sysemu.h - if we could include it here... */
1750void qemu_system_reset_request(void);
1751
1752/*
1753 * Check nested exceptions and change to double or triple fault if
1754 * needed. It should only be called if this is not an interrupt.
1755 * Returns the new exception number.
1756 */
1757static int check_exception(int intno, int *error_code)
1758{
1759 int first_contributory = env->old_exception == 0 ||
1760 (env->old_exception >= 10 &&
1761 env->old_exception <= 13);
1762 int second_contributory = intno == 0 ||
1763 (intno >= 10 && intno <= 13);
1764
1765 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1766 env->old_exception, intno);
1767
1768#if !defined(CONFIG_USER_ONLY)
1769 if (env->old_exception == EXCP08_DBLE) {
1770 if (env->hflags & HF_SVMI_MASK)
1771 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1772
1773 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1774
1775# ifndef VBOX
1776 qemu_system_reset_request();
1777# else
1778 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1779# endif
1780 return EXCP_HLT;
1781 }
1782#endif
1783
1784 if ((first_contributory && second_contributory)
1785 || (env->old_exception == EXCP0E_PAGE &&
1786 (second_contributory || (intno == EXCP0E_PAGE)))) {
1787 intno = EXCP08_DBLE;
1788 *error_code = 0;
1789 }
1790
1791 if (second_contributory || (intno == EXCP0E_PAGE) ||
1792 (intno == EXCP08_DBLE))
1793 env->old_exception = intno;
1794
1795 return intno;
1796}
1797
1798/*
1799 * Signal an interrupt. It is executed in the main CPU loop.
1800 * is_int is TRUE if coming from the int instruction. next_eip is the
1801 * EIP value AFTER the interrupt instruction. It is only relevant if
1802 * is_int is TRUE.
1803 */
1804static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1805 int next_eip_addend)
1806{
1807#if defined(VBOX) && defined(DEBUG)
1808 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1809#endif
1810 if (!is_int) {
1811 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1812 intno = check_exception(intno, &error_code);
1813 } else {
1814 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1815 }
1816
1817 env->exception_index = intno;
1818 env->error_code = error_code;
1819 env->exception_is_int = is_int;
1820 env->exception_next_eip = env->eip + next_eip_addend;
1821 cpu_loop_exit();
1822}
1823
1824/* shortcuts to generate exceptions */
1825
1826void raise_exception_err(int exception_index, int error_code)
1827{
1828 raise_interrupt(exception_index, 0, error_code, 0);
1829}
1830
1831void raise_exception(int exception_index)
1832{
1833 raise_interrupt(exception_index, 0, 0, 0);
1834}
1835
1836void raise_exception_env(int exception_index, CPUState *nenv)
1837{
1838 env = nenv;
1839 raise_exception(exception_index);
1840}
1841/* SMM support */
1842
1843#if defined(CONFIG_USER_ONLY)
1844
1845void do_smm_enter(void)
1846{
1847}
1848
1849void helper_rsm(void)
1850{
1851}
1852
1853#else
1854
1855#ifdef TARGET_X86_64
1856#define SMM_REVISION_ID 0x00020064
1857#else
1858#define SMM_REVISION_ID 0x00020000
1859#endif
1860
1861void do_smm_enter(void)
1862{
1863 target_ulong sm_state;
1864 SegmentCache *dt;
1865 int i, offset;
1866
1867 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1868 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1869
1870 env->hflags |= HF_SMM_MASK;
1871 cpu_smm_update(env);
1872
1873 sm_state = env->smbase + 0x8000;
1874
1875#ifdef TARGET_X86_64
1876 for(i = 0; i < 6; i++) {
1877 dt = &env->segs[i];
1878 offset = 0x7e00 + i * 16;
1879 stw_phys(sm_state + offset, dt->selector);
1880 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1881 stl_phys(sm_state + offset + 4, dt->limit);
1882 stq_phys(sm_state + offset + 8, dt->base);
1883 }
1884
1885 stq_phys(sm_state + 0x7e68, env->gdt.base);
1886 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1887
1888 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1889 stq_phys(sm_state + 0x7e78, env->ldt.base);
1890 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1891 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1892
1893 stq_phys(sm_state + 0x7e88, env->idt.base);
1894 stl_phys(sm_state + 0x7e84, env->idt.limit);
1895
1896 stw_phys(sm_state + 0x7e90, env->tr.selector);
1897 stq_phys(sm_state + 0x7e98, env->tr.base);
1898 stl_phys(sm_state + 0x7e94, env->tr.limit);
1899 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1900
1901 stq_phys(sm_state + 0x7ed0, env->efer);
1902
1903 stq_phys(sm_state + 0x7ff8, EAX);
1904 stq_phys(sm_state + 0x7ff0, ECX);
1905 stq_phys(sm_state + 0x7fe8, EDX);
1906 stq_phys(sm_state + 0x7fe0, EBX);
1907 stq_phys(sm_state + 0x7fd8, ESP);
1908 stq_phys(sm_state + 0x7fd0, EBP);
1909 stq_phys(sm_state + 0x7fc8, ESI);
1910 stq_phys(sm_state + 0x7fc0, EDI);
1911 for(i = 8; i < 16; i++)
1912 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1913 stq_phys(sm_state + 0x7f78, env->eip);
1914 stl_phys(sm_state + 0x7f70, compute_eflags());
1915 stl_phys(sm_state + 0x7f68, env->dr[6]);
1916 stl_phys(sm_state + 0x7f60, env->dr[7]);
1917
1918 stl_phys(sm_state + 0x7f48, env->cr[4]);
1919 stl_phys(sm_state + 0x7f50, env->cr[3]);
1920 stl_phys(sm_state + 0x7f58, env->cr[0]);
1921
1922 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1923 stl_phys(sm_state + 0x7f00, env->smbase);
1924#else
1925 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1926 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1927 stl_phys(sm_state + 0x7ff4, compute_eflags());
1928 stl_phys(sm_state + 0x7ff0, env->eip);
1929 stl_phys(sm_state + 0x7fec, EDI);
1930 stl_phys(sm_state + 0x7fe8, ESI);
1931 stl_phys(sm_state + 0x7fe4, EBP);
1932 stl_phys(sm_state + 0x7fe0, ESP);
1933 stl_phys(sm_state + 0x7fdc, EBX);
1934 stl_phys(sm_state + 0x7fd8, EDX);
1935 stl_phys(sm_state + 0x7fd4, ECX);
1936 stl_phys(sm_state + 0x7fd0, EAX);
1937 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1938 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1939
1940 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1941 stl_phys(sm_state + 0x7f64, env->tr.base);
1942 stl_phys(sm_state + 0x7f60, env->tr.limit);
1943 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1944
1945 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1946 stl_phys(sm_state + 0x7f80, env->ldt.base);
1947 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1948 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1949
1950 stl_phys(sm_state + 0x7f74, env->gdt.base);
1951 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1952
1953 stl_phys(sm_state + 0x7f58, env->idt.base);
1954 stl_phys(sm_state + 0x7f54, env->idt.limit);
1955
1956 for(i = 0; i < 6; i++) {
1957 dt = &env->segs[i];
1958 if (i < 3)
1959 offset = 0x7f84 + i * 12;
1960 else
1961 offset = 0x7f2c + (i - 3) * 12;
1962 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1963 stl_phys(sm_state + offset + 8, dt->base);
1964 stl_phys(sm_state + offset + 4, dt->limit);
1965 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1966 }
1967 stl_phys(sm_state + 0x7f14, env->cr[4]);
1968
1969 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1970 stl_phys(sm_state + 0x7ef8, env->smbase);
1971#endif
1972 /* init SMM cpu state */
1973
1974#ifdef TARGET_X86_64
1975 cpu_load_efer(env, 0);
1976#endif
1977 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1978 env->eip = 0x00008000;
1979 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1980 0xffffffff, 0);
1981 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1982 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1983 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1984 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1985 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1986
1987 cpu_x86_update_cr0(env,
1988 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1989 cpu_x86_update_cr4(env, 0);
1990 env->dr[7] = 0x00000400;
1991 CC_OP = CC_OP_EFLAGS;
1992}
1993
1994void helper_rsm(void)
1995{
1996#ifdef VBOX
1997 cpu_abort(env, "helper_rsm");
1998#else /* !VBOX */
1999 target_ulong sm_state;
2000 int i, offset;
2001 uint32_t val;
2002
2003 sm_state = env->smbase + 0x8000;
2004#ifdef TARGET_X86_64
2005 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2006
2007 for(i = 0; i < 6; i++) {
2008 offset = 0x7e00 + i * 16;
2009 cpu_x86_load_seg_cache(env, i,
2010 lduw_phys(sm_state + offset),
2011 ldq_phys(sm_state + offset + 8),
2012 ldl_phys(sm_state + offset + 4),
2013 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2014 }
2015
2016 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2017 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2018
2019 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2020 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2021 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2022 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2023#ifdef VBOX
2024 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2025 env->ldt.newselector = 0;
2026#endif
2027
2028 env->idt.base = ldq_phys(sm_state + 0x7e88);
2029 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2030
2031 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2032 env->tr.base = ldq_phys(sm_state + 0x7e98);
2033 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2034 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2035#ifdef VBOX
2036 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2037 env->tr.newselector = 0;
2038#endif
2039
2040 EAX = ldq_phys(sm_state + 0x7ff8);
2041 ECX = ldq_phys(sm_state + 0x7ff0);
2042 EDX = ldq_phys(sm_state + 0x7fe8);
2043 EBX = ldq_phys(sm_state + 0x7fe0);
2044 ESP = ldq_phys(sm_state + 0x7fd8);
2045 EBP = ldq_phys(sm_state + 0x7fd0);
2046 ESI = ldq_phys(sm_state + 0x7fc8);
2047 EDI = ldq_phys(sm_state + 0x7fc0);
2048 for(i = 8; i < 16; i++)
2049 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2050 env->eip = ldq_phys(sm_state + 0x7f78);
2051 load_eflags(ldl_phys(sm_state + 0x7f70),
2052 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2053 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2054 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2055
2056 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2057 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2058 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2059
2060 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2061 if (val & 0x20000) {
2062 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2063 }
2064#else
2065 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2066 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2067 load_eflags(ldl_phys(sm_state + 0x7ff4),
2068 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2069 env->eip = ldl_phys(sm_state + 0x7ff0);
2070 EDI = ldl_phys(sm_state + 0x7fec);
2071 ESI = ldl_phys(sm_state + 0x7fe8);
2072 EBP = ldl_phys(sm_state + 0x7fe4);
2073 ESP = ldl_phys(sm_state + 0x7fe0);
2074 EBX = ldl_phys(sm_state + 0x7fdc);
2075 EDX = ldl_phys(sm_state + 0x7fd8);
2076 ECX = ldl_phys(sm_state + 0x7fd4);
2077 EAX = ldl_phys(sm_state + 0x7fd0);
2078 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2079 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2080
2081 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2082 env->tr.base = ldl_phys(sm_state + 0x7f64);
2083 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2084 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2085#ifdef VBOX
2086 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2087 env->tr.newselector = 0;
2088#endif
2089
2090 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2091 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2092 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2093 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2094#ifdef VBOX
2095 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2096 env->ldt.newselector = 0;
2097#endif
2098
2099 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2100 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2101
2102 env->idt.base = ldl_phys(sm_state + 0x7f58);
2103 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2104
2105 for(i = 0; i < 6; i++) {
2106 if (i < 3)
2107 offset = 0x7f84 + i * 12;
2108 else
2109 offset = 0x7f2c + (i - 3) * 12;
2110 cpu_x86_load_seg_cache(env, i,
2111 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2112 ldl_phys(sm_state + offset + 8),
2113 ldl_phys(sm_state + offset + 4),
2114 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2115 }
2116 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2117
2118 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2119 if (val & 0x20000) {
2120 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2121 }
2122#endif
2123 CC_OP = CC_OP_EFLAGS;
2124 env->hflags &= ~HF_SMM_MASK;
2125 cpu_smm_update(env);
2126
2127 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2128 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2129#endif /* !VBOX */
2130}
2131
2132#endif /* !CONFIG_USER_ONLY */
2133
2134
2135/* division, flags are undefined */
2136
2137void helper_divb_AL(target_ulong t0)
2138{
2139 unsigned int num, den, q, r;
2140
2141 num = (EAX & 0xffff);
2142 den = (t0 & 0xff);
2143 if (den == 0) {
2144 raise_exception(EXCP00_DIVZ);
2145 }
2146 q = (num / den);
2147 if (q > 0xff)
2148 raise_exception(EXCP00_DIVZ);
2149 q &= 0xff;
2150 r = (num % den) & 0xff;
2151 EAX = (EAX & ~0xffff) | (r << 8) | q;
2152}
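/* Worked example for the byte divide above: AX = 0x0107 (263) divided by 0x10
 * gives quotient 16 and remainder 7, so AX becomes 0x0710. With AX = 0x1000
 * the quotient (256) no longer fits in AL and the helper raises #DE instead. */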
2153
2154void helper_idivb_AL(target_ulong t0)
2155{
2156 int num, den, q, r;
2157
2158 num = (int16_t)EAX;
2159 den = (int8_t)t0;
2160 if (den == 0) {
2161 raise_exception(EXCP00_DIVZ);
2162 }
2163 q = (num / den);
2164 if (q != (int8_t)q)
2165 raise_exception(EXCP00_DIVZ);
2166 q &= 0xff;
2167 r = (num % den) & 0xff;
2168 EAX = (EAX & ~0xffff) | (r << 8) | q;
2169}
2170
2171void helper_divw_AX(target_ulong t0)
2172{
2173 unsigned int num, den, q, r;
2174
2175 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2176 den = (t0 & 0xffff);
2177 if (den == 0) {
2178 raise_exception(EXCP00_DIVZ);
2179 }
2180 q = (num / den);
2181 if (q > 0xffff)
2182 raise_exception(EXCP00_DIVZ);
2183 q &= 0xffff;
2184 r = (num % den) & 0xffff;
2185 EAX = (EAX & ~0xffff) | q;
2186 EDX = (EDX & ~0xffff) | r;
2187}
2188
2189void helper_idivw_AX(target_ulong t0)
2190{
2191 int num, den, q, r;
2192
2193 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2194 den = (int16_t)t0;
2195 if (den == 0) {
2196 raise_exception(EXCP00_DIVZ);
2197 }
2198 q = (num / den);
2199 if (q != (int16_t)q)
2200 raise_exception(EXCP00_DIVZ);
2201 q &= 0xffff;
2202 r = (num % den) & 0xffff;
2203 EAX = (EAX & ~0xffff) | q;
2204 EDX = (EDX & ~0xffff) | r;
2205}
2206
2207void helper_divl_EAX(target_ulong t0)
2208{
2209 unsigned int den, r;
2210 uint64_t num, q;
2211
2212 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2213 den = t0;
2214 if (den == 0) {
2215 raise_exception(EXCP00_DIVZ);
2216 }
2217 q = (num / den);
2218 r = (num % den);
2219 if (q > 0xffffffff)
2220 raise_exception(EXCP00_DIVZ);
2221 EAX = (uint32_t)q;
2222 EDX = (uint32_t)r;
2223}
2224
2225void helper_idivl_EAX(target_ulong t0)
2226{
2227 int den, r;
2228 int64_t num, q;
2229
2230 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2231 den = t0;
2232 if (den == 0) {
2233 raise_exception(EXCP00_DIVZ);
2234 }
2235 q = (num / den);
2236 r = (num % den);
2237 if (q != (int32_t)q)
2238 raise_exception(EXCP00_DIVZ);
2239 EAX = (uint32_t)q;
2240 EDX = (uint32_t)r;
2241}
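/* The q != (int32_t)q check above also catches the classic overflow case:
 * EDX:EAX = 0xffffffff:80000000 (-2^31) divided by -1 would give +2^31, which
 * does not fit in a signed 32-bit quotient, so #DE is raised rather than
 * silently wrapping. */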
2242
2243/* bcd */
2244
2245/* XXX: should raise #DE (divide error) when the immediate base is 0 */
2246void helper_aam(int base)
2247{
2248 int al, ah;
2249 al = EAX & 0xff;
2250 ah = al / base;
2251 al = al % base;
2252 EAX = (EAX & ~0xffff) | al | (ah << 8);
2253 CC_DST = al;
2254}
2255
2256void helper_aad(int base)
2257{
2258 int al, ah;
2259 al = EAX & 0xff;
2260 ah = (EAX >> 8) & 0xff;
2261 al = ((ah * base) + al) & 0xff;
2262 EAX = (EAX & ~0xffff) | al;
2263 CC_DST = al;
2264}
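/* Worked example: AAM with the default base 10 and AL = 0x17 (23) leaves
 * AH = 2, AL = 3 (AX = 0x0203); AAD with base 10 reverses it, clearing AH and
 * leaving AX = 0x0017. */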
2265
2266void helper_aaa(void)
2267{
2268 int icarry;
2269 int al, ah, af;
2270 int eflags;
2271
2272 eflags = helper_cc_compute_all(CC_OP);
2273 af = eflags & CC_A;
2274 al = EAX & 0xff;
2275 ah = (EAX >> 8) & 0xff;
2276
2277 icarry = (al > 0xf9);
2278 if (((al & 0x0f) > 9 ) || af) {
2279 al = (al + 6) & 0x0f;
2280 ah = (ah + 1 + icarry) & 0xff;
2281 eflags |= CC_C | CC_A;
2282 } else {
2283 eflags &= ~(CC_C | CC_A);
2284 al &= 0x0f;
2285 }
2286 EAX = (EAX & ~0xffff) | al | (ah << 8);
2287 CC_SRC = eflags;
2288}
2289
2290void helper_aas(void)
2291{
2292 int icarry;
2293 int al, ah, af;
2294 int eflags;
2295
2296 eflags = helper_cc_compute_all(CC_OP);
2297 af = eflags & CC_A;
2298 al = EAX & 0xff;
2299 ah = (EAX >> 8) & 0xff;
2300
2301 icarry = (al < 6);
2302 if (((al & 0x0f) > 9 ) || af) {
2303 al = (al - 6) & 0x0f;
2304 ah = (ah - 1 - icarry) & 0xff;
2305 eflags |= CC_C | CC_A;
2306 } else {
2307 eflags &= ~(CC_C | CC_A);
2308 al &= 0x0f;
2309 }
2310 EAX = (EAX & ~0xffff) | al | (ah << 8);
2311 CC_SRC = eflags;
2312}
2313
2314void helper_daa(void)
2315{
2316 int al, af, cf;
2317 int eflags;
2318
2319 eflags = helper_cc_compute_all(CC_OP);
2320 cf = eflags & CC_C;
2321 af = eflags & CC_A;
2322 al = EAX & 0xff;
2323
2324 eflags = 0;
2325 if (((al & 0x0f) > 9 ) || af) {
2326 al = (al + 6) & 0xff;
2327 eflags |= CC_A;
2328 }
2329 if ((al > 0x9f) || cf) {
2330 al = (al + 0x60) & 0xff;
2331 eflags |= CC_C;
2332 }
2333 EAX = (EAX & ~0xff) | al;
2334 /* well, speed is not an issue here, so we compute the flags by hand */
2335 eflags |= (al == 0) << 6; /* zf */
2336 eflags |= parity_table[al]; /* pf */
2337 eflags |= (al & 0x80); /* sf */
2338 CC_SRC = eflags;
2339}
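/* Worked example for DAA: with AL = 0x9C and AF = CF = 0 the low nibble is
 * above 9, so 6 is added (AL = 0xA2, AF set); the result is then above 0x9F,
 * so 0x60 is added as well, giving AL = 0x02 with CF set. */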
2340
2341void helper_das(void)
2342{
2343 int al, al1, af, cf;
2344 int eflags;
2345
2346 eflags = helper_cc_compute_all(CC_OP);
2347 cf = eflags & CC_C;
2348 af = eflags & CC_A;
2349 al = EAX & 0xff;
2350
2351 eflags = 0;
2352 al1 = al;
2353 if (((al & 0x0f) > 9 ) || af) {
2354 eflags |= CC_A;
2355 if (al < 6 || cf)
2356 eflags |= CC_C;
2357 al = (al - 6) & 0xff;
2358 }
2359 if ((al1 > 0x99) || cf) {
2360 al = (al - 0x60) & 0xff;
2361 eflags |= CC_C;
2362 }
2363 EAX = (EAX & ~0xff) | al;
2364 /* well, speed is not an issue here, so we compute the flags by hand */
2365 eflags |= (al == 0) << 6; /* zf */
2366 eflags |= parity_table[al]; /* pf */
2367 eflags |= (al & 0x80); /* sf */
2368 CC_SRC = eflags;
2369}
2370
2371void helper_into(int next_eip_addend)
2372{
2373 int eflags;
2374 eflags = helper_cc_compute_all(CC_OP);
2375 if (eflags & CC_O) {
2376 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2377 }
2378}
2379
2380void helper_cmpxchg8b(target_ulong a0)
2381{
2382 uint64_t d;
2383 int eflags;
2384
2385 eflags = helper_cc_compute_all(CC_OP);
2386 d = ldq(a0);
2387 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2388 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2389 eflags |= CC_Z;
2390 } else {
2391 /* always do the store */
2392 stq(a0, d);
2393 EDX = (uint32_t)(d >> 32);
2394 EAX = (uint32_t)d;
2395 eflags &= ~CC_Z;
2396 }
2397 CC_SRC = eflags;
2398}
2399
2400#ifdef TARGET_X86_64
2401void helper_cmpxchg16b(target_ulong a0)
2402{
2403 uint64_t d0, d1;
2404 int eflags;
2405
2406 if ((a0 & 0xf) != 0)
2407 raise_exception(EXCP0D_GPF);
2408 eflags = helper_cc_compute_all(CC_OP);
2409 d0 = ldq(a0);
2410 d1 = ldq(a0 + 8);
2411 if (d0 == EAX && d1 == EDX) {
2412 stq(a0, EBX);
2413 stq(a0 + 8, ECX);
2414 eflags |= CC_Z;
2415 } else {
2416 /* always do the store */
2417 stq(a0, d0);
2418 stq(a0 + 8, d1);
2419 EDX = d1;
2420 EAX = d0;
2421 eflags &= ~CC_Z;
2422 }
2423 CC_SRC = eflags;
2424}
2425#endif
2426
2427void helper_single_step(void)
2428{
2429#ifndef CONFIG_USER_ONLY
2430 check_hw_breakpoints(env, 1);
2431 env->dr[6] |= DR6_BS;
2432#endif
2433 raise_exception(EXCP01_DB);
2434}
2435
2436void helper_cpuid(void)
2437{
2438 uint32_t eax, ebx, ecx, edx;
2439
2440 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2441
2442 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2443 EAX = eax;
2444 EBX = ebx;
2445 ECX = ecx;
2446 EDX = edx;
2447}
2448
2449void helper_enter_level(int level, int data32, target_ulong t1)
2450{
2451 target_ulong ssp;
2452 uint32_t esp_mask, esp, ebp;
2453
2454 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2455 ssp = env->segs[R_SS].base;
2456 ebp = EBP;
2457 esp = ESP;
2458 if (data32) {
2459 /* 32 bit */
2460 esp -= 4;
2461 while (--level) {
2462 esp -= 4;
2463 ebp -= 4;
2464 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2465 }
2466 esp -= 4;
2467 stl(ssp + (esp & esp_mask), t1);
2468 } else {
2469 /* 16 bit */
2470 esp -= 2;
2471 while (--level) {
2472 esp -= 2;
2473 ebp -= 2;
2474 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2475 }
2476 esp -= 2;
2477 stw(ssp + (esp & esp_mask), t1);
2478 }
2479}
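/* For level = 3 with a 32-bit operand the loop above runs twice, copying the
 * saved frame pointers at [EBP-4] and [EBP-8] down to the new frame, after
 * which t1 (the new frame-pointer value) is stored below them. Only the stack
 * slots are written here; ESP itself is left for the translated ENTER code to
 * update, and the initial esp -= 4 presumably skips the slot of the already
 * pushed old EBP. */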
2480
2481#ifdef TARGET_X86_64
2482void helper_enter64_level(int level, int data64, target_ulong t1)
2483{
2484 target_ulong esp, ebp;
2485 ebp = EBP;
2486 esp = ESP;
2487
2488 if (data64) {
2489 /* 64 bit */
2490 esp -= 8;
2491 while (--level) {
2492 esp -= 8;
2493 ebp -= 8;
2494 stq(esp, ldq(ebp));
2495 }
2496 esp -= 8;
2497 stq(esp, t1);
2498 } else {
2499 /* 16 bit */
2500 esp -= 2;
2501 while (--level) {
2502 esp -= 2;
2503 ebp -= 2;
2504 stw(esp, lduw(ebp));
2505 }
2506 esp -= 2;
2507 stw(esp, t1);
2508 }
2509}
2510#endif
2511
2512void helper_lldt(int selector)
2513{
2514 SegmentCache *dt;
2515 uint32_t e1, e2;
2516#ifndef VBOX
2517 int index, entry_limit;
2518#else
2519 unsigned int index, entry_limit;
2520#endif
2521 target_ulong ptr;
2522
2523#ifdef VBOX
2524 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2525 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2526#endif
2527
2528 selector &= 0xffff;
2529 if ((selector & 0xfffc) == 0) {
2530 /* XXX: NULL selector case: invalid LDT */
2531 env->ldt.base = 0;
2532 env->ldt.limit = 0;
2533#ifdef VBOX
2534 env->ldt.flags = DESC_INTEL_UNUSABLE;
2535 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2536 env->ldt.newselector = 0;
2537#endif
2538 } else {
2539 if (selector & 0x4)
2540 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2541 dt = &env->gdt;
2542 index = selector & ~7;
2543#ifdef TARGET_X86_64
2544 if (env->hflags & HF_LMA_MASK)
2545 entry_limit = 15;
2546 else
2547#endif
2548 entry_limit = 7;
2549 if ((index + entry_limit) > dt->limit)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 ptr = dt->base + index;
2552 e1 = ldl_kernel(ptr);
2553 e2 = ldl_kernel(ptr + 4);
2554 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2555 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2556 if (!(e2 & DESC_P_MASK))
2557 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2558#ifdef TARGET_X86_64
2559 if (env->hflags & HF_LMA_MASK) {
2560 uint32_t e3;
2561 e3 = ldl_kernel(ptr + 8);
2562 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2563 env->ldt.base |= (target_ulong)e3 << 32;
2564 } else
2565#endif
2566 {
2567 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2568 }
2569 }
2570 env->ldt.selector = selector;
2571#ifdef VBOX
2572 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2573 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2574#endif
2575}
2576
2577void helper_ltr(int selector)
2578{
2579 SegmentCache *dt;
2580 uint32_t e1, e2;
2581#ifndef VBOX
2582 int index, type, entry_limit;
2583#else
2584 unsigned int index;
2585 int type, entry_limit;
2586#endif
2587 target_ulong ptr;
2588
2589#ifdef VBOX
2590 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2591 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2592 env->tr.flags, (RTSEL)(selector & 0xffff)));
2593#endif
2594 selector &= 0xffff;
2595 if ((selector & 0xfffc) == 0) {
2596 /* NULL selector case: invalid TR */
2597#ifdef VBOX
2598 raise_exception_err(EXCP0A_TSS, 0);
2599#else
2600 env->tr.base = 0;
2601 env->tr.limit = 0;
2602 env->tr.flags = 0;
2603#endif
2604 } else {
2605 if (selector & 0x4)
2606 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2607 dt = &env->gdt;
2608 index = selector & ~7;
2609#ifdef TARGET_X86_64
2610 if (env->hflags & HF_LMA_MASK)
2611 entry_limit = 15;
2612 else
2613#endif
2614 entry_limit = 7;
2615 if ((index + entry_limit) > dt->limit)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 ptr = dt->base + index;
2618 e1 = ldl_kernel(ptr);
2619 e2 = ldl_kernel(ptr + 4);
2620 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2621 if ((e2 & DESC_S_MASK) ||
2622 (type != 1 && type != 9))
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 if (!(e2 & DESC_P_MASK))
2625 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2626#ifdef TARGET_X86_64
2627 if (env->hflags & HF_LMA_MASK) {
2628 uint32_t e3, e4;
2629 e3 = ldl_kernel(ptr + 8);
2630 e4 = ldl_kernel(ptr + 12);
2631 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2632 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2633 load_seg_cache_raw_dt(&env->tr, e1, e2);
2634 env->tr.base |= (target_ulong)e3 << 32;
2635 } else
2636#endif
2637 {
2638 load_seg_cache_raw_dt(&env->tr, e1, e2);
2639 }
2640 e2 |= DESC_TSS_BUSY_MASK;
2641 stl_kernel(ptr + 4, e2);
2642 }
2643 env->tr.selector = selector;
2644#ifdef VBOX
2645 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2646 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2647 env->tr.flags, (RTSEL)(selector & 0xffff)));
2648#endif
2649}
2650
2651/* Only works in protected mode and not VM86. seg_reg must be != R_CS */
2652void helper_load_seg(int seg_reg, int selector)
2653{
2654 uint32_t e1, e2;
2655 int cpl, dpl, rpl;
2656 SegmentCache *dt;
2657#ifndef VBOX
2658 int index;
2659#else
2660 unsigned int index;
2661#endif
2662 target_ulong ptr;
2663
2664 selector &= 0xffff;
2665 cpl = env->hflags & HF_CPL_MASK;
2666#ifdef VBOX
2667
2668 /* Trying to load a selector with CPL=1? */
2669 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2670 {
2671 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2672 selector = selector & 0xfffc;
2673 }
2674#endif /* VBOX */
2675 if ((selector & 0xfffc) == 0) {
2676 /* null selector case */
2677#ifndef VBOX
2678 if (seg_reg == R_SS
2679#ifdef TARGET_X86_64
2680 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2681#endif
2682 )
2683 raise_exception_err(EXCP0D_GPF, 0);
2684 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2685#else
2686 if (seg_reg == R_SS) {
2687 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2688 raise_exception_err(EXCP0D_GPF, 0);
2689 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2690 } else {
2691 e2 = DESC_INTEL_UNUSABLE;
2692 }
2693 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2694#endif
2695 } else {
2696
2697 if (selector & 0x4)
2698 dt = &env->ldt;
2699 else
2700 dt = &env->gdt;
2701 index = selector & ~7;
2702 if ((index + 7) > dt->limit)
2703 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2704 ptr = dt->base + index;
2705 e1 = ldl_kernel(ptr);
2706 e2 = ldl_kernel(ptr + 4);
2707
2708 if (!(e2 & DESC_S_MASK))
2709 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2710 rpl = selector & 3;
2711 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2712 if (seg_reg == R_SS) {
2713 /* must be writable segment */
2714 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 if (rpl != cpl || dpl != cpl)
2717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2718 } else {
2719 /* must be readable segment */
2720 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722
2723 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2724 /* if not conforming code, test rights */
2725 if (dpl < cpl || dpl < rpl)
2726 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2727 }
2728 }
2729
2730 if (!(e2 & DESC_P_MASK)) {
2731 if (seg_reg == R_SS)
2732 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2733 else
2734 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2735 }
2736
2737 /* set the access bit if not already set */
2738 if (!(e2 & DESC_A_MASK)) {
2739 e2 |= DESC_A_MASK;
2740 stl_kernel(ptr + 4, e2);
2741 }
2742
2743 cpu_x86_load_seg_cache(env, seg_reg, selector,
2744 get_seg_base(e1, e2),
2745 get_seg_limit(e1, e2),
2746 e2);
2747#if 0
2748 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2749 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2750#endif
2751 }
2752}
2753
2754/* protected mode jump */
2755void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2756 int next_eip_addend)
2757{
2758 int gate_cs, type;
2759 uint32_t e1, e2, cpl, dpl, rpl, limit;
2760 target_ulong next_eip;
2761
2762#ifdef VBOX /** @todo Why do we do this? */
2763 e1 = e2 = 0;
2764#endif
2765 if ((new_cs & 0xfffc) == 0)
2766 raise_exception_err(EXCP0D_GPF, 0);
2767 if (load_segment(&e1, &e2, new_cs) != 0)
2768 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2769 cpl = env->hflags & HF_CPL_MASK;
2770 if (e2 & DESC_S_MASK) {
2771 if (!(e2 & DESC_CS_MASK))
2772 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2773 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2774 if (e2 & DESC_C_MASK) {
2775 /* conforming code segment */
2776 if (dpl > cpl)
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 } else {
2779 /* non conforming code segment */
2780 rpl = new_cs & 3;
2781 if (rpl > cpl)
2782 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2783 if (dpl != cpl)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 }
2786 if (!(e2 & DESC_P_MASK))
2787 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2788 limit = get_seg_limit(e1, e2);
2789 if (new_eip > limit &&
2790 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2791 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2792#ifdef VBOX
2793 if (!(e2 & DESC_A_MASK))
2794 e2 = set_segment_accessed(new_cs, e2);
2795#endif
2796 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2797 get_seg_base(e1, e2), limit, e2);
2798 EIP = new_eip;
2799 } else {
2800 /* jump to call or task gate */
2801 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2802 rpl = new_cs & 3;
2803 cpl = env->hflags & HF_CPL_MASK;
2804 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2805 switch(type) {
2806 case 1: /* 286 TSS */
2807 case 9: /* 386 TSS */
2808 case 5: /* task gate */
2809 if (dpl < cpl || dpl < rpl)
2810 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2811 next_eip = env->eip + next_eip_addend;
2812 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2813 CC_OP = CC_OP_EFLAGS;
2814 break;
2815 case 4: /* 286 call gate */
2816 case 12: /* 386 call gate */
2817 if ((dpl < cpl) || (dpl < rpl))
2818 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2819 if (!(e2 & DESC_P_MASK))
2820 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2821 gate_cs = e1 >> 16;
2822 new_eip = (e1 & 0xffff);
2823 if (type == 12)
2824 new_eip |= (e2 & 0xffff0000);
2825 if (load_segment(&e1, &e2, gate_cs) != 0)
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2828 /* must be code segment */
2829 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2830 (DESC_S_MASK | DESC_CS_MASK)))
2831 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2832 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2833 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2834 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2835 if (!(e2 & DESC_P_MASK))
2836#ifdef VBOX /* See page 3-514 of 253666.pdf */
2837 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2838#else
2839 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2840#endif
2841 limit = get_seg_limit(e1, e2);
2842 if (new_eip > limit)
2843 raise_exception_err(EXCP0D_GPF, 0);
2844 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2845 get_seg_base(e1, e2), limit, e2);
2846 EIP = new_eip;
2847 break;
2848 default:
2849 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2850 break;
2851 }
2852 }
2853}
2854
2855/* real mode call */
2856void helper_lcall_real(int new_cs, target_ulong new_eip1,
2857 int shift, int next_eip)
2858{
2859 int new_eip;
2860 uint32_t esp, esp_mask;
2861 target_ulong ssp;
2862
2863 new_eip = new_eip1;
2864 esp = ESP;
2865 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2866 ssp = env->segs[R_SS].base;
2867 if (shift) {
2868 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2869 PUSHL(ssp, esp, esp_mask, next_eip);
2870 } else {
2871 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2872 PUSHW(ssp, esp, esp_mask, next_eip);
2873 }
2874
2875 SET_ESP(esp, esp_mask);
2876 env->eip = new_eip;
2877 env->segs[R_CS].selector = new_cs;
2878 env->segs[R_CS].base = (new_cs << 4);
2879}
2880
2881/* protected mode call */
2882void helper_lcall_protected(int new_cs, target_ulong new_eip,
2883 int shift, int next_eip_addend)
2884{
2885 int new_stack, i;
2886 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2887 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2888 uint32_t val, limit, old_sp_mask;
2889 target_ulong ssp, old_ssp, next_eip;
2890
2891#ifdef VBOX /** @todo Why do we do this? */
2892 e1 = e2 = 0;
2893#endif
2894 next_eip = env->eip + next_eip_addend;
2895 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2896 LOG_PCALL_STATE(env);
2897 if ((new_cs & 0xfffc) == 0)
2898 raise_exception_err(EXCP0D_GPF, 0);
2899 if (load_segment(&e1, &e2, new_cs) != 0)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 cpl = env->hflags & HF_CPL_MASK;
2902 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2903 if (e2 & DESC_S_MASK) {
2904 if (!(e2 & DESC_CS_MASK))
2905 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2906 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2907 if (e2 & DESC_C_MASK) {
2908 /* conforming code segment */
2909 if (dpl > cpl)
2910 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2911 } else {
2912 /* non conforming code segment */
2913 rpl = new_cs & 3;
2914 if (rpl > cpl)
2915 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2916 if (dpl != cpl)
2917 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2918 }
2919 if (!(e2 & DESC_P_MASK))
2920 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2921#ifdef VBOX
2922 if (!(e2 & DESC_A_MASK))
2923 e2 = set_segment_accessed(new_cs, e2);
2924#endif
2925
2926#ifdef TARGET_X86_64
2927 /* XXX: check 16/32 bit cases in long mode */
2928 if (shift == 2) {
2929 target_ulong rsp;
2930 /* 64 bit case */
2931 rsp = ESP;
2932 PUSHQ(rsp, env->segs[R_CS].selector);
2933 PUSHQ(rsp, next_eip);
2934 /* from this point, not restartable */
2935 ESP = rsp;
2936 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2937 get_seg_base(e1, e2),
2938 get_seg_limit(e1, e2), e2);
2939 EIP = new_eip;
2940 } else
2941#endif
2942 {
2943 sp = ESP;
2944 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2945 ssp = env->segs[R_SS].base;
2946 if (shift) {
2947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHL(ssp, sp, sp_mask, next_eip);
2949 } else {
2950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2951 PUSHW(ssp, sp, sp_mask, next_eip);
2952 }
2953
2954 limit = get_seg_limit(e1, e2);
2955 if (new_eip > limit)
2956 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2957 /* from this point, not restartable */
2958 SET_ESP(sp, sp_mask);
2959 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2960 get_seg_base(e1, e2), limit, e2);
2961 EIP = new_eip;
2962 }
2963 } else {
2964 /* check gate type */
2965 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2966 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2967 rpl = new_cs & 3;
2968 switch(type) {
2969 case 1: /* available 286 TSS */
2970 case 9: /* available 386 TSS */
2971 case 5: /* task gate */
2972 if (dpl < cpl || dpl < rpl)
2973 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2974 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2975 CC_OP = CC_OP_EFLAGS;
2976 return;
2977 case 4: /* 286 call gate */
2978 case 12: /* 386 call gate */
2979 break;
2980 default:
2981 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2982 break;
2983 }
2984 shift = type >> 3;
2985
2986 if (dpl < cpl || dpl < rpl)
2987 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2988 /* check valid bit */
2989 if (!(e2 & DESC_P_MASK))
2990 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2991 selector = e1 >> 16;
2992 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2993 param_count = e2 & 0x1f;
2994 if ((selector & 0xfffc) == 0)
2995 raise_exception_err(EXCP0D_GPF, 0);
2996
2997 if (load_segment(&e1, &e2, selector) != 0)
2998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2999 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3002 if (dpl > cpl)
3003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3004 if (!(e2 & DESC_P_MASK))
3005 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3006
3007 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3008 /* to inner privilege */
3009 get_ss_esp_from_tss(&ss, &sp, dpl);
3010 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3011 ss, sp, param_count, ESP);
3012 if ((ss & 0xfffc) == 0)
3013 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3014 if ((ss & 3) != dpl)
3015 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3016 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3017 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3018 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3019 if (ss_dpl != dpl)
3020 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3021 if (!(ss_e2 & DESC_S_MASK) ||
3022 (ss_e2 & DESC_CS_MASK) ||
3023 !(ss_e2 & DESC_W_MASK))
3024 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3025 if (!(ss_e2 & DESC_P_MASK))
3026#ifdef VBOX /* See page 3-99 of 253666.pdf */
3027 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3028#else
3029 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3030#endif
3031
3032 // push_size = ((param_count * 2) + 8) << shift;
3033
3034 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3035 old_ssp = env->segs[R_SS].base;
3036
3037 sp_mask = get_sp_mask(ss_e2);
3038 ssp = get_seg_base(ss_e1, ss_e2);
3039 if (shift) {
3040 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3041 PUSHL(ssp, sp, sp_mask, ESP);
3042 for(i = param_count - 1; i >= 0; i--) {
3043 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3044 PUSHL(ssp, sp, sp_mask, val);
3045 }
3046 } else {
3047 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3048 PUSHW(ssp, sp, sp_mask, ESP);
3049 for(i = param_count - 1; i >= 0; i--) {
3050 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3051 PUSHW(ssp, sp, sp_mask, val);
3052 }
3053 }
3054 new_stack = 1;
3055 } else {
3056 /* to same privilege */
3057 sp = ESP;
3058 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3059 ssp = env->segs[R_SS].base;
3060 // push_size = (4 << shift);
3061 new_stack = 0;
3062 }
3063
3064 if (shift) {
3065 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3066 PUSHL(ssp, sp, sp_mask, next_eip);
3067 } else {
3068 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3069 PUSHW(ssp, sp, sp_mask, next_eip);
3070 }
3071
3072 /* from this point, not restartable */
3073
3074 if (new_stack) {
3075 ss = (ss & ~3) | dpl;
3076 cpu_x86_load_seg_cache(env, R_SS, ss,
3077 ssp,
3078 get_seg_limit(ss_e1, ss_e2),
3079 ss_e2);
3080 }
3081
3082 selector = (selector & ~3) | dpl;
3083 cpu_x86_load_seg_cache(env, R_CS, selector,
3084 get_seg_base(e1, e2),
3085 get_seg_limit(e1, e2),
3086 e2);
3087 cpu_x86_set_cpl(env, dpl);
3088 SET_ESP(sp, sp_mask);
3089 EIP = offset;
3090 }
3091}
3092
3093/* real and vm86 mode iret */
3094void helper_iret_real(int shift)
3095{
3096 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3097 target_ulong ssp;
3098 int eflags_mask;
3099#ifdef VBOX
3100 bool fVME = false;
3101
3102 remR3TrapClear(env->pVM);
3103#endif /* VBOX */
3104
3105 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3106 sp = ESP;
3107 ssp = env->segs[R_SS].base;
3108 if (shift == 1) {
3109 /* 32 bits */
3110 POPL(ssp, sp, sp_mask, new_eip);
3111 POPL(ssp, sp, sp_mask, new_cs);
3112 new_cs &= 0xffff;
3113 POPL(ssp, sp, sp_mask, new_eflags);
3114 } else {
3115 /* 16 bits */
3116 POPW(ssp, sp, sp_mask, new_eip);
3117 POPW(ssp, sp, sp_mask, new_cs);
3118 POPW(ssp, sp, sp_mask, new_eflags);
3119 }
3120#ifdef VBOX
3121 if ( (env->eflags & VM_MASK)
3122 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3123 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3124 {
3125 fVME = true;
3126 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3127 /* if TF will be set -> #GP */
3128 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3129 || (new_eflags & TF_MASK))
3130 raise_exception(EXCP0D_GPF);
3131 }
3132#endif /* VBOX */
3133 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3134 env->segs[R_CS].selector = new_cs;
3135 env->segs[R_CS].base = (new_cs << 4);
3136 env->eip = new_eip;
3137#ifdef VBOX
3138 if (fVME)
3139 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3140 else
3141#endif
3142 if (env->eflags & VM_MASK)
3143 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3144 else
3145 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3146 if (shift == 0)
3147 eflags_mask &= 0xffff;
3148 load_eflags(new_eflags, eflags_mask);
3149 env->hflags2 &= ~HF2_NMI_MASK;
3150#ifdef VBOX
3151 if (fVME)
3152 {
3153 if (new_eflags & IF_MASK)
3154 env->eflags |= VIF_MASK;
3155 else
3156 env->eflags &= ~VIF_MASK;
3157 }
3158#endif /* VBOX */
3159}
3160
3161static inline void validate_seg(int seg_reg, int cpl)
3162{
3163 int dpl;
3164 uint32_t e2;
3165
3166 /* XXX: on x86_64, we do not want to nullify FS and GS because
3167 they may still contain a valid base. I would be interested to
3168 know how a real x86_64 CPU behaves */
3169 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3170 (env->segs[seg_reg].selector & 0xfffc) == 0)
3171 return;
3172
3173 e2 = env->segs[seg_reg].flags;
3174 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3175 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3176 /* data or non conforming code segment */
3177 if (dpl < cpl) {
3178 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3179 }
3180 }
3181}
3182
3183/* protected mode iret */
3184static inline void helper_ret_protected(int shift, int is_iret, int addend)
3185{
3186 uint32_t new_cs, new_eflags, new_ss;
3187 uint32_t new_es, new_ds, new_fs, new_gs;
3188 uint32_t e1, e2, ss_e1, ss_e2;
3189 int cpl, dpl, rpl, eflags_mask, iopl;
3190 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3191
3192#ifdef VBOX /** @todo Why do we do this? */
3193 ss_e1 = ss_e2 = e1 = e2 = 0;
3194#endif
3195
3196#ifdef TARGET_X86_64
3197 if (shift == 2)
3198 sp_mask = -1;
3199 else
3200#endif
3201 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3202 sp = ESP;
3203 ssp = env->segs[R_SS].base;
3204 new_eflags = 0; /* avoid warning */
3205#ifdef TARGET_X86_64
3206 if (shift == 2) {
3207 POPQ(sp, new_eip);
3208 POPQ(sp, new_cs);
3209 new_cs &= 0xffff;
3210 if (is_iret) {
3211 POPQ(sp, new_eflags);
3212 }
3213 } else
3214#endif
3215 if (shift == 1) {
3216 /* 32 bits */
3217 POPL(ssp, sp, sp_mask, new_eip);
3218 POPL(ssp, sp, sp_mask, new_cs);
3219 new_cs &= 0xffff;
3220 if (is_iret) {
3221 POPL(ssp, sp, sp_mask, new_eflags);
3222#define LOG_GROUP LOG_GROUP_REM
3223#if defined(VBOX) && defined(DEBUG)
3224 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3225 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3226 Log(("iret: new EFLAGS %08X\n", new_eflags));
3227 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3228#endif
3229 if (new_eflags & VM_MASK)
3230 goto return_to_vm86;
3231 }
3232#ifdef VBOX
3233 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3234 {
3235 if ( !EMIsRawRing1Enabled(env->pVM)
3236 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3237 {
3238 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3239 new_cs = new_cs & 0xfffc;
3240 }
3241 else
3242 {
3243 /* Ugly assumption: assume a genuine switch to ring-1. */
3244 Log(("Genuine switch to ring-1 (iret)\n"));
3245 }
3246 }
3247 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3248 {
3249 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3250 new_cs = (new_cs & 0xfffc) | 1;
3251 }
3252#endif
3253 } else {
3254 /* 16 bits */
3255 POPW(ssp, sp, sp_mask, new_eip);
3256 POPW(ssp, sp, sp_mask, new_cs);
3257 if (is_iret)
3258 POPW(ssp, sp, sp_mask, new_eflags);
3259 }
3260 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3261 new_cs, new_eip, shift, addend);
3262 LOG_PCALL_STATE(env);
3263 if ((new_cs & 0xfffc) == 0)
3264 {
3265#if defined(VBOX) && defined(DEBUG)
3266 Log(("(new_cs & 0xfffc) == 0\n"));
3267#endif
3268 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3269 }
3270 if (load_segment(&e1, &e2, new_cs) != 0)
3271 {
3272#if defined(VBOX) && defined(DEBUG)
3273 Log(("load_segment failed\n"));
3274#endif
3275 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3276 }
3277 if (!(e2 & DESC_S_MASK) ||
3278 !(e2 & DESC_CS_MASK))
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 Log(("e2 mask %08x\n", e2));
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 cpl = env->hflags & HF_CPL_MASK;
3286 rpl = new_cs & 3;
3287 if (rpl < cpl)
3288 {
3289#if defined(VBOX) && defined(DEBUG)
3290 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3291#endif
3292 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3293 }
3294 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3295
3296 if (e2 & DESC_C_MASK) {
3297 if (dpl > rpl)
3298 {
3299#if defined(VBOX) && defined(DEBUG)
3300 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3301#endif
3302 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3303 }
3304 } else {
3305 if (dpl != rpl)
3306 {
3307#if defined(VBOX) && defined(DEBUG)
3308 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3309#endif
3310 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3311 }
3312 }
3313 if (!(e2 & DESC_P_MASK))
3314 {
3315#if defined(VBOX) && defined(DEBUG)
3316 Log(("DESC_P_MASK e2=%08x\n", e2));
3317#endif
3318 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3319 }
3320
3321 sp += addend;
3322 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3323 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3324 /* return to same privilege level */
3325#ifdef VBOX
3326 if (!(e2 & DESC_A_MASK))
3327 e2 = set_segment_accessed(new_cs, e2);
3328#endif
3329 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3330 get_seg_base(e1, e2),
3331 get_seg_limit(e1, e2),
3332 e2);
3333 } else {
3334 /* return to different privilege level */
3335#ifdef TARGET_X86_64
3336 if (shift == 2) {
3337 POPQ(sp, new_esp);
3338 POPQ(sp, new_ss);
3339 new_ss &= 0xffff;
3340 } else
3341#endif
3342 if (shift == 1) {
3343 /* 32 bits */
3344 POPL(ssp, sp, sp_mask, new_esp);
3345 POPL(ssp, sp, sp_mask, new_ss);
3346 new_ss &= 0xffff;
3347 } else {
3348 /* 16 bits */
3349 POPW(ssp, sp, sp_mask, new_esp);
3350 POPW(ssp, sp, sp_mask, new_ss);
3351 }
3352 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3353 new_ss, new_esp);
3354 if ((new_ss & 0xfffc) == 0) {
3355#ifdef TARGET_X86_64
3356 /* NULL ss is allowed in long mode if cpl != 3 */
3357# ifndef VBOX
3358 /* XXX: test CS64 ? */
3359 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3360 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3361 0, 0xffffffff,
3362 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3363 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3364 DESC_W_MASK | DESC_A_MASK);
3365 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3366 } else
3367# else /* VBOX */
3368 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3369 if (!(e2 & DESC_A_MASK))
3370 e2 = set_segment_accessed(new_cs, e2);
3371 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3372 0, 0xffffffff,
3373 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3374 ss_e2 = DESC_B_MASK; /* not really used */
3375 } else
3376# endif
3377#endif
3378 {
3379#if defined(VBOX) && defined(DEBUG)
3380 Log(("NULL ss, rpl=%d\n", rpl));
3381#endif
3382 raise_exception_err(EXCP0D_GPF, 0);
3383 }
3384 } else {
3385 if ((new_ss & 3) != rpl)
3386 {
3387#if defined(VBOX) && defined(DEBUG)
3388 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3389#endif
3390 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3391 }
3392 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3393 {
3394#if defined(VBOX) && defined(DEBUG)
3395 Log(("new_ss=%x load error\n", new_ss));
3396#endif
3397 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3398 }
3399 if (!(ss_e2 & DESC_S_MASK) ||
3400 (ss_e2 & DESC_CS_MASK) ||
3401 !(ss_e2 & DESC_W_MASK))
3402 {
3403#if defined(VBOX) && defined(DEBUG)
3404 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3405#endif
3406 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3407 }
3408 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3409 if (dpl != rpl)
3410 {
3411#if defined(VBOX) && defined(DEBUG)
3412 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3413#endif
3414 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3415 }
3416 if (!(ss_e2 & DESC_P_MASK))
3417 {
3418#if defined(VBOX) && defined(DEBUG)
3419 Log(("new_ss=%#x #NP\n", new_ss));
3420#endif
3421 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3422 }
3423#ifdef VBOX
3424 if (!(e2 & DESC_A_MASK))
3425 e2 = set_segment_accessed(new_cs, e2);
3426 if (!(ss_e2 & DESC_A_MASK))
3427 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3428#endif
3429 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3430 get_seg_base(ss_e1, ss_e2),
3431 get_seg_limit(ss_e1, ss_e2),
3432 ss_e2);
3433 }
3434
3435 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3436 get_seg_base(e1, e2),
3437 get_seg_limit(e1, e2),
3438 e2);
3439 cpu_x86_set_cpl(env, rpl);
3440 sp = new_esp;
3441#ifdef TARGET_X86_64
3442 if (env->hflags & HF_CS64_MASK)
3443 sp_mask = -1;
3444 else
3445#endif
3446 sp_mask = get_sp_mask(ss_e2);
3447
3448 /* validate data segments */
3449 validate_seg(R_ES, rpl);
3450 validate_seg(R_DS, rpl);
3451 validate_seg(R_FS, rpl);
3452 validate_seg(R_GS, rpl);
3453
3454 sp += addend;
3455 }
3456 SET_ESP(sp, sp_mask);
3457 env->eip = new_eip;
3458 if (is_iret) {
3459 /* NOTE: 'cpl' is the _old_ CPL */
3460 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3461 if (cpl == 0)
3462#ifdef VBOX
3463 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3464#else
3465 eflags_mask |= IOPL_MASK;
3466#endif
3467 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3468 if (cpl <= iopl)
3469 eflags_mask |= IF_MASK;
3470 if (shift == 0)
3471 eflags_mask &= 0xffff;
3472 load_eflags(new_eflags, eflags_mask);
3473 }
3474 return;
3475
3476 return_to_vm86:
3477 POPL(ssp, sp, sp_mask, new_esp);
3478 POPL(ssp, sp, sp_mask, new_ss);
3479 POPL(ssp, sp, sp_mask, new_es);
3480 POPL(ssp, sp, sp_mask, new_ds);
3481 POPL(ssp, sp, sp_mask, new_fs);
3482 POPL(ssp, sp, sp_mask, new_gs);
3483
3484 /* modify processor state */
3485 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3486 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3487 load_seg_vm(R_CS, new_cs & 0xffff);
3488 cpu_x86_set_cpl(env, 3);
3489 load_seg_vm(R_SS, new_ss & 0xffff);
3490 load_seg_vm(R_ES, new_es & 0xffff);
3491 load_seg_vm(R_DS, new_ds & 0xffff);
3492 load_seg_vm(R_FS, new_fs & 0xffff);
3493 load_seg_vm(R_GS, new_gs & 0xffff);
3494
3495 env->eip = new_eip & 0xffff;
3496 ESP = new_esp;
3497}
3498
3499void helper_iret_protected(int shift, int next_eip)
3500{
3501 int tss_selector, type;
3502 uint32_t e1, e2;
3503
3504#ifdef VBOX
3505 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3506 e1 = e2 = 0; /** @todo Why do we do this? */
3507 remR3TrapClear(env->pVM);
3508#endif
3509
3510 /* specific case for TSS */
3511 if (env->eflags & NT_MASK) {
3512#ifdef TARGET_X86_64
3513 if (env->hflags & HF_LMA_MASK)
3514 {
3515#if defined(VBOX) && defined(DEBUG)
3516 Log(("eflags.NT=1 on iret in long mode\n"));
3517#endif
3518 raise_exception_err(EXCP0D_GPF, 0);
3519 }
3520#endif
3521 tss_selector = lduw_kernel(env->tr.base + 0);
3522 if (tss_selector & 4)
3523 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3524 if (load_segment(&e1, &e2, tss_selector) != 0)
3525 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3526 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3527 /* NOTE: the 0x17 mask keeps the S bit too, so only a busy TSS (type 3 or 11) yields 3 here */
3528 if (type != 3)
3529 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3530 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3531 } else {
3532 helper_ret_protected(shift, 1, 0);
3533 }
3534 env->hflags2 &= ~HF2_NMI_MASK;
3535}
3536
3537void helper_lret_protected(int shift, int addend)
3538{
3539 helper_ret_protected(shift, 0, addend);
3540}
3541
3542void helper_sysenter(void)
3543{
3544 if (env->sysenter_cs == 0) {
3545 raise_exception_err(EXCP0D_GPF, 0);
3546 }
3547 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3548 cpu_x86_set_cpl(env, 0);
3549
3550#ifdef TARGET_X86_64
3551 if (env->hflags & HF_LMA_MASK) {
3552 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3553 0, 0xffffffff,
3554 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3555 DESC_S_MASK |
3556 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3557 } else
3558#endif
3559 {
3560 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3561 0, 0xffffffff,
3562 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3563 DESC_S_MASK |
3564 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3565 }
3566 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3567 0, 0xffffffff,
3568 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3569 DESC_S_MASK |
3570 DESC_W_MASK | DESC_A_MASK);
3571 ESP = env->sysenter_esp;
3572 EIP = env->sysenter_eip;
3573}
3574
3575void helper_sysexit(int dflag)
3576{
3577 int cpl;
3578
3579 cpl = env->hflags & HF_CPL_MASK;
3580 if (env->sysenter_cs == 0 || cpl != 0) {
3581 raise_exception_err(EXCP0D_GPF, 0);
3582 }
3583 cpu_x86_set_cpl(env, 3);
3584#ifdef TARGET_X86_64
3585 if (dflag == 2) {
3586 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3587 0, 0xffffffff,
3588 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3589 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3590 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3591 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3592 0, 0xffffffff,
3593 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3594 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3595 DESC_W_MASK | DESC_A_MASK);
3596 } else
3597#endif
3598 {
3599 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3600 0, 0xffffffff,
3601 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3602 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3603 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3604 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3605 0, 0xffffffff,
3606 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3607 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3608 DESC_W_MASK | DESC_A_MASK);
3609 }
3610 ESP = ECX;
3611 EIP = EDX;
3612}
3613
3614#if defined(CONFIG_USER_ONLY)
3615target_ulong helper_read_crN(int reg)
3616{
3617 return 0;
3618}
3619
3620void helper_write_crN(int reg, target_ulong t0)
3621{
3622}
3623
3624void helper_movl_drN_T0(int reg, target_ulong t0)
3625{
3626}
3627#else
3628target_ulong helper_read_crN(int reg)
3629{
3630 target_ulong val;
3631
3632 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3633 switch(reg) {
3634 default:
3635 val = env->cr[reg];
3636 break;
3637 case 8:
3638 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3639#ifndef VBOX
3640 val = cpu_get_apic_tpr(env->apic_state);
3641#else /* VBOX */
3642 val = cpu_get_apic_tpr(env);
3643#endif /* VBOX */
3644 } else {
3645 val = env->v_tpr;
3646 }
3647 break;
3648 }
3649 return val;
3650}
3651
3652void helper_write_crN(int reg, target_ulong t0)
3653{
3654 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3655 switch(reg) {
3656 case 0:
3657 cpu_x86_update_cr0(env, t0);
3658 break;
3659 case 3:
3660 cpu_x86_update_cr3(env, t0);
3661 break;
3662 case 4:
3663 cpu_x86_update_cr4(env, t0);
3664 break;
3665 case 8:
3666 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3667#ifndef VBOX
3668 cpu_set_apic_tpr(env->apic_state, t0);
3669#else /* VBOX */
3670 cpu_set_apic_tpr(env, t0);
3671#endif /* VBOX */
3672 }
3673 env->v_tpr = t0 & 0x0f;
3674 break;
3675 default:
3676 env->cr[reg] = t0;
3677 break;
3678 }
3679}
3680
3681void helper_movl_drN_T0(int reg, target_ulong t0)
3682{
3683 int i;
3684
3685 if (reg < 4) {
3686 hw_breakpoint_remove(env, reg);
3687 env->dr[reg] = t0;
3688 hw_breakpoint_insert(env, reg);
3689# ifndef VBOX
3690 } else if (reg == 7) {
3691# else
3692 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3693 if (t0 & X86_DR7_MBZ_MASK)
3694 raise_exception_err(EXCP0D_GPF, 0);
3695 t0 |= X86_DR7_RA1_MASK;
3696 t0 &= ~X86_DR7_RAZ_MASK;
3697# endif
3698 for (i = 0; i < 4; i++)
3699 hw_breakpoint_remove(env, i);
3700 env->dr[7] = t0;
3701 for (i = 0; i < 4; i++)
3702 hw_breakpoint_insert(env, i);
3703 } else {
3704# ifndef VBOX
3705 env->dr[reg] = t0;
3706# else
3707 if (t0 & X86_DR6_MBZ_MASK)
3708 raise_exception_err(EXCP0D_GPF, 0);
3709 t0 |= X86_DR6_RA1_MASK;
3710 t0 &= ~X86_DR6_RAZ_MASK;
3711 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3712# endif
3713 }
3714}
3715#endif
3716
3717void helper_lmsw(target_ulong t0)
3718{
3719 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3720 if already set to one. */
3721 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3722 helper_write_crN(0, t0);
3723}
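/* Example of the masking above: if CR0.PE is already 1, an LMSW operand with
 * bit 0 clear still leaves PE set, because the old PE bit survives the ~0xe
 * mask and is OR'd back in; MP, EM and TS simply take the new values. */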
3724
3725void helper_clts(void)
3726{
3727 env->cr[0] &= ~CR0_TS_MASK;
3728 env->hflags &= ~HF_TS_MASK;
3729}
3730
3731void helper_invlpg(target_ulong addr)
3732{
3733 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3734 tlb_flush_page(env, addr);
3735}
3736
3737void helper_rdtsc(void)
3738{
3739 uint64_t val;
3740
3741 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3742 raise_exception(EXCP0D_GPF);
3743 }
3744 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3745
3746 val = cpu_get_tsc(env) + env->tsc_offset;
3747 EAX = (uint32_t)(val);
3748 EDX = (uint32_t)(val >> 32);
3749}
3750
3751void helper_rdtscp(void)
3752{
3753 helper_rdtsc();
3754#ifndef VBOX
3755 ECX = (uint32_t)(env->tsc_aux);
3756#else /* VBOX */
3757 uint64_t val;
3758 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3759 ECX = (uint32_t)(val);
3760 else
3761 ECX = 0;
3762#endif /* VBOX */
3763}
3764
3765void helper_rdpmc(void)
3766{
3767#ifdef VBOX
3768 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3769 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3770 raise_exception(EXCP0D_GPF);
3771 }
3772 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3773 EAX = 0;
3774 EDX = 0;
3775#else /* !VBOX */
3776 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3777 raise_exception(EXCP0D_GPF);
3778 }
3779 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3780
3781 /* currently unimplemented */
3782 raise_exception_err(EXCP06_ILLOP, 0);
3783#endif /* !VBOX */
3784}
3785
3786#if defined(CONFIG_USER_ONLY)
3787void helper_wrmsr(void)
3788{
3789}
3790
3791void helper_rdmsr(void)
3792{
3793}
3794#else
3795void helper_wrmsr(void)
3796{
3797 uint64_t val;
3798
3799 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3800
3801 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3802
3803 switch((uint32_t)ECX) {
3804 case MSR_IA32_SYSENTER_CS:
3805 env->sysenter_cs = val & 0xffff;
3806 break;
3807 case MSR_IA32_SYSENTER_ESP:
3808 env->sysenter_esp = val;
3809 break;
3810 case MSR_IA32_SYSENTER_EIP:
3811 env->sysenter_eip = val;
3812 break;
3813 case MSR_IA32_APICBASE:
3814# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3815 cpu_set_apic_base(env->apic_state, val);
3816# endif
3817 break;
3818 case MSR_EFER:
3819 {
3820 uint64_t update_mask;
3821 update_mask = 0;
3822 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3823 update_mask |= MSR_EFER_SCE;
3824 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3825 update_mask |= MSR_EFER_LME;
3826 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3827 update_mask |= MSR_EFER_FFXSR;
3828 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3829 update_mask |= MSR_EFER_NXE;
3830 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3831 update_mask |= MSR_EFER_SVME;
3832 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3833 update_mask |= MSR_EFER_FFXSR;
3834 cpu_load_efer(env, (env->efer & ~update_mask) |
3835 (val & update_mask));
3836 }
3837 break;
3838 case MSR_STAR:
3839 env->star = val;
3840 break;
3841 case MSR_PAT:
3842 env->pat = val;
3843 break;
3844 case MSR_VM_HSAVE_PA:
3845 env->vm_hsave = val;
3846 break;
3847#ifdef TARGET_X86_64
3848 case MSR_LSTAR:
3849 env->lstar = val;
3850 break;
3851 case MSR_CSTAR:
3852 env->cstar = val;
3853 break;
3854 case MSR_FMASK:
3855 env->fmask = val;
3856 break;
3857 case MSR_FSBASE:
3858 env->segs[R_FS].base = val;
3859 break;
3860 case MSR_GSBASE:
3861 env->segs[R_GS].base = val;
3862 break;
3863 case MSR_KERNELGSBASE:
3864 env->kernelgsbase = val;
3865 break;
3866#endif
3867# ifndef VBOX
3868 case MSR_MTRRphysBase(0):
3869 case MSR_MTRRphysBase(1):
3870 case MSR_MTRRphysBase(2):
3871 case MSR_MTRRphysBase(3):
3872 case MSR_MTRRphysBase(4):
3873 case MSR_MTRRphysBase(5):
3874 case MSR_MTRRphysBase(6):
3875 case MSR_MTRRphysBase(7):
3876 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3877 break;
3878 case MSR_MTRRphysMask(0):
3879 case MSR_MTRRphysMask(1):
3880 case MSR_MTRRphysMask(2):
3881 case MSR_MTRRphysMask(3):
3882 case MSR_MTRRphysMask(4):
3883 case MSR_MTRRphysMask(5):
3884 case MSR_MTRRphysMask(6):
3885 case MSR_MTRRphysMask(7):
3886 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3887 break;
3888 case MSR_MTRRfix64K_00000:
3889 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3890 break;
3891 case MSR_MTRRfix16K_80000:
3892 case MSR_MTRRfix16K_A0000:
3893 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3894 break;
3895 case MSR_MTRRfix4K_C0000:
3896 case MSR_MTRRfix4K_C8000:
3897 case MSR_MTRRfix4K_D0000:
3898 case MSR_MTRRfix4K_D8000:
3899 case MSR_MTRRfix4K_E0000:
3900 case MSR_MTRRfix4K_E8000:
3901 case MSR_MTRRfix4K_F0000:
3902 case MSR_MTRRfix4K_F8000:
3903 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3904 break;
3905 case MSR_MTRRdefType:
3906 env->mtrr_deftype = val;
3907 break;
3908 case MSR_MCG_STATUS:
3909 env->mcg_status = val;
3910 break;
3911 case MSR_MCG_CTL:
3912 if ((env->mcg_cap & MCG_CTL_P)
3913 && (val == 0 || val == ~(uint64_t)0))
3914 env->mcg_ctl = val;
3915 break;
3916 case MSR_TSC_AUX:
3917 env->tsc_aux = val;
3918 break;
3919# endif /* !VBOX */
3920 default:
3921# ifndef VBOX
3922 if ((uint32_t)ECX >= MSR_MC0_CTL
3923 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3924 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3925 if ((offset & 0x3) != 0
3926 || (val == 0 || val == ~(uint64_t)0))
3927 env->mce_banks[offset] = val;
3928 break;
3929 }
3930 /* XXX: exception ? */
3931# endif
3932 break;
3933 }
3934
3935# ifdef VBOX
3936 /* call CPUM. */
3937 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3938 {
3939 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3940 }
3941# endif
3942}
3943
3944void helper_rdmsr(void)
3945{
3946 uint64_t val;
3947
3948 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3949
3950 switch((uint32_t)ECX) {
3951 case MSR_IA32_SYSENTER_CS:
3952 val = env->sysenter_cs;
3953 break;
3954 case MSR_IA32_SYSENTER_ESP:
3955 val = env->sysenter_esp;
3956 break;
3957 case MSR_IA32_SYSENTER_EIP:
3958 val = env->sysenter_eip;
3959 break;
3960 case MSR_IA32_APICBASE:
3961#ifndef VBOX
3962 val = cpu_get_apic_base(env->apic_state);
3963#else /* VBOX */
3964 val = cpu_get_apic_base(env);
3965#endif /* VBOX */
3966 break;
3967 case MSR_EFER:
3968 val = env->efer;
3969 break;
3970 case MSR_STAR:
3971 val = env->star;
3972 break;
3973 case MSR_PAT:
3974 val = env->pat;
3975 break;
3976 case MSR_VM_HSAVE_PA:
3977 val = env->vm_hsave;
3978 break;
3979# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3980 case MSR_IA32_PERF_STATUS:
3981 /* tsc_increment_by_tick */
3982 val = 1000ULL;
3983 /* CPU multiplier */
3984 val |= (((uint64_t)4ULL) << 40);
3985 break;
3986# endif /* !VBOX */
3987#ifdef TARGET_X86_64
3988 case MSR_LSTAR:
3989 val = env->lstar;
3990 break;
3991 case MSR_CSTAR:
3992 val = env->cstar;
3993 break;
3994 case MSR_FMASK:
3995 val = env->fmask;
3996 break;
3997 case MSR_FSBASE:
3998 val = env->segs[R_FS].base;
3999 break;
4000 case MSR_GSBASE:
4001 val = env->segs[R_GS].base;
4002 break;
4003 case MSR_KERNELGSBASE:
4004 val = env->kernelgsbase;
4005 break;
4006# ifndef VBOX
4007 case MSR_TSC_AUX:
4008 val = env->tsc_aux;
4009 break;
4010# endif /*!VBOX*/
4011#endif
4012# ifndef VBOX
4013 case MSR_MTRRphysBase(0):
4014 case MSR_MTRRphysBase(1):
4015 case MSR_MTRRphysBase(2):
4016 case MSR_MTRRphysBase(3):
4017 case MSR_MTRRphysBase(4):
4018 case MSR_MTRRphysBase(5):
4019 case MSR_MTRRphysBase(6):
4020 case MSR_MTRRphysBase(7):
4021 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4022 break;
4023 case MSR_MTRRphysMask(0):
4024 case MSR_MTRRphysMask(1):
4025 case MSR_MTRRphysMask(2):
4026 case MSR_MTRRphysMask(3):
4027 case MSR_MTRRphysMask(4):
4028 case MSR_MTRRphysMask(5):
4029 case MSR_MTRRphysMask(6):
4030 case MSR_MTRRphysMask(7):
4031 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4032 break;
4033 case MSR_MTRRfix64K_00000:
4034 val = env->mtrr_fixed[0];
4035 break;
4036 case MSR_MTRRfix16K_80000:
4037 case MSR_MTRRfix16K_A0000:
4038 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4039 break;
4040 case MSR_MTRRfix4K_C0000:
4041 case MSR_MTRRfix4K_C8000:
4042 case MSR_MTRRfix4K_D0000:
4043 case MSR_MTRRfix4K_D8000:
4044 case MSR_MTRRfix4K_E0000:
4045 case MSR_MTRRfix4K_E8000:
4046 case MSR_MTRRfix4K_F0000:
4047 case MSR_MTRRfix4K_F8000:
4048 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4049 break;
4050 case MSR_MTRRdefType:
4051 val = env->mtrr_deftype;
4052 break;
4053 case MSR_MTRRcap:
4054 if (env->cpuid_features & CPUID_MTRR)
4055 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4056 else
4057 /* XXX: exception ? */
4058 val = 0;
4059 break;
4060 case MSR_MCG_CAP:
4061 val = env->mcg_cap;
4062 break;
4063 case MSR_MCG_CTL:
4064 if (env->mcg_cap & MCG_CTL_P)
4065 val = env->mcg_ctl;
4066 else
4067 val = 0;
4068 break;
4069 case MSR_MCG_STATUS:
4070 val = env->mcg_status;
4071 break;
4072# endif /* !VBOX */
4073 default:
4074# ifndef VBOX
4075 if ((uint32_t)ECX >= MSR_MC0_CTL
4076 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
4077 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4078 val = env->mce_banks[offset];
4079 break;
4080 }
4081 /* XXX: exception ? */
4082 val = 0;
4083# else /* VBOX */
4084 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4085 {
4086 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4087 val = 0;
4088 }
4089# endif /* VBOX */
4090 break;
4091 }
4092 EAX = (uint32_t)(val);
4093 EDX = (uint32_t)(val >> 32);
4094
4095# ifdef VBOX_STRICT
4096 if ((uint32_t)ECX != MSR_IA32_TSC) {
4097 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4098 val = 0;
4099 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4100 }
4101# endif
4102}
4103#endif
4104
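/* LSL/LAR helpers: load the descriptor named by the selector, check that it is
   accessible at the current CPL/RPL (conforming code segments are exempt from
   the privilege check), and report success through ZF (kept in CC_SRC here).
   helper_lsl returns the segment limit, helper_lar the access-rights bits. */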
4105target_ulong helper_lsl(target_ulong selector1)
4106{
4107 unsigned int limit;
4108 uint32_t e1, e2, eflags, selector;
4109 int rpl, dpl, cpl, type;
4110
4111 selector = selector1 & 0xffff;
4112 eflags = helper_cc_compute_all(CC_OP);
4113 if ((selector & 0xfffc) == 0)
4114 goto fail;
4115 if (load_segment(&e1, &e2, selector) != 0)
4116 goto fail;
4117 rpl = selector & 3;
4118 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4119 cpl = env->hflags & HF_CPL_MASK;
4120 if (e2 & DESC_S_MASK) {
4121 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4122 /* conforming */
4123 } else {
4124 if (dpl < cpl || dpl < rpl)
4125 goto fail;
4126 }
4127 } else {
4128 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4129 switch(type) {
4130 case 1:
4131 case 2:
4132 case 3:
4133 case 9:
4134 case 11:
4135 break;
4136 default:
4137 goto fail;
4138 }
4139 if (dpl < cpl || dpl < rpl) {
4140 fail:
4141 CC_SRC = eflags & ~CC_Z;
4142 return 0;
4143 }
4144 }
4145 limit = get_seg_limit(e1, e2);
4146 CC_SRC = eflags | CC_Z;
4147 return limit;
4148}
4149
4150target_ulong helper_lar(target_ulong selector1)
4151{
4152 uint32_t e1, e2, eflags, selector;
4153 int rpl, dpl, cpl, type;
4154
4155 selector = selector1 & 0xffff;
4156 eflags = helper_cc_compute_all(CC_OP);
4157 if ((selector & 0xfffc) == 0)
4158 goto fail;
4159 if (load_segment(&e1, &e2, selector) != 0)
4160 goto fail;
4161 rpl = selector & 3;
4162 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4163 cpl = env->hflags & HF_CPL_MASK;
4164 if (e2 & DESC_S_MASK) {
4165 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4166 /* conforming */
4167 } else {
4168 if (dpl < cpl || dpl < rpl)
4169 goto fail;
4170 }
4171 } else {
4172 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4173 switch(type) {
4174 case 1:
4175 case 2:
4176 case 3:
4177 case 4:
4178 case 5:
4179 case 9:
4180 case 11:
4181 case 12:
4182 break;
4183 default:
4184 goto fail;
4185 }
4186 if (dpl < cpl || dpl < rpl) {
4187 fail:
4188 CC_SRC = eflags & ~CC_Z;
4189 return 0;
4190 }
4191 }
4192 CC_SRC = eflags | CC_Z;
4193#ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4194 return e2 & 0x00ffff00;
4195#else
4196 return e2 & 0x00f0ff00;
4197#endif
4198}
4199
4200void helper_verr(target_ulong selector1)
4201{
4202 uint32_t e1, e2, eflags, selector;
4203 int rpl, dpl, cpl;
4204
4205 selector = selector1 & 0xffff;
4206 eflags = helper_cc_compute_all(CC_OP);
4207 if ((selector & 0xfffc) == 0)
4208 goto fail;
4209 if (load_segment(&e1, &e2, selector) != 0)
4210 goto fail;
4211 if (!(e2 & DESC_S_MASK))
4212 goto fail;
4213 rpl = selector & 3;
4214 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4215 cpl = env->hflags & HF_CPL_MASK;
4216 if (e2 & DESC_CS_MASK) {
4217 if (!(e2 & DESC_R_MASK))
4218 goto fail;
4219 if (!(e2 & DESC_C_MASK)) {
4220 if (dpl < cpl || dpl < rpl)
4221 goto fail;
4222 }
4223 } else {
4224 if (dpl < cpl || dpl < rpl) {
4225 fail:
4226 CC_SRC = eflags & ~CC_Z;
4227 return;
4228 }
4229 }
4230 CC_SRC = eflags | CC_Z;
4231}
4232
4233void helper_verw(target_ulong selector1)
4234{
4235 uint32_t e1, e2, eflags, selector;
4236 int rpl, dpl, cpl;
4237
4238 selector = selector1 & 0xffff;
4239 eflags = helper_cc_compute_all(CC_OP);
4240 if ((selector & 0xfffc) == 0)
4241 goto fail;
4242 if (load_segment(&e1, &e2, selector) != 0)
4243 goto fail;
4244 if (!(e2 & DESC_S_MASK))
4245 goto fail;
4246 rpl = selector & 3;
4247 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4248 cpl = env->hflags & HF_CPL_MASK;
4249 if (e2 & DESC_CS_MASK) {
4250 goto fail;
4251 } else {
4252 if (dpl < cpl || dpl < rpl)
4253 goto fail;
4254 if (!(e2 & DESC_W_MASK)) {
4255 fail:
4256 CC_SRC = eflags & ~CC_Z;
4257 return;
4258 }
4259 }
4260 CC_SRC = eflags | CC_Z;
4261}
4262
4263/* x87 FPU helpers */
4264
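/* Sets the given exception bit(s) in the FPU status word; if the exception is
   not masked in the control word, the Summary and Busy flags are set as well
   so that a later fwait (see helper_fwait/fpu_raise_exception) can deliver
   #MF, or assert FERR when CR0.NE is clear. */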
4265static void fpu_set_exception(int mask)
4266{
4267 env->fpus |= mask;
4268 if (env->fpus & (~env->fpuc & FPUC_EM))
4269 env->fpus |= FPUS_SE | FPUS_B;
4270}
4271
4272static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4273{
4274 if (b == 0.0)
4275 fpu_set_exception(FPUS_ZE);
4276 return a / b;
4277}
4278
4279static void fpu_raise_exception(void)
4280{
4281 if (env->cr[0] & CR0_NE_MASK) {
4282 raise_exception(EXCP10_COPR);
4283 }
4284#if !defined(CONFIG_USER_ONLY)
4285 else {
4286 cpu_set_ferr(env);
4287 }
4288#endif
4289}
4290
4291void helper_flds_FT0(uint32_t val)
4292{
4293 union {
4294 float32 f;
4295 uint32_t i;
4296 } u;
4297 u.i = val;
4298 FT0 = float32_to_floatx(u.f, &env->fp_status);
4299}
4300
4301void helper_fldl_FT0(uint64_t val)
4302{
4303 union {
4304 float64 f;
4305 uint64_t i;
4306 } u;
4307 u.i = val;
4308 FT0 = float64_to_floatx(u.f, &env->fp_status);
4309}
4310
4311void helper_fildl_FT0(int32_t val)
4312{
4313 FT0 = int32_to_floatx(val, &env->fp_status);
4314}
4315
4316void helper_flds_ST0(uint32_t val)
4317{
4318 int new_fpstt;
4319 union {
4320 float32 f;
4321 uint32_t i;
4322 } u;
4323 new_fpstt = (env->fpstt - 1) & 7;
4324 u.i = val;
4325 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4326 env->fpstt = new_fpstt;
4327 env->fptags[new_fpstt] = 0; /* validate stack entry */
4328}
4329
4330void helper_fldl_ST0(uint64_t val)
4331{
4332 int new_fpstt;
4333 union {
4334 float64 f;
4335 uint64_t i;
4336 } u;
4337 new_fpstt = (env->fpstt - 1) & 7;
4338 u.i = val;
4339 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4340 env->fpstt = new_fpstt;
4341 env->fptags[new_fpstt] = 0; /* validate stack entry */
4342}
4343
4344void helper_fildl_ST0(int32_t val)
4345{
4346 int new_fpstt;
4347 new_fpstt = (env->fpstt - 1) & 7;
4348 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4349 env->fpstt = new_fpstt;
4350 env->fptags[new_fpstt] = 0; /* validate stack entry */
4351}
4352
4353void helper_fildll_ST0(int64_t val)
4354{
4355 int new_fpstt;
4356 new_fpstt = (env->fpstt - 1) & 7;
4357 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4358 env->fpstt = new_fpstt;
4359 env->fptags[new_fpstt] = 0; /* validate stack entry */
4360}
4361
4362#ifndef VBOX
4363uint32_t helper_fsts_ST0(void)
4364#else
4365RTCCUINTREG helper_fsts_ST0(void)
4366#endif
4367{
4368 union {
4369 float32 f;
4370 uint32_t i;
4371 } u;
4372 u.f = floatx_to_float32(ST0, &env->fp_status);
4373 return u.i;
4374}
4375
4376uint64_t helper_fstl_ST0(void)
4377{
4378 union {
4379 float64 f;
4380 uint64_t i;
4381 } u;
4382 u.f = floatx_to_float64(ST0, &env->fp_status);
4383 return u.i;
4384}
4385
4386#ifndef VBOX
4387int32_t helper_fist_ST0(void)
4388#else
4389RTCCINTREG helper_fist_ST0(void)
4390#endif
4391{
4392 int32_t val;
4393 val = floatx_to_int32(ST0, &env->fp_status);
4394 if (val != (int16_t)val)
4395 val = -32768;
4396 return val;
4397}
4398
4399#ifndef VBOX
4400int32_t helper_fistl_ST0(void)
4401#else
4402RTCCINTREG helper_fistl_ST0(void)
4403#endif
4404{
4405 int32_t val;
4406 val = floatx_to_int32(ST0, &env->fp_status);
4407 return val;
4408}
4409
4410int64_t helper_fistll_ST0(void)
4411{
4412 int64_t val;
4413 val = floatx_to_int64(ST0, &env->fp_status);
4414 return val;
4415}
4416
4417#ifndef VBOX
4418int32_t helper_fistt_ST0(void)
4419#else
4420RTCCINTREG helper_fistt_ST0(void)
4421#endif
4422{
4423 int32_t val;
4424 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4425 if (val != (int16_t)val)
4426 val = -32768;
4427 return val;
4428}
4429
4430#ifndef VBOX
4431int32_t helper_fisttl_ST0(void)
4432#else
4433RTCCINTREG helper_fisttl_ST0(void)
4434#endif
4435{
4436 int32_t val;
4437 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4438 return val;
4439}
4440
4441int64_t helper_fisttll_ST0(void)
4442{
4443 int64_t val;
4444 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4445 return val;
4446}
4447
4448void helper_fldt_ST0(target_ulong ptr)
4449{
4450 int new_fpstt;
4451 new_fpstt = (env->fpstt - 1) & 7;
4452 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4453 env->fpstt = new_fpstt;
4454 env->fptags[new_fpstt] = 0; /* validate stack entry */
4455}
4456
4457void helper_fstt_ST0(target_ulong ptr)
4458{
4459 helper_fstt(ST0, ptr);
4460}
4461
4462void helper_fpush(void)
4463{
4464 fpush();
4465}
4466
4467void helper_fpop(void)
4468{
4469 fpop();
4470}
4471
4472void helper_fdecstp(void)
4473{
4474 env->fpstt = (env->fpstt - 1) & 7;
4475 env->fpus &= (~0x4700);
4476}
4477
4478void helper_fincstp(void)
4479{
4480 env->fpstt = (env->fpstt + 1) & 7;
4481 env->fpus &= (~0x4700);
4482}
4483
4484/* FPU move */
4485
4486void helper_ffree_STN(int st_index)
4487{
4488 env->fptags[(env->fpstt + st_index) & 7] = 1;
4489}
4490
4491void helper_fmov_ST0_FT0(void)
4492{
4493 ST0 = FT0;
4494}
4495
4496void helper_fmov_FT0_STN(int st_index)
4497{
4498 FT0 = ST(st_index);
4499}
4500
4501void helper_fmov_ST0_STN(int st_index)
4502{
4503 ST0 = ST(st_index);
4504}
4505
4506void helper_fmov_STN_ST0(int st_index)
4507{
4508 ST(st_index) = ST0;
4509}
4510
4511void helper_fxchg_ST0_STN(int st_index)
4512{
4513 CPU86_LDouble tmp;
4514 tmp = ST(st_index);
4515 ST(st_index) = ST0;
4516 ST0 = tmp;
4517}
4518
4519/* FPU operations */
4520
4521static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4522
4523void helper_fcom_ST0_FT0(void)
4524{
4525 int ret;
4526
4527 ret = floatx_compare(ST0, FT0, &env->fp_status);
4528 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4529}
4530
4531void helper_fucom_ST0_FT0(void)
4532{
4533 int ret;
4534
4535 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4536    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4537}
4538
4539static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4540
4541void helper_fcomi_ST0_FT0(void)
4542{
4543 int eflags;
4544 int ret;
4545
4546 ret = floatx_compare(ST0, FT0, &env->fp_status);
4547 eflags = helper_cc_compute_all(CC_OP);
4548 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4549 CC_SRC = eflags;
4550}
4551
4552void helper_fucomi_ST0_FT0(void)
4553{
4554 int eflags;
4555 int ret;
4556
4557 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4558 eflags = helper_cc_compute_all(CC_OP);
4559 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4560 CC_SRC = eflags;
4561}
4562
4563void helper_fadd_ST0_FT0(void)
4564{
4565 ST0 += FT0;
4566}
4567
4568void helper_fmul_ST0_FT0(void)
4569{
4570 ST0 *= FT0;
4571}
4572
4573void helper_fsub_ST0_FT0(void)
4574{
4575 ST0 -= FT0;
4576}
4577
4578void helper_fsubr_ST0_FT0(void)
4579{
4580 ST0 = FT0 - ST0;
4581}
4582
4583void helper_fdiv_ST0_FT0(void)
4584{
4585 ST0 = helper_fdiv(ST0, FT0);
4586}
4587
4588void helper_fdivr_ST0_FT0(void)
4589{
4590 ST0 = helper_fdiv(FT0, ST0);
4591}
4592
4593/* fp operations between STN and ST0 */
4594
4595void helper_fadd_STN_ST0(int st_index)
4596{
4597 ST(st_index) += ST0;
4598}
4599
4600void helper_fmul_STN_ST0(int st_index)
4601{
4602 ST(st_index) *= ST0;
4603}
4604
4605void helper_fsub_STN_ST0(int st_index)
4606{
4607 ST(st_index) -= ST0;
4608}
4609
4610void helper_fsubr_STN_ST0(int st_index)
4611{
4612 CPU86_LDouble *p;
4613 p = &ST(st_index);
4614 *p = ST0 - *p;
4615}
4616
4617void helper_fdiv_STN_ST0(int st_index)
4618{
4619 CPU86_LDouble *p;
4620 p = &ST(st_index);
4621 *p = helper_fdiv(*p, ST0);
4622}
4623
4624void helper_fdivr_STN_ST0(int st_index)
4625{
4626 CPU86_LDouble *p;
4627 p = &ST(st_index);
4628 *p = helper_fdiv(ST0, *p);
4629}
4630
4631/* misc FPU operations */
4632void helper_fchs_ST0(void)
4633{
4634 ST0 = floatx_chs(ST0);
4635}
4636
4637void helper_fabs_ST0(void)
4638{
4639 ST0 = floatx_abs(ST0);
4640}
4641
4642void helper_fld1_ST0(void)
4643{
4644 ST0 = f15rk[1];
4645}
4646
4647void helper_fldl2t_ST0(void)
4648{
4649 ST0 = f15rk[6];
4650}
4651
4652void helper_fldl2e_ST0(void)
4653{
4654 ST0 = f15rk[5];
4655}
4656
4657void helper_fldpi_ST0(void)
4658{
4659 ST0 = f15rk[2];
4660}
4661
4662void helper_fldlg2_ST0(void)
4663{
4664 ST0 = f15rk[3];
4665}
4666
4667void helper_fldln2_ST0(void)
4668{
4669 ST0 = f15rk[4];
4670}
4671
4672void helper_fldz_ST0(void)
4673{
4674 ST0 = f15rk[0];
4675}
4676
4677void helper_fldz_FT0(void)
4678{
4679 FT0 = f15rk[0];
4680}
4681
4682#ifndef VBOX
4683uint32_t helper_fnstsw(void)
4684#else
4685RTCCUINTREG helper_fnstsw(void)
4686#endif
4687{
4688 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4689}
4690
4691#ifndef VBOX
4692uint32_t helper_fnstcw(void)
4693#else
4694RTCCUINTREG helper_fnstcw(void)
4695#endif
4696{
4697 return env->fpuc;
4698}
4699
4700static void update_fp_status(void)
4701{
4702 int rnd_type;
4703
4704 /* set rounding mode */
4705 switch(env->fpuc & RC_MASK) {
4706 default:
4707 case RC_NEAR:
4708 rnd_type = float_round_nearest_even;
4709 break;
4710 case RC_DOWN:
4711 rnd_type = float_round_down;
4712 break;
4713 case RC_UP:
4714 rnd_type = float_round_up;
4715 break;
4716 case RC_CHOP:
4717 rnd_type = float_round_to_zero;
4718 break;
4719 }
4720 set_float_rounding_mode(rnd_type, &env->fp_status);
4721#ifdef FLOATX80
4722 switch((env->fpuc >> 8) & 3) {
4723 case 0:
4724 rnd_type = 32;
4725 break;
4726 case 2:
4727 rnd_type = 64;
4728 break;
4729 case 3:
4730 default:
4731 rnd_type = 80;
4732 break;
4733 }
4734 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4735#endif
4736}
4737
4738void helper_fldcw(uint32_t val)
4739{
4740 env->fpuc = val;
4741 update_fp_status();
4742}
4743
4744void helper_fclex(void)
4745{
4746 env->fpus &= 0x7f00;
4747}
4748
4749void helper_fwait(void)
4750{
4751 if (env->fpus & FPUS_SE)
4752 fpu_raise_exception();
4753}
4754
4755void helper_fninit(void)
4756{
4757 env->fpus = 0;
4758 env->fpstt = 0;
4759 env->fpuc = 0x37f;
4760 env->fptags[0] = 1;
4761 env->fptags[1] = 1;
4762 env->fptags[2] = 1;
4763 env->fptags[3] = 1;
4764 env->fptags[4] = 1;
4765 env->fptags[5] = 1;
4766 env->fptags[6] = 1;
4767 env->fptags[7] = 1;
4768}
4769
4770/* BCD ops */
4771
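/* The x87 packed BCD format is a 10-byte operand: bytes 0..8 hold 18 decimal
   digits (two per byte, least significant pair first) and bit 7 of byte 9 is
   the sign.  E.g. the value 1234 is stored as 34 12 00 ... 00 with a zero sign
   byte.  helper_fbld_ST0 converts such an operand and pushes it onto the FPU
   stack, helper_fbst_ST0 converts ST0 to an integer and stores it back in this
   format. */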
4772void helper_fbld_ST0(target_ulong ptr)
4773{
4774 CPU86_LDouble tmp;
4775 uint64_t val;
4776 unsigned int v;
4777 int i;
4778
4779 val = 0;
4780 for(i = 8; i >= 0; i--) {
4781 v = ldub(ptr + i);
4782 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4783 }
4784 tmp = val;
4785 if (ldub(ptr + 9) & 0x80)
4786 tmp = -tmp;
4787 fpush();
4788 ST0 = tmp;
4789}
4790
4791void helper_fbst_ST0(target_ulong ptr)
4792{
4793 int v;
4794 target_ulong mem_ref, mem_end;
4795 int64_t val;
4796
4797 val = floatx_to_int64(ST0, &env->fp_status);
4798 mem_ref = ptr;
4799 mem_end = mem_ref + 9;
4800 if (val < 0) {
4801 stb(mem_end, 0x80);
4802 val = -val;
4803 } else {
4804 stb(mem_end, 0x00);
4805 }
4806 while (mem_ref < mem_end) {
4807 if (val == 0)
4808 break;
4809 v = val % 100;
4810 val = val / 100;
4811 v = ((v / 10) << 4) | (v % 10);
4812 stb(mem_ref++, v);
4813 }
4814 while (mem_ref < mem_end) {
4815 stb(mem_ref++, 0);
4816 }
4817}
4818
4819void helper_f2xm1(void)
4820{
4821 ST0 = pow(2.0,ST0) - 1.0;
4822}
4823
4824void helper_fyl2x(void)
4825{
4826 CPU86_LDouble fptemp;
4827
4828 fptemp = ST0;
4829 if (fptemp>0.0){
4830 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4831 ST1 *= fptemp;
4832 fpop();
4833 } else {
4834 env->fpus &= (~0x4700);
4835 env->fpus |= 0x400;
4836 }
4837}
4838
4839void helper_fptan(void)
4840{
4841 CPU86_LDouble fptemp;
4842
4843 fptemp = ST0;
4844 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4845 env->fpus |= 0x400;
4846 } else {
4847 ST0 = tan(fptemp);
4848 fpush();
4849 ST0 = 1.0;
4850 env->fpus &= (~0x400); /* C2 <-- 0 */
4851 /* the above code is for |arg| < 2**52 only */
4852 }
4853}
4854
4855void helper_fpatan(void)
4856{
4857 CPU86_LDouble fptemp, fpsrcop;
4858
4859 fpsrcop = ST1;
4860 fptemp = ST0;
4861 ST1 = atan2(fpsrcop,fptemp);
4862 fpop();
4863}
4864
4865void helper_fxtract(void)
4866{
4867 CPU86_LDoubleU temp;
4868 unsigned int expdif;
4869
4870 temp.d = ST0;
4871 expdif = EXPD(temp) - EXPBIAS;
4872 /*DP exponent bias*/
4873 ST0 = expdif;
4874 fpush();
4875 BIASEXPONENT(temp);
4876 ST0 = temp.d;
4877}
4878
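/* FPREM1/FPREM compute the partial remainder ST0 - Q * ST1.  When the exponent
   difference is small (< 53 here) the reduction completes in one step and the
   low three bits of the quotient Q are reported in C0/C3/C1 with C2 cleared;
   otherwise only a partial reduction is done and C2 is set so the guest loops.
   FPREM1 rounds Q to the nearest integer (IEEE remainder), FPREM truncates it
   towards zero. */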
4879void helper_fprem1(void)
4880{
4881 CPU86_LDouble dblq, fpsrcop, fptemp;
4882 CPU86_LDoubleU fpsrcop1, fptemp1;
4883 int expdif;
4884 signed long long int q;
4885
4886#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4887 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4888#else
4889 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4890#endif
4891 ST0 = 0.0 / 0.0; /* NaN */
4892 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4893 return;
4894 }
4895
4896 fpsrcop = ST0;
4897 fptemp = ST1;
4898 fpsrcop1.d = fpsrcop;
4899 fptemp1.d = fptemp;
4900 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4901
4902 if (expdif < 0) {
4903 /* optimisation? taken from the AMD docs */
4904 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4905 /* ST0 is unchanged */
4906 return;
4907 }
4908
4909 if (expdif < 53) {
4910 dblq = fpsrcop / fptemp;
4911 /* round dblq towards nearest integer */
4912 dblq = rint(dblq);
4913 ST0 = fpsrcop - fptemp * dblq;
4914
4915 /* convert dblq to q by truncating towards zero */
4916 if (dblq < 0.0)
4917 q = (signed long long int)(-dblq);
4918 else
4919 q = (signed long long int)dblq;
4920
4921 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4922 /* (C0,C3,C1) <-- (q2,q1,q0) */
4923 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4924 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4925 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4926 } else {
4927 env->fpus |= 0x400; /* C2 <-- 1 */
4928 fptemp = pow(2.0, expdif - 50);
4929 fpsrcop = (ST0 / ST1) / fptemp;
4930 /* fpsrcop = integer obtained by chopping */
4931 fpsrcop = (fpsrcop < 0.0) ?
4932 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4933 ST0 -= (ST1 * fpsrcop * fptemp);
4934 }
4935}
4936
4937void helper_fprem(void)
4938{
4939 CPU86_LDouble dblq, fpsrcop, fptemp;
4940 CPU86_LDoubleU fpsrcop1, fptemp1;
4941 int expdif;
4942 signed long long int q;
4943
4944#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4945 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4946#else
4947 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4948#endif
4949 ST0 = 0.0 / 0.0; /* NaN */
4950 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4951 return;
4952 }
4953
4954 fpsrcop = (CPU86_LDouble)ST0;
4955 fptemp = (CPU86_LDouble)ST1;
4956 fpsrcop1.d = fpsrcop;
4957 fptemp1.d = fptemp;
4958 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4959
4960 if (expdif < 0) {
4961 /* optimisation? taken from the AMD docs */
4962 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4963 /* ST0 is unchanged */
4964 return;
4965 }
4966
4967 if ( expdif < 53 ) {
4968 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4969 /* round dblq towards zero */
4970 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4971 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4972
4973 /* convert dblq to q by truncating towards zero */
4974 if (dblq < 0.0)
4975 q = (signed long long int)(-dblq);
4976 else
4977 q = (signed long long int)dblq;
4978
4979 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4980 /* (C0,C3,C1) <-- (q2,q1,q0) */
4981 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4982 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4983 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4984 } else {
4985 int N = 32 + (expdif % 32); /* as per AMD docs */
4986 env->fpus |= 0x400; /* C2 <-- 1 */
4987 fptemp = pow(2.0, (double)(expdif - N));
4988 fpsrcop = (ST0 / ST1) / fptemp;
4989 /* fpsrcop = integer obtained by chopping */
4990 fpsrcop = (fpsrcop < 0.0) ?
4991 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4992 ST0 -= (ST1 * fpsrcop * fptemp);
4993 }
4994}
4995
4996void helper_fyl2xp1(void)
4997{
4998 CPU86_LDouble fptemp;
4999
5000 fptemp = ST0;
5001 if ((fptemp+1.0)>0.0) {
5002 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5003 ST1 *= fptemp;
5004 fpop();
5005 } else {
5006 env->fpus &= (~0x4700);
5007 env->fpus |= 0x400;
5008 }
5009}
5010
5011void helper_fsqrt(void)
5012{
5013 CPU86_LDouble fptemp;
5014
5015 fptemp = ST0;
5016 if (fptemp<0.0) {
5017 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5018 env->fpus |= 0x400;
5019 }
5020 ST0 = sqrt(fptemp);
5021}
5022
5023void helper_fsincos(void)
5024{
5025 CPU86_LDouble fptemp;
5026
5027 fptemp = ST0;
5028 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5029 env->fpus |= 0x400;
5030 } else {
5031 ST0 = sin(fptemp);
5032 fpush();
5033 ST0 = cos(fptemp);
5034 env->fpus &= (~0x400); /* C2 <-- 0 */
5035 /* the above code is for |arg| < 2**63 only */
5036 }
5037}
5038
5039void helper_frndint(void)
5040{
5041 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5042}
5043
5044void helper_fscale(void)
5045{
5046 ST0 = ldexp (ST0, (int)(ST1));
5047}
5048
5049void helper_fsin(void)
5050{
5051 CPU86_LDouble fptemp;
5052
5053 fptemp = ST0;
5054 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5055 env->fpus |= 0x400;
5056 } else {
5057 ST0 = sin(fptemp);
5058 env->fpus &= (~0x400); /* C2 <-- 0 */
5059 /* the above code is for |arg| < 2**53 only */
5060 }
5061}
5062
5063void helper_fcos(void)
5064{
5065 CPU86_LDouble fptemp;
5066
5067 fptemp = ST0;
5068 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5069 env->fpus |= 0x400;
5070 } else {
5071 ST0 = cos(fptemp);
5072 env->fpus &= (~0x400); /* C2 <-- 0 */
5073        /* the above code is for |arg| < 2**63 only */
5074 }
5075}
5076
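/* FXAM classifies ST0 and reports the class in the C3/C2/C0 condition codes
   (C1 receives the sign): 0x100 = NaN, 0x500 = infinity, 0x4000 = zero,
   0x4400 = denormal, 0x400 = normal finite.  Empty registers (fptags) are not
   examined yet, see the XXX below. */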
5077void helper_fxam_ST0(void)
5078{
5079 CPU86_LDoubleU temp;
5080 int expdif;
5081
5082 temp.d = ST0;
5083
5084 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5085 if (SIGND(temp))
5086 env->fpus |= 0x200; /* C1 <-- 1 */
5087
5088 /* XXX: test fptags too */
5089 expdif = EXPD(temp);
5090 if (expdif == MAXEXPD) {
5091#ifdef USE_X86LDOUBLE
5092 if (MANTD(temp) == 0x8000000000000000ULL)
5093#else
5094 if (MANTD(temp) == 0)
5095#endif
5096 env->fpus |= 0x500 /*Infinity*/;
5097 else
5098 env->fpus |= 0x100 /*NaN*/;
5099 } else if (expdif == 0) {
5100 if (MANTD(temp) == 0)
5101 env->fpus |= 0x4000 /*Zero*/;
5102 else
5103 env->fpus |= 0x4400 /*Denormal*/;
5104 } else {
5105 env->fpus |= 0x400;
5106 }
5107}
5108
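/* FSTENV/FLDENV operate on the FPU environment image: control, status and tag
   words followed by the instruction/operand pointers (stored as zero here).
   The 16-bit layout is 14 bytes, the 32-bit layout 28 bytes; the tag word is
   recomputed from the register contents on store. */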
5109void helper_fstenv(target_ulong ptr, int data32)
5110{
5111 int fpus, fptag, exp, i;
5112 uint64_t mant;
5113 CPU86_LDoubleU tmp;
5114
5115 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5116 fptag = 0;
5117 for (i=7; i>=0; i--) {
5118 fptag <<= 2;
5119 if (env->fptags[i]) {
5120 fptag |= 3;
5121 } else {
5122 tmp.d = env->fpregs[i].d;
5123 exp = EXPD(tmp);
5124 mant = MANTD(tmp);
5125 if (exp == 0 && mant == 0) {
5126 /* zero */
5127 fptag |= 1;
5128 } else if (exp == 0 || exp == MAXEXPD
5129#ifdef USE_X86LDOUBLE
5130 || (mant & (1LL << 63)) == 0
5131#endif
5132 ) {
5133 /* NaNs, infinity, denormal */
5134 fptag |= 2;
5135 }
5136 }
5137 }
5138 if (data32) {
5139 /* 32 bit */
5140 stl(ptr, env->fpuc);
5141 stl(ptr + 4, fpus);
5142 stl(ptr + 8, fptag);
5143 stl(ptr + 12, 0); /* fpip */
5144 stl(ptr + 16, 0); /* fpcs */
5145 stl(ptr + 20, 0); /* fpoo */
5146 stl(ptr + 24, 0); /* fpos */
5147 } else {
5148 /* 16 bit */
5149 stw(ptr, env->fpuc);
5150 stw(ptr + 2, fpus);
5151 stw(ptr + 4, fptag);
5152 stw(ptr + 6, 0);
5153 stw(ptr + 8, 0);
5154 stw(ptr + 10, 0);
5155 stw(ptr + 12, 0);
5156 }
5157}
5158
5159void helper_fldenv(target_ulong ptr, int data32)
5160{
5161 int i, fpus, fptag;
5162
5163 if (data32) {
5164 env->fpuc = lduw(ptr);
5165 fpus = lduw(ptr + 4);
5166 fptag = lduw(ptr + 8);
5167 }
5168 else {
5169 env->fpuc = lduw(ptr);
5170 fpus = lduw(ptr + 2);
5171 fptag = lduw(ptr + 4);
5172 }
5173 env->fpstt = (fpus >> 11) & 7;
5174 env->fpus = fpus & ~0x3800;
5175 for(i = 0;i < 8; i++) {
5176 env->fptags[i] = ((fptag & 3) == 3);
5177 fptag >>= 2;
5178 }
5179}
5180
5181void helper_fsave(target_ulong ptr, int data32)
5182{
5183 CPU86_LDouble tmp;
5184 int i;
5185
5186 helper_fstenv(ptr, data32);
5187
5188 ptr += (14 << data32);
5189 for(i = 0;i < 8; i++) {
5190 tmp = ST(i);
5191 helper_fstt(tmp, ptr);
5192 ptr += 10;
5193 }
5194
5195 /* fninit */
5196 env->fpus = 0;
5197 env->fpstt = 0;
5198 env->fpuc = 0x37f;
5199 env->fptags[0] = 1;
5200 env->fptags[1] = 1;
5201 env->fptags[2] = 1;
5202 env->fptags[3] = 1;
5203 env->fptags[4] = 1;
5204 env->fptags[5] = 1;
5205 env->fptags[6] = 1;
5206 env->fptags[7] = 1;
5207}
5208
5209void helper_frstor(target_ulong ptr, int data32)
5210{
5211 CPU86_LDouble tmp;
5212 int i;
5213
5214 helper_fldenv(ptr, data32);
5215 ptr += (14 << data32);
5216
5217 for(i = 0;i < 8; i++) {
5218 tmp = helper_fldt(ptr);
5219 ST(i) = tmp;
5220 ptr += 10;
5221 }
5222}
5223
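/* FXSAVE/FXRSTOR use a 512-byte, 16-byte aligned image: FCW/FSW at offset 0,
   the abridged one-bit-per-register tag at offset 4, MXCSR at 0x18 (when
   CR4.OSFXSR is set), the eight FP/MMX registers in 16-byte slots from 0x20
   and the XMM registers from 0xa0.  When EFER.FFXSR is set and the access
   happens at CPL 0 in long mode, the XMM registers are skipped (the "fast"
   variants). */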
5224void helper_fxsave(target_ulong ptr, int data64)
5225{
5226 int fpus, fptag, i, nb_xmm_regs;
5227 CPU86_LDouble tmp;
5228 target_ulong addr;
5229
5230 /* The operand must be 16 byte aligned */
5231 if (ptr & 0xf) {
5232 raise_exception(EXCP0D_GPF);
5233 }
5234
5235 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5236 fptag = 0;
5237 for(i = 0; i < 8; i++) {
5238 fptag |= (env->fptags[i] << i);
5239 }
5240 stw(ptr, env->fpuc);
5241 stw(ptr + 2, fpus);
5242 stw(ptr + 4, fptag ^ 0xff);
5243#ifdef TARGET_X86_64
5244 if (data64) {
5245 stq(ptr + 0x08, 0); /* rip */
5246 stq(ptr + 0x10, 0); /* rdp */
5247 } else
5248#endif
5249 {
5250 stl(ptr + 0x08, 0); /* eip */
5251 stl(ptr + 0x0c, 0); /* sel */
5252 stl(ptr + 0x10, 0); /* dp */
5253 stl(ptr + 0x14, 0); /* sel */
5254 }
5255
5256 addr = ptr + 0x20;
5257 for(i = 0;i < 8; i++) {
5258 tmp = ST(i);
5259 helper_fstt(tmp, addr);
5260 addr += 16;
5261 }
5262
5263 if (env->cr[4] & CR4_OSFXSR_MASK) {
5264 /* XXX: finish it */
5265 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5266 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5267 if (env->hflags & HF_CS64_MASK)
5268 nb_xmm_regs = 16;
5269 else
5270 nb_xmm_regs = 8;
5271 addr = ptr + 0xa0;
5272 /* Fast FXSAVE leaves out the XMM registers */
5273 if (!(env->efer & MSR_EFER_FFXSR)
5274 || (env->hflags & HF_CPL_MASK)
5275 || !(env->hflags & HF_LMA_MASK)) {
5276 for(i = 0; i < nb_xmm_regs; i++) {
5277 stq(addr, env->xmm_regs[i].XMM_Q(0));
5278 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5279 addr += 16;
5280 }
5281 }
5282 }
5283}
5284
5285void helper_fxrstor(target_ulong ptr, int data64)
5286{
5287 int i, fpus, fptag, nb_xmm_regs;
5288 CPU86_LDouble tmp;
5289 target_ulong addr;
5290
5291 /* The operand must be 16 byte aligned */
5292 if (ptr & 0xf) {
5293 raise_exception(EXCP0D_GPF);
5294 }
5295
5296 env->fpuc = lduw(ptr);
5297 fpus = lduw(ptr + 2);
5298 fptag = lduw(ptr + 4);
5299 env->fpstt = (fpus >> 11) & 7;
5300 env->fpus = fpus & ~0x3800;
5301 fptag ^= 0xff;
5302 for(i = 0;i < 8; i++) {
5303 env->fptags[i] = ((fptag >> i) & 1);
5304 }
5305
5306 addr = ptr + 0x20;
5307 for(i = 0;i < 8; i++) {
5308 tmp = helper_fldt(addr);
5309 ST(i) = tmp;
5310 addr += 16;
5311 }
5312
5313 if (env->cr[4] & CR4_OSFXSR_MASK) {
5314 /* XXX: finish it */
5315 env->mxcsr = ldl(ptr + 0x18);
5316 //ldl(ptr + 0x1c);
5317 if (env->hflags & HF_CS64_MASK)
5318 nb_xmm_regs = 16;
5319 else
5320 nb_xmm_regs = 8;
5321 addr = ptr + 0xa0;
5322        /* Fast FXRSTOR leaves out the XMM registers */
5323 if (!(env->efer & MSR_EFER_FFXSR)
5324 || (env->hflags & HF_CPL_MASK)
5325 || !(env->hflags & HF_LMA_MASK)) {
5326 for(i = 0; i < nb_xmm_regs; i++) {
5327#if !defined(VBOX) || __GNUC__ < 4
5328 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5329 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5330#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5331# if 1
5332 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5333 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5334 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5335 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5336# else
5337 /* this works fine on Mac OS X, gcc 4.0.1 */
5338 uint64_t u64 = ldq(addr);
5339            env->xmm_regs[i].XMM_Q(0) = u64;
5340            u64 = ldq(addr + 8);
5341 env->xmm_regs[i].XMM_Q(1) = u64;
5342# endif
5343#endif
5344 addr += 16;
5345 }
5346 }
5347 }
5348}
5349
5350#ifndef USE_X86LDOUBLE
5351
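/* Without host 80-bit long double support the FPU registers are kept as
   64-bit doubles; these two helpers convert between that representation and
   the 80-bit extended format used in memory images (explicit integer bit,
   15-bit exponent biased by 16383). */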
5352void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5353{
5354 CPU86_LDoubleU temp;
5355 int e;
5356
5357 temp.d = f;
5358 /* mantissa */
5359 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5360 /* exponent + sign */
5361 e = EXPD(temp) - EXPBIAS + 16383;
5362 e |= SIGND(temp) >> 16;
5363 *pexp = e;
5364}
5365
5366CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5367{
5368 CPU86_LDoubleU temp;
5369 int e;
5370 uint64_t ll;
5371
5372 /* XXX: handle overflow ? */
5373 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5374 e |= (upper >> 4) & 0x800; /* sign */
5375 ll = (mant >> 11) & ((1LL << 52) - 1);
5376#ifdef __arm__
5377 temp.l.upper = (e << 20) | (ll >> 32);
5378 temp.l.lower = ll;
5379#else
5380 temp.ll = ll | ((uint64_t)e << 52);
5381#endif
5382 return temp.d;
5383}
5384
5385#else
5386
5387void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5388{
5389 CPU86_LDoubleU temp;
5390
5391 temp.d = f;
5392 *pmant = temp.l.lower;
5393 *pexp = temp.l.upper;
5394}
5395
5396CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5397{
5398 CPU86_LDoubleU temp;
5399
5400 temp.l.upper = upper;
5401 temp.l.lower = mant;
5402 return temp.d;
5403}
5404#endif
5405
5406#ifdef TARGET_X86_64
5407
5408//#define DEBUG_MULDIV
5409
5410static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5411{
5412 *plow += a;
5413 /* carry test */
5414 if (*plow < a)
5415 (*phigh)++;
5416 *phigh += b;
5417}
5418
5419static void neg128(uint64_t *plow, uint64_t *phigh)
5420{
5421 *plow = ~ *plow;
5422 *phigh = ~ *phigh;
5423 add128(plow, phigh, 1, 0);
5424}
5425
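/* 128-by-64 bit unsigned division used by the 64-bit DIV/IDIV helpers: if the
   high half of the dividend is zero a native 64-bit divide is enough,
   otherwise a bit-by-bit restoring (shift-and-subtract) loop builds the
   quotient.  On return *plow holds the quotient and *phigh the remainder; a
   non-zero return value means the quotient does not fit in 64 bits. */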
5426/* return TRUE if overflow */
5427static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5428{
5429 uint64_t q, r, a1, a0;
5430 int i, qb, ab;
5431
5432 a0 = *plow;
5433 a1 = *phigh;
5434 if (a1 == 0) {
5435 q = a0 / b;
5436 r = a0 % b;
5437 *plow = q;
5438 *phigh = r;
5439 } else {
5440 if (a1 >= b)
5441 return 1;
5442 /* XXX: use a better algorithm */
5443 for(i = 0; i < 64; i++) {
5444 ab = a1 >> 63;
5445 a1 = (a1 << 1) | (a0 >> 63);
5446 if (ab || a1 >= b) {
5447 a1 -= b;
5448 qb = 1;
5449 } else {
5450 qb = 0;
5451 }
5452 a0 = (a0 << 1) | qb;
5453 }
5454#if defined(DEBUG_MULDIV)
5455 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5456 *phigh, *plow, b, a0, a1);
5457#endif
5458 *plow = a0;
5459 *phigh = a1;
5460 }
5461 return 0;
5462}
5463
5464/* return TRUE if overflow */
5465static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5466{
5467 int sa, sb;
5468 sa = ((int64_t)*phigh < 0);
5469 if (sa)
5470 neg128(plow, phigh);
5471 sb = (b < 0);
5472 if (sb)
5473 b = -b;
5474 if (div64(plow, phigh, b) != 0)
5475 return 1;
5476 if (sa ^ sb) {
5477 if (*plow > (1ULL << 63))
5478 return 1;
5479 *plow = - *plow;
5480 } else {
5481 if (*plow >= (1ULL << 63))
5482 return 1;
5483 }
5484 if (sa)
5485 *phigh = - *phigh;
5486 return 0;
5487}
5488
5489void helper_mulq_EAX_T0(target_ulong t0)
5490{
5491 uint64_t r0, r1;
5492
5493 mulu64(&r0, &r1, EAX, t0);
5494 EAX = r0;
5495 EDX = r1;
5496 CC_DST = r0;
5497 CC_SRC = r1;
5498}
5499
5500void helper_imulq_EAX_T0(target_ulong t0)
5501{
5502 uint64_t r0, r1;
5503
5504 muls64(&r0, &r1, EAX, t0);
5505 EAX = r0;
5506 EDX = r1;
5507 CC_DST = r0;
5508 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5509}
5510
5511target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5512{
5513 uint64_t r0, r1;
5514
5515 muls64(&r0, &r1, t0, t1);
5516 CC_DST = r0;
5517 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5518 return r0;
5519}
5520
5521void helper_divq_EAX(target_ulong t0)
5522{
5523 uint64_t r0, r1;
5524 if (t0 == 0) {
5525 raise_exception(EXCP00_DIVZ);
5526 }
5527 r0 = EAX;
5528 r1 = EDX;
5529 if (div64(&r0, &r1, t0))
5530 raise_exception(EXCP00_DIVZ);
5531 EAX = r0;
5532 EDX = r1;
5533}
5534
5535void helper_idivq_EAX(target_ulong t0)
5536{
5537 uint64_t r0, r1;
5538 if (t0 == 0) {
5539 raise_exception(EXCP00_DIVZ);
5540 }
5541 r0 = EAX;
5542 r1 = EDX;
5543 if (idiv64(&r0, &r1, t0))
5544 raise_exception(EXCP00_DIVZ);
5545 EAX = r0;
5546 EDX = r1;
5547}
5548#endif
5549
5550static void do_hlt(void)
5551{
5552 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5553 env->halted = 1;
5554 env->exception_index = EXCP_HLT;
5555 cpu_loop_exit();
5556}
5557
5558void helper_hlt(int next_eip_addend)
5559{
5560 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5561 EIP += next_eip_addend;
5562
5563 do_hlt();
5564}
5565
5566void helper_monitor(target_ulong ptr)
5567{
5568#ifdef VBOX
5569 if ((uint32_t)ECX > 1)
5570 raise_exception(EXCP0D_GPF);
5571#else /* !VBOX */
5572 if ((uint32_t)ECX != 0)
5573 raise_exception(EXCP0D_GPF);
5574#endif /* !VBOX */
5575 /* XXX: store address ? */
5576 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5577}
5578
5579void helper_mwait(int next_eip_addend)
5580{
5581 if ((uint32_t)ECX != 0)
5582 raise_exception(EXCP0D_GPF);
5583#ifdef VBOX
5584 helper_hlt(next_eip_addend);
5585#else /* !VBOX */
5586 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5587 EIP += next_eip_addend;
5588
5589 /* XXX: not complete but not completely erroneous */
5590 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5591 /* more than one CPU: do not sleep because another CPU may
5592 wake this one */
5593 } else {
5594 do_hlt();
5595 }
5596#endif /* !VBOX */
5597}
5598
5599void helper_debug(void)
5600{
5601 env->exception_index = EXCP_DEBUG;
5602 cpu_loop_exit();
5603}
5604
5605void helper_reset_rf(void)
5606{
5607 env->eflags &= ~RF_MASK;
5608}
5609
5610void helper_raise_interrupt(int intno, int next_eip_addend)
5611{
5612 raise_interrupt(intno, 1, 0, next_eip_addend);
5613}
5614
5615void helper_raise_exception(int exception_index)
5616{
5617 raise_exception(exception_index);
5618}
5619
5620void helper_cli(void)
5621{
5622 env->eflags &= ~IF_MASK;
5623}
5624
5625void helper_sti(void)
5626{
5627 env->eflags |= IF_MASK;
5628}
5629
5630#ifdef VBOX
5631void helper_cli_vme(void)
5632{
5633 env->eflags &= ~VIF_MASK;
5634}
5635
5636void helper_sti_vme(void)
5637{
5638 /* First check, then change eflags according to the AMD manual */
5639 if (env->eflags & VIP_MASK) {
5640 raise_exception(EXCP0D_GPF);
5641 }
5642 env->eflags |= VIF_MASK;
5643}
5644#endif /* VBOX */
5645
5646#if 0
5647/* vm86plus instructions */
5648void helper_cli_vm(void)
5649{
5650 env->eflags &= ~VIF_MASK;
5651}
5652
5653void helper_sti_vm(void)
5654{
5655 env->eflags |= VIF_MASK;
5656 if (env->eflags & VIP_MASK) {
5657 raise_exception(EXCP0D_GPF);
5658 }
5659}
5660#endif
5661
5662void helper_set_inhibit_irq(void)
5663{
5664 env->hflags |= HF_INHIBIT_IRQ_MASK;
5665}
5666
5667void helper_reset_inhibit_irq(void)
5668{
5669 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5670}
5671
5672void helper_boundw(target_ulong a0, int v)
5673{
5674 int low, high;
5675 low = ldsw(a0);
5676 high = ldsw(a0 + 2);
5677 v = (int16_t)v;
5678 if (v < low || v > high) {
5679 raise_exception(EXCP05_BOUND);
5680 }
5681}
5682
5683void helper_boundl(target_ulong a0, int v)
5684{
5685 int low, high;
5686 low = ldl(a0);
5687 high = ldl(a0 + 4);
5688 if (v < low || v > high) {
5689 raise_exception(EXCP05_BOUND);
5690 }
5691}
5692
5693static float approx_rsqrt(float a)
5694{
5695 return 1.0 / sqrt(a);
5696}
5697
5698static float approx_rcp(float a)
5699{
5700 return 1.0 / a;
5701}
5702
5703#if !defined(CONFIG_USER_ONLY)
5704
5705#define MMUSUFFIX _mmu
5706
5707#define SHIFT 0
5708#include "softmmu_template.h"
5709
5710#define SHIFT 1
5711#include "softmmu_template.h"
5712
5713#define SHIFT 2
5714#include "softmmu_template.h"
5715
5716#define SHIFT 3
5717#include "softmmu_template.h"
5718
5719#endif
5720
5721#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5722/* This code assumes that real physical addresses always fit into a host CPU register,
5723   which is wrong in general but true for our current use cases. */
5724RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5725{
5726 return remR3PhysReadS8(addr);
5727}
5728RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5729{
5730 return remR3PhysReadU8(addr);
5731}
5732void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5733{
5734 remR3PhysWriteU8(addr, val);
5735}
5736RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5737{
5738 return remR3PhysReadS16(addr);
5739}
5740RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5741{
5742 return remR3PhysReadU16(addr);
5743}
5744void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5745{
5746 remR3PhysWriteU16(addr, val);
5747}
5748RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5749{
5750 return remR3PhysReadS32(addr);
5751}
5752RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5753{
5754 return remR3PhysReadU32(addr);
5755}
5756void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5757{
5758 remR3PhysWriteU32(addr, val);
5759}
5760uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5761{
5762 return remR3PhysReadU64(addr);
5763}
5764void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5765{
5766 remR3PhysWriteU64(addr, val);
5767}
5768#endif /* VBOX */
5769
5770#if !defined(CONFIG_USER_ONLY)
5771/* try to fill the TLB and return an exception if error. If retaddr is
5772 NULL, it means that the function was called in C code (i.e. not
5773 from generated code or from helper.c) */
5774/* XXX: fix it to restore all registers */
5775void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5776{
5777 TranslationBlock *tb;
5778 int ret;
5779 uintptr_t pc;
5780 CPUX86State *saved_env;
5781
5782 /* XXX: hack to restore env in all cases, even if not called from
5783 generated code */
5784 saved_env = env;
5785 env = cpu_single_env;
5786
5787 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5788 if (ret) {
5789 if (retaddr) {
5790 /* now we have a real cpu fault */
5791 pc = (uintptr_t)retaddr;
5792 tb = tb_find_pc(pc);
5793 if (tb) {
5794 /* the PC is inside the translated code. It means that we have
5795 a virtual CPU fault */
5796 cpu_restore_state(tb, env, pc, NULL);
5797 }
5798 }
5799 raise_exception_err(env->exception_index, env->error_code);
5800 }
5801 env = saved_env;
5802}
5803#endif
5804
5805#ifdef VBOX
5806
5807/**
5808 * Correctly computes the eflags.
5809 * @returns eflags.
5810 * @param env1 CPU environment.
5811 */
5812uint32_t raw_compute_eflags(CPUX86State *env1)
5813{
5814 CPUX86State *savedenv = env;
5815 uint32_t efl;
5816 env = env1;
5817 efl = compute_eflags();
5818 env = savedenv;
5819 return efl;
5820}
5821
5822/**
5823 * Reads byte from virtual address in guest memory area.
5824 * XXX: is it working for any addresses? swapped out pages?
5825 * @returns read data byte.
5826 * @param env1 CPU environment.
5827 * @param pvAddr GC Virtual address.
5828 */
5829uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5830{
5831 CPUX86State *savedenv = env;
5832 uint8_t u8;
5833 env = env1;
5834 u8 = ldub_kernel(addr);
5835 env = savedenv;
5836 return u8;
5837}
5838
5839/**
5840 * Reads a 16-bit word from a virtual address in the guest memory area.
5841 * XXX: is it working for any addresses? swapped out pages?
5842 * @returns read data word.
5843 * @param env1 CPU environment.
5844 * @param pvAddr GC Virtual address.
5845 */
5846uint16_t read_word(CPUX86State *env1, target_ulong addr)
5847{
5848 CPUX86State *savedenv = env;
5849 uint16_t u16;
5850 env = env1;
5851 u16 = lduw_kernel(addr);
5852 env = savedenv;
5853 return u16;
5854}
5855
5856/**
5857 * Reads a 32-bit dword from a virtual address in the guest memory area.
5858 * XXX: is it working for any addresses? swapped out pages?
5859 * @returns read data dword.
5860 * @param env1 CPU environment.
5861 * @param pvAddr GC Virtual address.
5862 */
5863uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5864{
5865 CPUX86State *savedenv = env;
5866 uint32_t u32;
5867 env = env1;
5868 u32 = ldl_kernel(addr);
5869 env = savedenv;
5870 return u32;
5871}
5872
5873/**
5874 * Writes a byte to a virtual address in the guest memory area.
5875 * XXX: is it working for any addresses? swapped out pages?
5877 * @param env1 CPU environment.
5878 * @param pvAddr GC Virtual address.
5879 * @param val byte value
5880 */
5881void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5882{
5883 CPUX86State *savedenv = env;
5884 env = env1;
5885 stb(addr, val);
5886 env = savedenv;
5887}
5888
5889void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5890{
5891 CPUX86State *savedenv = env;
5892 env = env1;
5893 stw(addr, val);
5894 env = savedenv;
5895}
5896
5897void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5898{
5899 CPUX86State *savedenv = env;
5900 env = env1;
5901 stl(addr, val);
5902 env = savedenv;
5903}
5904
5905/**
5906 * Correctly loads selector into segment register with updating internal
5907 * qemu data/caches.
5908 * @param env1 CPU environment.
5909 * @param seg_reg Segment register.
5910 * @param selector Selector to load.
5911 */
5912void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5913{
5914 CPUX86State *savedenv = env;
5915#ifdef FORCE_SEGMENT_SYNC
5916 jmp_buf old_buf;
5917#endif
5918
5919 env = env1;
5920
5921 if ( env->eflags & X86_EFL_VM
5922 || !(env->cr[0] & X86_CR0_PE))
5923 {
5924 load_seg_vm(seg_reg, selector);
5925
5926 env = savedenv;
5927
5928 /* Successful sync. */
5929 Assert(env1->segs[seg_reg].newselector == 0);
5930 }
5931 else
5932 {
5933        /* For some reason this works even without saving/restoring the jump buffer; since
5934           this code is time critical, we don't do that. */
5935#ifdef FORCE_SEGMENT_SYNC
5936 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5937#endif
5938 if (setjmp(env1->jmp_env) == 0)
5939 {
5940 if (seg_reg == R_CS)
5941 {
5942 uint32_t e1, e2;
5943 e1 = e2 = 0;
5944 load_segment(&e1, &e2, selector);
5945 cpu_x86_load_seg_cache(env, R_CS, selector,
5946 get_seg_base(e1, e2),
5947 get_seg_limit(e1, e2),
5948 e2);
5949 }
5950 else
5951 helper_load_seg(seg_reg, selector);
5952            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5953               loading 0 selectors, which in turn led to subtle problems like #3588. */
5954
5955 env = savedenv;
5956
5957 /* Successful sync. */
5958 Assert(env1->segs[seg_reg].newselector == 0);
5959 }
5960 else
5961 {
5962 env = savedenv;
5963
5964 /* Postpone sync until the guest uses the selector. */
5965 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5966 env1->segs[seg_reg].newselector = selector;
5967 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5968 env1->exception_index = -1;
5969 env1->error_code = 0;
5970 env1->old_exception = -1;
5971 }
5972#ifdef FORCE_SEGMENT_SYNC
5973 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5974#endif
5975 }
5976
5977}
5978
5979DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5980{
5981 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5982}
5983
5984
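/**
 * Translates and executes exactly one guest instruction (REM single stepping).
 * A temporary TB is generated with CPU_EMULATE_SINGLE_INSTR set, re-executed
 * while EIP stays unchanged (REP-prefixed instructions) or until an external
 * interrupt is pending, and then invalidated and freed.  When the instruction
 * inhibits interrupts (mov ss / sti), the following instruction is emulated
 * as well.
 * @returns 0.
 * @param   env1    CPU environment.
 */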
5985int emulate_single_instr(CPUX86State *env1)
5986{
5987 TranslationBlock *tb;
5988 TranslationBlock *current;
5989 int flags;
5990 uint8_t *tc_ptr;
5991 target_ulong old_eip;
5992
5993 /* ensures env is loaded! */
5994 CPUX86State *savedenv = env;
5995 env = env1;
5996
5997 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5998
5999 current = env->current_tb;
6000 env->current_tb = NULL;
6001 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6002
6003 /*
6004 * Translate only one instruction.
6005 */
6006 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6007 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6008 env->segs[R_CS].base, flags, 0);
6009
6010 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6011
6012
6013 /* tb_link_phys: */
6014 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6015 tb->jmp_next[0] = NULL;
6016 tb->jmp_next[1] = NULL;
6017 Assert(tb->jmp_next[0] == NULL);
6018 Assert(tb->jmp_next[1] == NULL);
6019 if (tb->tb_next_offset[0] != 0xffff)
6020 tb_reset_jump(tb, 0);
6021 if (tb->tb_next_offset[1] != 0xffff)
6022 tb_reset_jump(tb, 1);
6023
6024 /*
6025 * Execute it using emulation
6026 */
6027 old_eip = env->eip;
6028 env->current_tb = tb;
6029
6030 /*
6031     * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
6032     * Perhaps not a very safe hack.
6033 */
6034 while (old_eip == env->eip)
6035 {
6036 tc_ptr = tb->tc_ptr;
6037
6038#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6039 int fake_ret;
6040 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6041#else
6042 tcg_qemu_tb_exec(tc_ptr);
6043#endif
6044
6045 /*
6046 * Exit once we detect an external interrupt and interrupts are enabled
6047 */
6048 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6049 || ( (env->eflags & IF_MASK)
6050 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6051 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6052 )
6053 {
6054 break;
6055 }
6056 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6057 tlb_flush(env, true);
6058 }
6059 }
6060 env->current_tb = current;
6061
6062 tb_phys_invalidate(tb, -1);
6063 tb_free(tb);
6064/*
6065 Assert(tb->tb_next_offset[0] == 0xffff);
6066 Assert(tb->tb_next_offset[1] == 0xffff);
6067 Assert(tb->tb_next[0] == 0xffff);
6068 Assert(tb->tb_next[1] == 0xffff);
6069 Assert(tb->jmp_next[0] == NULL);
6070 Assert(tb->jmp_next[1] == NULL);
6071 Assert(tb->jmp_first == NULL); */
6072
6073 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6074
6075 /*
6076 * Execute the next instruction when we encounter instruction fusing.
6077 */
6078 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6079 {
6080 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6081 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6082 emulate_single_instr(env);
6083 }
6084
6085 env = savedenv;
6086 return 0;
6087}
6088
6089/**
6090 * Correctly loads a new ldtr selector.
6091 *
6092 * @param env1 CPU environment.
6093 * @param selector Selector to load.
6094 */
6095void sync_ldtr(CPUX86State *env1, int selector)
6096{
6097 CPUX86State *saved_env = env;
6098 if (setjmp(env1->jmp_env) == 0)
6099 {
6100 env = env1;
6101 helper_lldt(selector);
6102 env = saved_env;
6103 }
6104 else
6105 {
6106 env = saved_env;
6107#ifdef VBOX_STRICT
6108 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6109#endif
6110 }
6111}
6112
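/**
 * Reads the SS:ESP pair for the given privilege level from the current TSS
 * (handles both 16-bit and 32-bit TSS formats).
 * @returns 1 on success, 0 if the entry lies outside the TSS limit (instead
 *          of raising \#TS as the hardware would).
 * @param   env1        CPU environment.
 * @param   ss_ptr      Where to return the stack selector.
 * @param   esp_ptr     Where to return the stack pointer.
 * @param   dpl         Privilege level to fetch the stack for.
 */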
6113int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6114 uint32_t *esp_ptr, int dpl)
6115{
6116 int type, index, shift;
6117
6118 CPUX86State *savedenv = env;
6119 env = env1;
6120
6121 if (!(env->tr.flags & DESC_P_MASK))
6122 cpu_abort(env, "invalid tss");
6123 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6124 if ((type & 7) != 1)
6125 cpu_abort(env, "invalid tss type %d", type);
6126 shift = type >> 3;
6127 index = (dpl * 4 + 2) << shift;
6128 if (index + (4 << shift) - 1 > env->tr.limit)
6129 {
6130 env = savedenv;
6131 return 0;
6132 }
6133 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6134
6135 if (shift == 0) {
6136 *esp_ptr = lduw_kernel(env->tr.base + index);
6137 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6138 } else {
6139 *esp_ptr = ldl_kernel(env->tr.base + index);
6140 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6141 }
6142
6143 env = savedenv;
6144 return 1;
6145}
6146
6147//*****************************************************************************
6148// Needs to be at the bottom of the file (overriding macros)
6149
6150static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6151{
6152#ifdef USE_X86LDOUBLE
6153 CPU86_LDoubleU tmp;
6154 tmp.l.lower = *(uint64_t const *)ptr;
6155 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6156 return tmp.d;
6157#else
6158# error "Busted FPU saving/restoring!"
6159 return *(CPU86_LDouble *)ptr;
6160#endif
6161}
6162
6163static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6164{
6165#ifdef USE_X86LDOUBLE
6166 CPU86_LDoubleU tmp;
6167 tmp.d = f;
6168 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6169 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6170 *(uint16_t *)(ptr + 10) = 0;
6171 *(uint32_t *)(ptr + 12) = 0;
6172 AssertCompile(sizeof(long double) > 8);
6173#else
6174# error "Busted FPU saving/restoring!"
6175 *(CPU86_LDouble *)ptr = f;
6176#endif
6177}
6178
6179#undef stw
6180#undef stl
6181#undef stq
6182#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6183#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6184#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6185
6186//*****************************************************************************
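/* restore_raw_fp_state/save_raw_fp_state copy the FPU/SSE state between the
   CPU state and a raw host-memory image (FXSAVE layout when CPUID reports
   FXSR, the legacy FSAVE layout otherwise).  The stw/stl/stq macros above
   (and the lduw/ldl/ldq macros further down) are redefined to plain host
   pointer accesses for exactly this purpose. */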
6187void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6188{
6189 int fpus, fptag, i, nb_xmm_regs;
6190 CPU86_LDouble tmp;
6191 uint8_t *addr;
6192 int data64 = !!(env->hflags & HF_LMA_MASK);
6193
6194 if (env->cpuid_features & CPUID_FXSR)
6195 {
6196 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6197 fptag = 0;
6198 for(i = 0; i < 8; i++) {
6199 fptag |= (env->fptags[i] << i);
6200 }
6201 stw(ptr, env->fpuc);
6202 stw(ptr + 2, fpus);
6203 stw(ptr + 4, fptag ^ 0xff);
6204
6205 addr = ptr + 0x20;
6206 for(i = 0;i < 8; i++) {
6207 tmp = ST(i);
6208 helper_fstt_raw(tmp, addr);
6209 addr += 16;
6210 }
6211
6212 if (env->cr[4] & CR4_OSFXSR_MASK) {
6213 /* XXX: finish it */
6214 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6215 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6216 nb_xmm_regs = 8 << data64;
6217 addr = ptr + 0xa0;
6218 for(i = 0; i < nb_xmm_regs; i++) {
6219#if __GNUC__ < 4
6220 stq(addr, env->xmm_regs[i].XMM_Q(0));
6221 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6222#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6223 stl(addr, env->xmm_regs[i].XMM_L(0));
6224 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6225 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6226 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6227#endif
6228 addr += 16;
6229 }
6230 }
6231 }
6232 else
6233 {
6234 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6235 int fptag;
6236
6237 fp->FCW = env->fpuc;
6238 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6239 fptag = 0;
6240 for (i=7; i>=0; i--) {
6241 fptag <<= 2;
6242 if (env->fptags[i]) {
6243 fptag |= 3;
6244 } else {
6245 /* the FPU automatically computes it */
6246 }
6247 }
6248 fp->FTW = fptag;
6249
6250 for(i = 0;i < 8; i++) {
6251 tmp = ST(i);
6252 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6253 }
6254 }
6255}
6256
6257//*****************************************************************************
6258#undef lduw
6259#undef ldl
6260#undef ldq
6261#define lduw(a) *(uint16_t *)(a)
6262#define ldl(a) *(uint32_t *)(a)
6263#define ldq(a) *(uint64_t *)(a)
6264//*****************************************************************************
6265void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6266{
6267 int i, fpus, fptag, nb_xmm_regs;
6268 CPU86_LDouble tmp;
6269 uint8_t *addr;
6270 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6271
6272 if (env->cpuid_features & CPUID_FXSR)
6273 {
6274 env->fpuc = lduw(ptr);
6275 fpus = lduw(ptr + 2);
6276 fptag = lduw(ptr + 4);
6277 env->fpstt = (fpus >> 11) & 7;
6278 env->fpus = fpus & ~0x3800;
6279 fptag ^= 0xff;
6280 for(i = 0;i < 8; i++) {
6281 env->fptags[i] = ((fptag >> i) & 1);
6282 }
6283
6284 addr = ptr + 0x20;
6285 for(i = 0;i < 8; i++) {
6286 tmp = helper_fldt_raw(addr);
6287 ST(i) = tmp;
6288 addr += 16;
6289 }
6290
6291 if (env->cr[4] & CR4_OSFXSR_MASK) {
6292 /* XXX: finish it, endianness */
6293 env->mxcsr = ldl(ptr + 0x18);
6294 //ldl(ptr + 0x1c);
6295 nb_xmm_regs = 8 << data64;
6296 addr = ptr + 0xa0;
6297 for(i = 0; i < nb_xmm_regs; i++) {
6298#if HC_ARCH_BITS == 32
6299 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6300 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6301 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6302 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6303 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6304#else
6305 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6306 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6307#endif
6308 addr += 16;
6309 }
6310 }
6311 }
6312 else
6313 {
6314 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6315 int fptag, j;
6316
6317 env->fpuc = fp->FCW;
6318 env->fpstt = (fp->FSW >> 11) & 7;
6319 env->fpus = fp->FSW & ~0x3800;
6320 fptag = fp->FTW;
6321 for(i = 0;i < 8; i++) {
6322 env->fptags[i] = ((fptag & 3) == 3);
6323 fptag >>= 2;
6324 }
6325 j = env->fpstt;
6326 for(i = 0;i < 8; i++) {
6327 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6328 ST(i) = tmp;
6329 }
6330 }
6331}
6332//*****************************************************************************
6333//*****************************************************************************
6334
6335#endif /* VBOX */
6336
6337/* Secure Virtual Machine helpers */
6338
6339#if defined(CONFIG_USER_ONLY)
6340
6341void helper_vmrun(int aflag, int next_eip_addend)
6342{
6343}
6344void helper_vmmcall(void)
6345{
6346}
6347void helper_vmload(int aflag)
6348{
6349}
6350void helper_vmsave(int aflag)
6351{
6352}
6353void helper_stgi(void)
6354{
6355}
6356void helper_clgi(void)
6357{
6358}
6359void helper_skinit(void)
6360{
6361}
6362void helper_invlpga(int aflag)
6363{
6364}
6365void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6366{
6367}
6368void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6369{
6370}
6371
6372void helper_svm_check_io(uint32_t port, uint32_t param,
6373 uint32_t next_eip_addend)
6374{
6375}
6376#else
6377
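/* The VMCB keeps segment attributes in the packed 12-bit VMRUN/VMEXIT format;
   the two helpers below convert between that format and the SegmentCache
   flags word (flags bits 8..15 map to attrib bits 0..7, flags bits 20..23 to
   attrib bits 8..11). */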
6378static inline void svm_save_seg(target_phys_addr_t addr,
6379 const SegmentCache *sc)
6380{
6381 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6382 sc->selector);
6383 stq_phys(addr + offsetof(struct vmcb_seg, base),
6384 sc->base);
6385 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6386 sc->limit);
6387 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6388 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6389}
6390
6391static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6392{
6393 unsigned int flags;
6394
6395 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6396 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6397 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6398 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6399 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6400}
6401
6402static inline void svm_load_seg_cache(target_phys_addr_t addr,
6403 CPUState *env, int seg_reg)
6404{
6405 SegmentCache sc1, *sc = &sc1;
6406 svm_load_seg(addr, sc);
6407 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6408 sc->base, sc->limit, sc->flags);
6409}
6410
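/* VMRUN: stash the current (host) state in the hsave area, load the guest
   state from the VMCB addressed by rAX, cache the intercept bitmaps in 'env'
   so SVM mode does not have to touch the VMCB for every check, and finally
   inject any event described by EVENTINJ before resuming the guest. */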
6411void helper_vmrun(int aflag, int next_eip_addend)
6412{
6413 target_ulong addr;
6414 uint32_t event_inj;
6415 uint32_t int_ctl;
6416
6417 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6418
6419 if (aflag == 2)
6420 addr = EAX;
6421 else
6422 addr = (uint32_t)EAX;
6423
6424 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6425
6426 env->vm_vmcb = addr;
6427
6428 /* save the current CPU state in the hsave page */
6429 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6430 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6431
6432 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6433 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6434
6435 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6436 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6437 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6438 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6439 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6440 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6441
6442 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6443 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6444
6445 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6446 &env->segs[R_ES]);
6447 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6448 &env->segs[R_CS]);
6449 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6450 &env->segs[R_SS]);
6451 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6452 &env->segs[R_DS]);
6453
6454 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6455 EIP + next_eip_addend);
6456 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6457 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6458
6459 /* load the interception bitmaps so we do not need to access the
6460 vmcb in svm mode */
6461 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6462 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6463 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6464 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6465 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6466 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6467
6468 /* enable intercepts */
6469 env->hflags |= HF_SVMI_MASK;
6470
6471 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6472
6473 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6474 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6475
6476 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6477 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6478
6479 /* clear exit_info_2 so we behave like the real hardware */
6480 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6481
6482 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6483 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6484 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6485 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6486 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6487 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6488 if (int_ctl & V_INTR_MASKING_MASK) {
6489 env->v_tpr = int_ctl & V_TPR_MASK;
6490 env->hflags2 |= HF2_VINTR_MASK;
6491 if (env->eflags & IF_MASK)
6492 env->hflags2 |= HF2_HIF_MASK;
6493 }
6494
6495 cpu_load_efer(env,
6496 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6497 env->eflags = 0;
6498 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6499 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6500 CC_OP = CC_OP_EFLAGS;
6501
6502 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6503 env, R_ES);
6504 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6505 env, R_CS);
6506 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6507 env, R_SS);
6508 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6509 env, R_DS);
6510
6511 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6512 env->eip = EIP;
6513 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6514 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6515 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6516 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6517 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6518
6519 /* FIXME: guest state consistency checks */
6520
6521 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6522 case TLB_CONTROL_DO_NOTHING:
6523 break;
6524 case TLB_CONTROL_FLUSH_ALL_ASID:
6525 /* FIXME: this is not 100% correct but should work for now */
6526 tlb_flush(env, 1);
6527 break;
6528 }
6529
6530 env->hflags2 |= HF2_GIF_MASK;
6531
6532 if (int_ctl & V_IRQ_MASK) {
6533 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6534 }
6535
6536 /* maybe we need to inject an event */
6537 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6538 if (event_inj & SVM_EVTINJ_VALID) {
6539 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6540 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6541 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6542
6543 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6544 /* FIXME: need to implement valid_err */
6545 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6546 case SVM_EVTINJ_TYPE_INTR:
6547 env->exception_index = vector;
6548 env->error_code = event_inj_err;
6549 env->exception_is_int = 0;
6550 env->exception_next_eip = -1;
6551 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6552 /* XXX: is it always correct? */
6553 do_interrupt(vector, 0, 0, 0, 1);
6554 break;
6555 case SVM_EVTINJ_TYPE_NMI:
6556 env->exception_index = EXCP02_NMI;
6557 env->error_code = event_inj_err;
6558 env->exception_is_int = 0;
6559 env->exception_next_eip = EIP;
6560 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6561 cpu_loop_exit();
6562 break;
6563 case SVM_EVTINJ_TYPE_EXEPT:
6564 env->exception_index = vector;
6565 env->error_code = event_inj_err;
6566 env->exception_is_int = 0;
6567 env->exception_next_eip = -1;
6568 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6569 cpu_loop_exit();
6570 break;
6571 case SVM_EVTINJ_TYPE_SOFT:
6572 env->exception_index = vector;
6573 env->error_code = event_inj_err;
6574 env->exception_is_int = 1;
6575 env->exception_next_eip = EIP;
6576 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6577 cpu_loop_exit();
6578 break;
6579 }
6580 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6581 }
6582}
6583
6584void helper_vmmcall(void)
6585{
6586 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6587 raise_exception(EXCP06_ILLOP);
6588}
6589
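/* VMLOAD/VMSAVE (below) transfer the hidden FS/GS/TR/LDTR state and the
   SYSCALL/SYSENTER MSRs between the CPU and the VMCB addressed by rAX. */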
6590void helper_vmload(int aflag)
6591{
6592 target_ulong addr;
6593 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6594
6595 if (aflag == 2)
6596 addr = EAX;
6597 else
6598 addr = (uint32_t)EAX;
6599
6600 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6601 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6602 env->segs[R_FS].base);
6603
6604 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6605 env, R_FS);
6606 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6607 env, R_GS);
6608 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6609 &env->tr);
6610 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6611 &env->ldt);
6612
6613#ifdef TARGET_X86_64
6614 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6615 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6616 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6617 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6618#endif
6619 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6620 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6621 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6622 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6623}
6624
6625void helper_vmsave(int aflag)
6626{
6627 target_ulong addr;
6628 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6629
6630 if (aflag == 2)
6631 addr = EAX;
6632 else
6633 addr = (uint32_t)EAX;
6634
6635 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6636 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6637 env->segs[R_FS].base);
6638
6639 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6640 &env->segs[R_FS]);
6641 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6642 &env->segs[R_GS]);
6643 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6644 &env->tr);
6645 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6646 &env->ldt);
6647
6648#ifdef TARGET_X86_64
6649 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6650 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6651 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6652 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6653#endif
6654 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6655 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6656 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6657 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6658}
6659
6660void helper_stgi(void)
6661{
6662 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6663 env->hflags2 |= HF2_GIF_MASK;
6664}
6665
6666void helper_clgi(void)
6667{
6668 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6669 env->hflags2 &= ~HF2_GIF_MASK;
6670}
6671
6672void helper_skinit(void)
6673{
6674 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6675 /* XXX: not implemented */
6676 raise_exception(EXCP06_ILLOP);
6677}
6678
6679void helper_invlpga(int aflag)
6680{
6681 target_ulong addr;
6682 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6683
6684 if (aflag == 2)
6685 addr = EAX;
6686 else
6687 addr = (uint32_t)EAX;
6688
6689 /* XXX: could use the ASID to decide whether the flush is
6690 actually needed */
6691 tlb_flush_page(env, addr);
6692}
6693
6694void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6695{
6696 if (likely(!(env->hflags & HF_SVMI_MASK)))
6697 return;
6698#ifndef VBOX
6699 switch(type) {
6700 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6701 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6702 helper_vmexit(type, param);
6703 }
6704 break;
6705 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6706 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6707 helper_vmexit(type, param);
6708 }
6709 break;
6710 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6711 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6712 helper_vmexit(type, param);
6713 }
6714 break;
6715 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6716 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6717 helper_vmexit(type, param);
6718 }
6719 break;
6720 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6721 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6722 helper_vmexit(type, param);
6723 }
6724 break;
6725 case SVM_EXIT_MSR:
6726 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6727 /* FIXME: this should be read in at vmrun (faster this way?) */
6728 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6729 uint32_t t0, t1;
6730 switch((uint32_t)ECX) {
6731 case 0 ... 0x1fff:
6732 t0 = (ECX * 2) % 8;
6733 t1 = (ECX * 2) / 8; /* two permission bits per MSR */
6734 break;
6735 case 0xc0000000 ... 0xc0001fff:
6736 t0 = (8192 + ECX - 0xc0000000) * 2;
6737 t1 = (t0 / 8);
6738 t0 %= 8;
6739 break;
6740 case 0xc0010000 ... 0xc0011fff:
6741 t0 = (16384 + ECX - 0xc0010000) * 2;
6742 t1 = (t0 / 8);
6743 t0 %= 8;
6744 break;
6745 default:
6746 helper_vmexit(type, param);
6747 t0 = 0;
6748 t1 = 0;
6749 break;
6750 }
6751 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6752 helper_vmexit(type, param);
6753 }
6754 break;
6755 default:
6756 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6757 helper_vmexit(type, param);
6758 }
6759 break;
6760 }
6761#else /* VBOX */
6762 AssertMsgFailed(("We shouldn't be here, HM is supported differently!"));
6763#endif /* VBOX */
6764}
6765
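/* Consults the I/O permission map (IOPM) referenced by the VMCB; bits 4..6 of
   'param' carry the access-size bits of the IOIO exit information, and a set
   permission bit for the port forces an SVM_EXIT_IOIO vmexit with the port
   number in the upper 16 bits of exit_info_1. */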
6766void helper_svm_check_io(uint32_t port, uint32_t param,
6767 uint32_t next_eip_addend)
6768{
6769 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6770 /* FIXME: this should be read in at vmrun (faster this way?) */
6771 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6772 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6773 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6774 /* next EIP */
6775 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6776 env->eip + next_eip_addend);
6777 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6778 }
6779 }
6780}
6781
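/* #VMEXIT: write the guest state and the exit information back into the VMCB,
   then reload the host state that helper_vmrun saved in the hsave area. */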
6782/* Note: currently only 32 bits of exit_code are used */
6783void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6784{
6785 uint32_t int_ctl;
6786
6787 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6788 exit_code, exit_info_1,
6789 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6790 EIP);
6791
6792 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6793 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6794 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6795 } else {
6796 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6797 }
6798
6799 /* Save the VM state in the vmcb */
6800 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6801 &env->segs[R_ES]);
6802 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6803 &env->segs[R_CS]);
6804 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6805 &env->segs[R_SS]);
6806 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6807 &env->segs[R_DS]);
6808
6809 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6810 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6811
6812 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6813 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6814
6815 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6816 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6817 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6820
6821 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6822 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6823 int_ctl |= env->v_tpr & V_TPR_MASK;
6824 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6825 int_ctl |= V_IRQ_MASK;
6826 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6827
6828 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6829 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6832 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6833 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6834 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6835
6836 /* Reload the host state from vm_hsave */
6837 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6838 env->hflags &= ~HF_SVMI_MASK;
6839 env->intercept = 0;
6840 env->intercept_exceptions = 0;
6841 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6842 env->tsc_offset = 0;
6843
6844 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6845 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6846
6847 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6848 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6849
6850 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6851 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6852 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6853 /* we need to set the efer after the crs so the hidden flags get
6854 set properly */
6855 cpu_load_efer(env,
6856 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6857 env->eflags = 0;
6858 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6859 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6860 CC_OP = CC_OP_EFLAGS;
6861
6862 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6863 env, R_ES);
6864 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6865 env, R_CS);
6866 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6867 env, R_SS);
6868 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6869 env, R_DS);
6870
6871 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6872 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6873 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6874
6875 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6876 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6877
6878 /* other setups */
6879 cpu_x86_set_cpl(env, 0);
6880 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6881 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6882
6883 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6884 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6885 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6886 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6887 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6888
6889 env->hflags2 &= ~HF2_GIF_MASK;
6890 /* FIXME: Resets the current ASID register to zero (host ASID). */
6891
6892 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6893
6894 /* Clears the TSC_OFFSET inside the processor. */
6895
6896 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6897 from the page table indicated by the host's CR3. If the PDPEs contain
6898 illegal state, the processor causes a shutdown. */
6899
6900 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6901 env->cr[0] |= CR0_PE_MASK;
6902 env->eflags &= ~VM_MASK;
6903
6904 /* Disables all breakpoints in the host DR7 register. */
6905
6906 /* Checks the reloaded host state for consistency. */
6907
6908 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6909 host's code segment or non-canonical (in the case of long mode), a
6910 #GP fault is delivered inside the host. */
6911
6912 /* remove any pending exception */
6913 env->exception_index = -1;
6914 env->error_code = 0;
6915 env->old_exception = -1;
6916
6917 cpu_loop_exit();
6918}
6919
6920#endif
6921
6922/* MMX/SSE */
6923/* XXX: optimize by storing fptt and fptags in the static cpu state */
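/* MMX aliases the x87 register file: entering MMX mode resets the stack top
   and marks every tag as valid (0), while EMMS marks every register empty (1). */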
6924void helper_enter_mmx(void)
6925{
6926 env->fpstt = 0;
6927 *(uint32_t *)(env->fptags) = 0;
6928 *(uint32_t *)(env->fptags + 4) = 0;
6929}
6930
6931void helper_emms(void)
6932{
6933 /* set to empty state */
6934 *(uint32_t *)(env->fptags) = 0x01010101;
6935 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6936}
6937
6938/* XXX: suppress */
6939void helper_movq(void *d, void *s)
6940{
6941 *(uint64_t *)d = *(uint64_t *)s;
6942}
6943
6944#define SHIFT 0
6945#include "ops_sse.h"
6946
6947#define SHIFT 1
6948#include "ops_sse.h"
6949
6950#define SHIFT 0
6951#include "helper_template.h"
6952#undef SHIFT
6953
6954#define SHIFT 1
6955#include "helper_template.h"
6956#undef SHIFT
6957
6958#define SHIFT 2
6959#include "helper_template.h"
6960#undef SHIFT
6961
6962#ifdef TARGET_X86_64
6963
6964#define SHIFT 3
6965#include "helper_template.h"
6966#undef SHIFT
6967
6968#endif
6969
6970/* bit operations */
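/* helper_bsf counts trailing zero bits; note that the loop below does not
   terminate for t0 == 0, so the zero case (ZF set, destination undefined) is
   expected to be handled before this helper is called. */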
6971target_ulong helper_bsf(target_ulong t0)
6972{
6973 int count;
6974 target_ulong res;
6975
6976 res = t0;
6977 count = 0;
6978 while ((res & 1) == 0) {
6979 count++;
6980 res >>= 1;
6981 }
6982 return count;
6983}
6984
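/* With wordsize == 0 this returns the bit index of the most significant set
   bit (BSR semantics, see helper_bsr below); with wordsize > 0 it returns the
   number of leading zeros within a wordsize-bit operand (LZCNT semantics). */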
6985target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6986{
6987 int count;
6988 target_ulong res, mask;
6989
6990 if (wordsize > 0 && t0 == 0) {
6991 return wordsize;
6992 }
6993 res = t0;
6994 count = TARGET_LONG_BITS - 1;
6995 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6996 while ((res & mask) == 0) {
6997 count--;
6998 res <<= 1;
6999 }
7000 if (wordsize > 0) {
7001 return wordsize - 1 - count;
7002 }
7003 return count;
7004}
7005
7006target_ulong helper_bsr(target_ulong t0)
7007{
7008 return helper_lzcnt(t0, 0);
7009}
7010
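/* Lazy flags evaluation: CC_OP records which operation last set the flags and
   CC_SRC/CC_DST hold its operands, so EFLAGS is only materialised by the
   helpers below when something actually needs it.  For CC_OP_EFLAGS the flags
   are already stored verbatim in CC_SRC. */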
7011static int compute_all_eflags(void)
7012{
7013 return CC_SRC;
7014}
7015
7016static int compute_c_eflags(void)
7017{
7018 return CC_SRC & CC_C;
7019}
7020
7021uint32_t helper_cc_compute_all(int op)
7022{
7023 switch (op) {
7024 default: /* should never happen */ return 0;
7025
7026 case CC_OP_EFLAGS: return compute_all_eflags();
7027
7028 case CC_OP_MULB: return compute_all_mulb();
7029 case CC_OP_MULW: return compute_all_mulw();
7030 case CC_OP_MULL: return compute_all_mull();
7031
7032 case CC_OP_ADDB: return compute_all_addb();
7033 case CC_OP_ADDW: return compute_all_addw();
7034 case CC_OP_ADDL: return compute_all_addl();
7035
7036 case CC_OP_ADCB: return compute_all_adcb();
7037 case CC_OP_ADCW: return compute_all_adcw();
7038 case CC_OP_ADCL: return compute_all_adcl();
7039
7040 case CC_OP_SUBB: return compute_all_subb();
7041 case CC_OP_SUBW: return compute_all_subw();
7042 case CC_OP_SUBL: return compute_all_subl();
7043
7044 case CC_OP_SBBB: return compute_all_sbbb();
7045 case CC_OP_SBBW: return compute_all_sbbw();
7046 case CC_OP_SBBL: return compute_all_sbbl();
7047
7048 case CC_OP_LOGICB: return compute_all_logicb();
7049 case CC_OP_LOGICW: return compute_all_logicw();
7050 case CC_OP_LOGICL: return compute_all_logicl();
7051
7052 case CC_OP_INCB: return compute_all_incb();
7053 case CC_OP_INCW: return compute_all_incw();
7054 case CC_OP_INCL: return compute_all_incl();
7055
7056 case CC_OP_DECB: return compute_all_decb();
7057 case CC_OP_DECW: return compute_all_decw();
7058 case CC_OP_DECL: return compute_all_decl();
7059
7060 case CC_OP_SHLB: return compute_all_shlb();
7061 case CC_OP_SHLW: return compute_all_shlw();
7062 case CC_OP_SHLL: return compute_all_shll();
7063
7064 case CC_OP_SARB: return compute_all_sarb();
7065 case CC_OP_SARW: return compute_all_sarw();
7066 case CC_OP_SARL: return compute_all_sarl();
7067
7068#ifdef TARGET_X86_64
7069 case CC_OP_MULQ: return compute_all_mulq();
7070
7071 case CC_OP_ADDQ: return compute_all_addq();
7072
7073 case CC_OP_ADCQ: return compute_all_adcq();
7074
7075 case CC_OP_SUBQ: return compute_all_subq();
7076
7077 case CC_OP_SBBQ: return compute_all_sbbq();
7078
7079 case CC_OP_LOGICQ: return compute_all_logicq();
7080
7081 case CC_OP_INCQ: return compute_all_incq();
7082
7083 case CC_OP_DECQ: return compute_all_decq();
7084
7085 case CC_OP_SHLQ: return compute_all_shlq();
7086
7087 case CC_OP_SARQ: return compute_all_sarq();
7088#endif
7089 }
7090}
7091
7092uint32_t helper_cc_compute_c(int op)
7093{
7094 switch (op) {
7095 default: /* should never happen */ return 0;
7096
7097 case CC_OP_EFLAGS: return compute_c_eflags();
7098
7099 case CC_OP_MULB: return compute_c_mull();
7100 case CC_OP_MULW: return compute_c_mull();
7101 case CC_OP_MULL: return compute_c_mull();
7102
7103 case CC_OP_ADDB: return compute_c_addb();
7104 case CC_OP_ADDW: return compute_c_addw();
7105 case CC_OP_ADDL: return compute_c_addl();
7106
7107 case CC_OP_ADCB: return compute_c_adcb();
7108 case CC_OP_ADCW: return compute_c_adcw();
7109 case CC_OP_ADCL: return compute_c_adcl();
7110
7111 case CC_OP_SUBB: return compute_c_subb();
7112 case CC_OP_SUBW: return compute_c_subw();
7113 case CC_OP_SUBL: return compute_c_subl();
7114
7115 case CC_OP_SBBB: return compute_c_sbbb();
7116 case CC_OP_SBBW: return compute_c_sbbw();
7117 case CC_OP_SBBL: return compute_c_sbbl();
7118
7119 case CC_OP_LOGICB: return compute_c_logicb();
7120 case CC_OP_LOGICW: return compute_c_logicw();
7121 case CC_OP_LOGICL: return compute_c_logicl();
7122
7123 case CC_OP_INCB: return compute_c_incl();
7124 case CC_OP_INCW: return compute_c_incl();
7125 case CC_OP_INCL: return compute_c_incl();
7126
7127 case CC_OP_DECB: return compute_c_incl();
7128 case CC_OP_DECW: return compute_c_incl();
7129 case CC_OP_DECL: return compute_c_incl();
7130
7131 case CC_OP_SHLB: return compute_c_shlb();
7132 case CC_OP_SHLW: return compute_c_shlw();
7133 case CC_OP_SHLL: return compute_c_shll();
7134
7135 case CC_OP_SARB: return compute_c_sarl();
7136 case CC_OP_SARW: return compute_c_sarl();
7137 case CC_OP_SARL: return compute_c_sarl();
7138
7139#ifdef TARGET_X86_64
7140 case CC_OP_MULQ: return compute_c_mull();
7141
7142 case CC_OP_ADDQ: return compute_c_addq();
7143
7144 case CC_OP_ADCQ: return compute_c_adcq();
7145
7146 case CC_OP_SUBQ: return compute_c_subq();
7147
7148 case CC_OP_SBBQ: return compute_c_sbbq();
7149
7150 case CC_OP_LOGICQ: return compute_c_logicq();
7151
7152 case CC_OP_INCQ: return compute_c_incl();
7153
7154 case CC_OP_DECQ: return compute_c_incl();
7155
7156 case CC_OP_SHLQ: return compute_c_shlq();
7157
7158 case CC_OP_SARQ: return compute_c_sarl();
7159#endif
7160 }
7161}