VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 66374

Last change on this file since 66374 was 66262, checked in by vboxsync, 8 years ago

REM: Do not mess with the TSS busy flag in CPU. Make sure that when loading TSS, the busy flag is set, not cleared. (bugref:8818)

  • Property svn:eol-style set to native
File size: 200.9 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
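/* Maps every 8-bit value to CC_P when it has even parity (an even number of
   set bits); this is how the x86 PF flag is derived from a result's low byte. */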
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
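/* A rotate-through-carry effectively operates on width+1 bits, so the masked
   5-bit count is reduced modulo 17 for 16-bit operands and modulo 9 for 8-bit
   operands; the two tables below precompute that reduction. */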
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
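/* Constants loaded by the FPU load-constant helpers:
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */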
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232 if (selector & 0x4)
233 dt = &env->ldt;
234 else
235 dt = &env->gdt;
236 index = selector & ~7;
237 if ((index + 7) > dt->limit)
238 return -1;
239 ptr = dt->base + index;
240 *e1_ptr = ldl_kernel(ptr);
241 *e2_ptr = ldl_kernel(ptr + 4);
242 return 0;
243}
244
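/* e1/e2 are the low and high 32-bit words of an 8-byte segment descriptor.
   The limit is e1[15:0] | e2[19:16], scaled to 4K pages when the G bit is set;
   the base is e1[31:16] | e2[7:0] << 16 | e2[31:24] << 24. */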
245static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
246{
247 unsigned int limit;
248 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
249 if (e2 & DESC_G_MASK)
250 limit = (limit << 12) | 0xfff;
251 return limit;
252}
253
254static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
255{
256 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
257}
258
259static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
260{
261 sc->base = get_seg_base(e1, e2);
262 sc->limit = get_seg_limit(e1, e2);
263#ifndef VBOX
264 sc->flags = e2;
265#else
266 sc->flags = e2 & DESC_RAW_FLAG_BITS;
267 sc->newselector = 0;
268 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
269#endif
270}
271
272/* init the segment cache in vm86 mode. */
273static inline void load_seg_vm(int seg, int selector)
274{
275 selector &= 0xffff;
276#ifdef VBOX
277 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
278 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
279 flags |= (3 << DESC_DPL_SHIFT);
280
281 cpu_x86_load_seg_cache(env, seg, selector,
282 (selector << 4), 0xffff, flags);
283#else /* VBOX */
284 cpu_x86_load_seg_cache(env, seg, selector,
285 (selector << 4), 0xffff, 0);
286#endif /* VBOX */
287}
288
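/* On a stack switch to a more privileged level, the new SS:ESP for the target
   DPL comes from the current TSS: a 32-bit TSS keeps ESPn/SSn at offset
   4 + dpl * 8, a 16-bit TSS at offset 2 + dpl * 4, which is what the
   (dpl * 4 + 2) << shift index below computes. */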
289static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 3)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0; /* gcc warning? */
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with RPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif /* VBOX */
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390# endif
391#endif /* VBOX */
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419
420 /* if task gate, we read the TSS segment and we load it */
421 if (type == 5) {
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424 tss_selector = e1 >> 16;
425 if (tss_selector & 4)
426 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
427 if (load_segment(&e1, &e2, tss_selector) != 0)
428 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
429 if (e2 & DESC_S_MASK)
430 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
431 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
432 if ((type & 7) != 1)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 }
435
436 if (!(e2 & DESC_P_MASK))
437 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
438
439 if (type & 8)
440 tss_limit_max = 103;
441 else
442 tss_limit_max = 43;
443 tss_limit = get_seg_limit(e1, e2);
444 tss_base = get_seg_base(e1, e2);
445 if ((tss_selector & 4) != 0 ||
446 tss_limit < tss_limit_max)
447 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
448 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
449 if (old_type & 8)
450 old_tss_limit_max = 103;
451 else
452 old_tss_limit_max = 43;
453
454#ifndef VBOX /* The old TSS is written first... */
455 /* read all the registers from the new TSS */
456 if (type & 8) {
457 /* 32 bit */
458 new_cr3 = ldl_kernel(tss_base + 0x1c);
459 new_eip = ldl_kernel(tss_base + 0x20);
460 new_eflags = ldl_kernel(tss_base + 0x24);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
463 for(i = 0; i < 6; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x60);
466 new_trap = ldl_kernel(tss_base + 0x64);
467 } else {
468 /* 16 bit */
469 new_cr3 = 0;
470 new_eip = lduw_kernel(tss_base + 0x0e);
471 new_eflags = lduw_kernel(tss_base + 0x10);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
474 for(i = 0; i < 4; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
476 new_ldt = lduw_kernel(tss_base + 0x2a);
477 new_segs[R_FS] = 0;
478 new_segs[R_GS] = 0;
479 new_trap = 0;
480 }
481#endif
482
483 /* NOTE: we must avoid memory exceptions during the task switch,
484 so we make dummy accesses beforehand */
485 /* XXX: it can still fail in some cases, so a bigger hack is
486 necessary to validate the TLB after having done the accesses */
487
488 v1 = ldub_kernel(env->tr.base);
489 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
490 stb_kernel(env->tr.base, v1);
491 stb_kernel(env->tr.base + old_tss_limit_max, v2);
492
493 /* clear busy bit (it is restartable) */
494 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
495 target_ulong ptr;
496 uint32_t e2;
497 ptr = env->gdt.base + (env->tr.selector & ~7);
498 e2 = ldl_kernel(ptr + 4);
499 e2 &= ~DESC_TSS_BUSY_MASK;
500 stl_kernel(ptr + 4, e2);
501 }
502 old_eflags = compute_eflags();
503 if (source == SWITCH_TSS_IRET)
504 old_eflags &= ~NT_MASK;
505
506 /* save the current state in the old TSS */
507 if (type & 8) {
508 /* 32 bit */
509 stl_kernel(env->tr.base + 0x20, next_eip);
510 stl_kernel(env->tr.base + 0x24, old_eflags);
511 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
512 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
513 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
514 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
515 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
516 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
517 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
518 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
519 for(i = 0; i < 6; i++)
520 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
521#if defined(VBOX) && defined(DEBUG)
522 printf("TSS 32 bits switch\n");
523 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
524#endif
525 } else {
526 /* 16 bit */
527 stw_kernel(env->tr.base + 0x0e, next_eip);
528 stw_kernel(env->tr.base + 0x10, old_eflags);
529 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
530 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
531 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
532 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
533 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
534 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
535 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
536 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
537 for(i = 0; i < 4; i++)
538 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
539 }
540
541#ifdef VBOX
542 /* read all the registers from the new TSS - may be the same as the old one */
543 if (type & 8) {
544 /* 32 bit */
545 new_cr3 = ldl_kernel(tss_base + 0x1c);
546 new_eip = ldl_kernel(tss_base + 0x20);
547 new_eflags = ldl_kernel(tss_base + 0x24);
548 for(i = 0; i < 8; i++)
549 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
550 for(i = 0; i < 6; i++)
551 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
552 new_ldt = lduw_kernel(tss_base + 0x60);
553 new_trap = ldl_kernel(tss_base + 0x64);
554 } else {
555 /* 16 bit */
556 new_cr3 = 0;
557 new_eip = lduw_kernel(tss_base + 0x0e);
558 new_eflags = lduw_kernel(tss_base + 0x10);
559 for(i = 0; i < 8; i++)
560 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
561 for(i = 0; i < 4; i++)
562 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
563 new_ldt = lduw_kernel(tss_base + 0x2a);
564 new_segs[R_FS] = 0;
565 new_segs[R_GS] = 0;
566 new_trap = 0;
567 }
568#endif
569
570 /* now if an exception occurs, it will occur in the next task
571 context */
572
573 if (source == SWITCH_TSS_CALL) {
574 stw_kernel(tss_base, env->tr.selector);
575 new_eflags |= NT_MASK;
576 }
577
578 /* set busy bit */
579 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
580 target_ulong ptr;
581 uint32_t e2;
582 ptr = env->gdt.base + (tss_selector & ~7);
583 e2 = ldl_kernel(ptr + 4);
584 e2 |= DESC_TSS_BUSY_MASK;
585 stl_kernel(ptr + 4, e2);
586 }
587
588 /* set the new CPU state */
589 /* from this point, any exception which occurs can give problems */
590 env->cr[0] |= CR0_TS_MASK;
591 env->hflags |= HF_TS_MASK;
592 env->tr.selector = tss_selector;
593 env->tr.base = tss_base;
594 env->tr.limit = tss_limit;
595#ifndef VBOX
596 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
597#else
598 env->tr.flags = (e2 | DESC_TSS_BUSY_MASK) & DESC_RAW_FLAG_BITS;
599 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
600 env->tr.newselector = 0;
601#endif
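    /* Note: the VBox build keeps the busy bit set in the cached TR flags
       (the #ifndef VBOX path clears it instead), so a newly loaded TSS is
       always marked busy; see the change note at the top of this page
       (bugref:8818). */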
602
603 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
604 cpu_x86_update_cr3(env, new_cr3);
605 }
606
607 /* load all registers without an exception, then reload them with
608 possible exception */
609 env->eip = new_eip;
610 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
611 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
612 if (!(type & 8))
613 eflags_mask &= 0xffff;
614 load_eflags(new_eflags, eflags_mask);
615 /* XXX: what to do in 16 bit case ? */
616 EAX = new_regs[0];
617 ECX = new_regs[1];
618 EDX = new_regs[2];
619 EBX = new_regs[3];
620 ESP = new_regs[4];
621 EBP = new_regs[5];
622 ESI = new_regs[6];
623 EDI = new_regs[7];
624 if (new_eflags & VM_MASK) {
625 for(i = 0; i < 6; i++)
626 load_seg_vm(i, new_segs[i]);
627 /* in vm86, CPL is always 3 */
628 cpu_x86_set_cpl(env, 3);
629 } else {
630 /* CPL is set to the RPL of CS */
631 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
632 /* first just selectors as the rest may trigger exceptions */
633 for(i = 0; i < 6; i++)
634 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
635 }
636
637 env->ldt.selector = new_ldt & ~4;
638 env->ldt.base = 0;
639 env->ldt.limit = 0;
640 env->ldt.flags = 0;
641#ifdef VBOX
642 env->ldt.flags = DESC_INTEL_UNUSABLE;
643 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
644 env->ldt.newselector = 0;
645#endif
646
647 /* load the LDT */
648 if (new_ldt & 4)
649 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
650
651 if ((new_ldt & 0xfffc) != 0) {
652 dt = &env->gdt;
653 index = new_ldt & ~7;
654 if ((index + 7) > dt->limit)
655 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
656 ptr = dt->base + index;
657 e1 = ldl_kernel(ptr);
658 e2 = ldl_kernel(ptr + 4);
659 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
660 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
661 if (!(e2 & DESC_P_MASK))
662 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
663 load_seg_cache_raw_dt(&env->ldt, e1, e2);
664 }
665
666 /* load the segments */
667 if (!(new_eflags & VM_MASK)) {
668 tss_load_seg(R_CS, new_segs[R_CS]);
669 tss_load_seg(R_SS, new_segs[R_SS]);
670 tss_load_seg(R_ES, new_segs[R_ES]);
671 tss_load_seg(R_DS, new_segs[R_DS]);
672 tss_load_seg(R_FS, new_segs[R_FS]);
673 tss_load_seg(R_GS, new_segs[R_GS]);
674 }
675
676 /* check that EIP is in the CS segment limits */
677 if (new_eip > env->segs[R_CS].limit) {
678 /* XXX: different exception if CALL ? */
679 raise_exception_err(EXCP0D_GPF, 0);
680 }
681
682#ifndef CONFIG_USER_ONLY
683 /* reset local breakpoints */
684 if (env->dr[7] & 0x55) {
685 for (i = 0; i < 4; i++) {
686 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
687 hw_breakpoint_remove(env, i);
688 }
689 env->dr[7] &= ~0x55;
690 }
691#endif
692}
693
694/* check if Port I/O is allowed in TSS */
695static inline void check_io(int addr, int size)
696{
697#ifndef VBOX
698 int io_offset, val, mask;
699#else
700 int val, mask;
701 unsigned int io_offset;
702#endif /* VBOX */
703
704 /* TSS must be a valid 32 bit one */
705 if (!(env->tr.flags & DESC_P_MASK) ||
706 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 11 ||
707 env->tr.limit < 103)
708 goto fail;
709 io_offset = lduw_kernel(env->tr.base + 0x66);
710 io_offset += (addr >> 3);
711 /* Note: the check needs two bytes */
712 if ((io_offset + 1) > env->tr.limit)
713 goto fail;
714 val = lduw_kernel(env->tr.base + io_offset);
715 val >>= (addr & 7);
716 mask = (1 << size) - 1;
717 /* all bits must be zero to allow the I/O */
718 if ((val & mask) != 0) {
719 fail:
720 raise_exception_err(EXCP0D_GPF, 0);
721 }
722}
723
724#ifdef VBOX
725
726/* Keep in sync with gen_check_external_event() */
727void helper_check_external_event()
728{
729 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
730 | CPU_INTERRUPT_EXTERNAL_EXIT
731 | CPU_INTERRUPT_EXTERNAL_TIMER
732 | CPU_INTERRUPT_EXTERNAL_DMA))
733 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
734 && (env->eflags & IF_MASK)
735 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
736 {
737 helper_external_event();
738 }
739
740}
741
742void helper_sync_seg(uint32_t reg)
743{
744 if (env->segs[reg].newselector)
745 sync_seg(env, reg, env->segs[reg].newselector);
746}
747
748#endif /* VBOX */
749
750void helper_check_iob(uint32_t t0)
751{
752 check_io(t0, 1);
753}
754
755void helper_check_iow(uint32_t t0)
756{
757 check_io(t0, 2);
758}
759
760void helper_check_iol(uint32_t t0)
761{
762 check_io(t0, 4);
763}
764
765void helper_outb(uint32_t port, uint32_t data)
766{
767#ifndef VBOX
768 cpu_outb(port, data & 0xff);
769#else
770 cpu_outb(env, port, data & 0xff);
771#endif
772}
773
774target_ulong helper_inb(uint32_t port)
775{
776#ifndef VBOX
777 return cpu_inb(port);
778#else
779 return cpu_inb(env, port);
780#endif
781}
782
783void helper_outw(uint32_t port, uint32_t data)
784{
785#ifndef VBOX
786 cpu_outw(port, data & 0xffff);
787#else
788 cpu_outw(env, port, data & 0xffff);
789#endif
790}
791
792target_ulong helper_inw(uint32_t port)
793{
794#ifndef VBOX
795 return cpu_inw(port);
796#else
797 return cpu_inw(env, port);
798#endif
799}
800
801void helper_outl(uint32_t port, uint32_t data)
802{
803#ifndef VBOX
804 cpu_outl(port, data);
805#else
806 cpu_outl(env, port, data);
807#endif
808}
809
810target_ulong helper_inl(uint32_t port)
811{
812#ifndef VBOX
813 return cpu_inl(port);
814#else
815 return cpu_inl(env, port);
816#endif
817}
818
819static inline unsigned int get_sp_mask(unsigned int e2)
820{
821 if (e2 & DESC_B_MASK)
822 return 0xffffffff;
823 else
824 return 0xffff;
825}
826
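/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */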
827static int exeption_has_error_code(int intno)
828{
829 switch(intno) {
830 case 8:
831 case 10:
832 case 11:
833 case 12:
834 case 13:
835 case 14:
836 case 17:
837 return 1;
838 }
839 return 0;
840}
841
842#ifdef TARGET_X86_64
843#define SET_ESP(val, sp_mask)\
844do {\
845 if ((sp_mask) == 0xffff)\
846 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
847 else if ((sp_mask) == 0xffffffffLL)\
848 ESP = (uint32_t)(val);\
849 else\
850 ESP = (val);\
851} while (0)
852#else
853#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
854#endif
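/* SET_ESP only writes back the bits covered by sp_mask, so a 16-bit stack
   segment updates SP while leaving the upper bits of ESP/RSP untouched. */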
855
856/* in 64-bit machines, this can overflow. So this segment addition macro
857 * can be used to trim the value to 32-bit whenever needed */
858#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
859
860/* XXX: add an is_user flag to have proper security support */
861#define PUSHW(ssp, sp, sp_mask, val)\
862{\
863 sp -= 2;\
864 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
865}
866
867#define PUSHL(ssp, sp, sp_mask, val)\
868{\
869 sp -= 4;\
870 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
871}
872
873#define POPW(ssp, sp, sp_mask, val)\
874{\
875 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
876 sp += 2;\
877}
878
879#define POPL(ssp, sp, sp_mask, val)\
880{\
881 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
882 sp += 4;\
883}
884
885/* protected mode interrupt */
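/* Delivery sequence: read the gate descriptor from the IDT, check the gate
   and target code segment privileges, optionally switch to the inner-level
   stack taken from the TSS, push the return frame (and error code), then
   load CS:EIP from the gate. */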
886static void do_interrupt_protected(int intno, int is_int, int error_code,
887 unsigned int next_eip, int is_hw)
888{
889 SegmentCache *dt;
890 target_ulong ptr, ssp;
891 int type, dpl, selector, ss_dpl, cpl;
892 int has_error_code, new_stack, shift;
893 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
894 uint32_t old_eip, sp_mask;
895
896#ifdef VBOX
897 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
898 cpu_loop_exit();
899#endif
900
901 has_error_code = 0;
902 if (!is_int && !is_hw)
903 has_error_code = exeption_has_error_code(intno);
904 if (is_int)
905 old_eip = next_eip;
906 else
907 old_eip = env->eip;
908
909 dt = &env->idt;
910#ifndef VBOX
911 if (intno * 8 + 7 > dt->limit)
912#else
913 if ((unsigned)intno * 8 + 7 > dt->limit)
914#endif
915 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
916 ptr = dt->base + intno * 8;
917 e1 = ldl_kernel(ptr);
918 e2 = ldl_kernel(ptr + 4);
919 /* check gate type */
920 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
921 switch(type) {
922 case 5: /* task gate */
923#ifdef VBOX
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 cpl = env->hflags & HF_CPL_MASK;
926 /* check privilege if software int */
927 if (is_int && dpl < cpl)
928 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
929#endif
930 /* must do that check here to return the correct error code */
931 if (!(e2 & DESC_P_MASK))
932 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
933 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
934 if (has_error_code) {
935 int type;
936 uint32_t mask;
937 /* push the error code */
938 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
939 shift = type >> 3;
940 if (env->segs[R_SS].flags & DESC_B_MASK)
941 mask = 0xffffffff;
942 else
943 mask = 0xffff;
944 esp = (ESP - (2 << shift)) & mask;
945 ssp = env->segs[R_SS].base + esp;
946 if (shift)
947 stl_kernel(ssp, error_code);
948 else
949 stw_kernel(ssp, error_code);
950 SET_ESP(esp, mask);
951 }
952 return;
953 case 6: /* 286 interrupt gate */
954 case 7: /* 286 trap gate */
955 case 14: /* 386 interrupt gate */
956 case 15: /* 386 trap gate */
957 break;
958 default:
959 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
960 break;
961 }
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 cpl = env->hflags & HF_CPL_MASK;
964 /* check privilege if software int */
965 if (is_int && dpl < cpl)
966 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
967 /* check valid bit */
968 if (!(e2 & DESC_P_MASK))
969 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
970 selector = e1 >> 16;
971 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
972 if ((selector & 0xfffc) == 0)
973 raise_exception_err(EXCP0D_GPF, 0);
974
975 if (load_segment(&e1, &e2, selector) != 0)
976 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
977#ifdef VBOX /** @todo figure out when this is done one day... */
978 if (!(e2 & DESC_A_MASK))
979 e2 = set_segment_accessed(selector, e2);
980#endif
981 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
982 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
983 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
984 if (dpl > cpl)
985 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
986 if (!(e2 & DESC_P_MASK))
987 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
988 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
989 /* to inner privilege */
990 get_ss_esp_from_tss(&ss, &esp, dpl);
991 if ((ss & 0xfffc) == 0)
992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
993 if ((ss & 3) != dpl)
994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
995 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
996 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
997#ifdef VBOX /** @todo figure out when this is done one day... */
998 if (!(ss_e2 & DESC_A_MASK))
999 ss_e2 = set_segment_accessed(ss, ss_e2);
1000#endif
1001 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1002 if (ss_dpl != dpl)
1003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1004 if (!(ss_e2 & DESC_S_MASK) ||
1005 (ss_e2 & DESC_CS_MASK) ||
1006 !(ss_e2 & DESC_W_MASK))
1007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1008 if (!(ss_e2 & DESC_P_MASK))
1009#ifdef VBOX /* See page 3-477 of 253666.pdf */
1010 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1011#else
1012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1013#endif
1014 new_stack = 1;
1015 sp_mask = get_sp_mask(ss_e2);
1016 ssp = get_seg_base(ss_e1, ss_e2);
1017#if defined(VBOX) && defined(DEBUG)
1018 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1019#endif
1020 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1021 /* to same privilege */
1022 if (env->eflags & VM_MASK)
1023 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1024 new_stack = 0;
1025 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1026 ssp = env->segs[R_SS].base;
1027 esp = ESP;
1028 dpl = cpl;
1029 } else {
1030 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1031 new_stack = 0; /* avoid warning */
1032 sp_mask = 0; /* avoid warning */
1033 ssp = 0; /* avoid warning */
1034 esp = 0; /* avoid warning */
1035 }
1036
1037 shift = type >> 3;
1038
1039#if 0
1040 /* XXX: check that enough room is available */
1041 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1042 if (env->eflags & VM_MASK)
1043 push_size += 8;
1044 push_size <<= shift;
1045#endif
1046 if (shift == 1) {
1047 if (new_stack) {
1048 if (env->eflags & VM_MASK) {
1049 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1050 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1051 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1052 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1053 }
1054 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1055 PUSHL(ssp, esp, sp_mask, ESP);
1056 }
1057 PUSHL(ssp, esp, sp_mask, compute_eflags());
1058 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1059 PUSHL(ssp, esp, sp_mask, old_eip);
1060 if (has_error_code) {
1061 PUSHL(ssp, esp, sp_mask, error_code);
1062 }
1063 } else {
1064 if (new_stack) {
1065 if (env->eflags & VM_MASK) {
1066 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1067 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1068 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1069 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1070 }
1071 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1072 PUSHW(ssp, esp, sp_mask, ESP);
1073 }
1074 PUSHW(ssp, esp, sp_mask, compute_eflags());
1075 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1076 PUSHW(ssp, esp, sp_mask, old_eip);
1077 if (has_error_code) {
1078 PUSHW(ssp, esp, sp_mask, error_code);
1079 }
1080 }
1081
1082 if (new_stack) {
1083 if (env->eflags & VM_MASK) {
1084 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1085 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1086 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1087 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1088 }
1089 ss = (ss & ~3) | dpl;
1090 cpu_x86_load_seg_cache(env, R_SS, ss,
1091 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1092 }
1093 SET_ESP(esp, sp_mask);
1094
1095 selector = (selector & ~3) | dpl;
1096 cpu_x86_load_seg_cache(env, R_CS, selector,
1097 get_seg_base(e1, e2),
1098 get_seg_limit(e1, e2),
1099 e2);
1100 cpu_x86_set_cpl(env, dpl);
1101 env->eip = offset;
1102
1103 /* interrupt gates clear the IF mask (trap gates do not) */
1104 if ((type & 1) == 0) {
1105 env->eflags &= ~IF_MASK;
1106 }
1107#ifndef VBOX
1108 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1109#else
1110 /*
1111 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1112 * gets confused by seemingly changed EFLAGS. See #3491 and
1113 * public bug #2341.
1114 */
1115 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1116#endif
1117}
1118
1119#ifdef VBOX
1120
1121/* check if VME interrupt redirection is enabled in TSS */
1122DECLINLINE(bool) is_vme_irq_redirected(int intno)
1123{
1124 unsigned int io_offset, intredir_offset;
1125 unsigned char val, mask;
1126
1127 /* TSS must be a valid 32 bit one */
1128 if (!(env->tr.flags & DESC_P_MASK) ||
1129 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 11 ||
1130 env->tr.limit < 103)
1131 goto fail;
1132 io_offset = lduw_kernel(env->tr.base + 0x66);
1133 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1134 if (io_offset < 0x68 + 0x20)
1135 io_offset = 0x68 + 0x20;
1136 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1137 intredir_offset = io_offset - 0x20;
1138
1139 intredir_offset += (intno >> 3);
1140 if ((intredir_offset) > env->tr.limit)
1141 goto fail;
1142
1143 val = ldub_kernel(env->tr.base + intredir_offset);
1144 mask = 1 << (unsigned char)(intno & 7);
1145
1146 /* bit set means no redirection. */
1147 if ((val & mask) != 0) {
1148 return false;
1149 }
1150 return true;
1151
1152fail:
1153 raise_exception_err(EXCP0D_GPF, 0);
1154 return true;
1155}
1156
1157/* V86 mode software interrupt with CR4.VME=1 */
1158static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1159{
1160 target_ulong ptr, ssp;
1161 int selector;
1162 uint32_t offset, esp;
1163 uint32_t old_cs, old_eflags;
1164 uint32_t iopl;
1165
1166 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1167
1168 if (!is_vme_irq_redirected(intno))
1169 {
1170 if (iopl == 3)
1171 {
1172 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1173 return;
1174 }
1175 else
1176 raise_exception_err(EXCP0D_GPF, 0);
1177 }
1178
1179 /* virtual mode idt is at linear address 0 */
1180 ptr = 0 + intno * 4;
1181 offset = lduw_kernel(ptr);
1182 selector = lduw_kernel(ptr + 2);
1183 esp = ESP;
1184 ssp = env->segs[R_SS].base;
1185 old_cs = env->segs[R_CS].selector;
1186
1187 old_eflags = compute_eflags();
1188 if (iopl < 3)
1189 {
1190 /* copy VIF into IF and set IOPL to 3 */
1191 if (env->eflags & VIF_MASK)
1192 old_eflags |= IF_MASK;
1193 else
1194 old_eflags &= ~IF_MASK;
1195
1196 old_eflags |= (3 << IOPL_SHIFT);
1197 }
1198
1199 /* XXX: use SS segment size ? */
1200 PUSHW(ssp, esp, 0xffff, old_eflags);
1201 PUSHW(ssp, esp, 0xffff, old_cs);
1202 PUSHW(ssp, esp, 0xffff, next_eip);
1203
1204 /* update processor state */
1205 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1206 env->eip = offset;
1207 env->segs[R_CS].selector = selector;
1208 env->segs[R_CS].base = (selector << 4);
1209 env->eflags &= ~(TF_MASK | RF_MASK);
1210
1211 if (iopl < 3)
1212 env->eflags &= ~VIF_MASK;
1213 else
1214 env->eflags &= ~IF_MASK;
1215}
1216
1217#endif /* VBOX */
1218
1219#ifdef TARGET_X86_64
1220
1221#define PUSHQ(sp, val)\
1222{\
1223 sp -= 8;\
1224 stq_kernel(sp, (val));\
1225}
1226
1227#define POPQ(sp, val)\
1228{\
1229 val = ldq_kernel(sp);\
1230 sp += 8;\
1231}
1232
1233static inline target_ulong get_rsp_from_tss(int level)
1234{
1235 int index;
1236
1237#if 0
1238 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1239 env->tr.base, env->tr.limit);
1240#endif
1241
1242 if (!(env->tr.flags & DESC_P_MASK))
1243 cpu_abort(env, "invalid tss");
1244 index = 8 * level + 4;
1245 if ((index + 7) > env->tr.limit)
1246 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1247 return ldq_kernel(env->tr.base + index);
1248}
1249
1250/* 64 bit interrupt */
1251static void do_interrupt64(int intno, int is_int, int error_code,
1252 target_ulong next_eip, int is_hw)
1253{
1254 SegmentCache *dt;
1255 target_ulong ptr;
1256 int type, dpl, selector, cpl, ist;
1257 int has_error_code, new_stack;
1258 uint32_t e1, e2, e3, ss;
1259 target_ulong old_eip, esp, offset;
1260
1261#ifdef VBOX
1262 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1263 cpu_loop_exit();
1264#endif
1265
1266 has_error_code = 0;
1267 if (!is_int && !is_hw)
1268 has_error_code = exeption_has_error_code(intno);
1269 if (is_int)
1270 old_eip = next_eip;
1271 else
1272 old_eip = env->eip;
1273
1274 dt = &env->idt;
1275 if (intno * 16 + 15 > dt->limit)
1276 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1277 ptr = dt->base + intno * 16;
1278 e1 = ldl_kernel(ptr);
1279 e2 = ldl_kernel(ptr + 4);
1280 e3 = ldl_kernel(ptr + 8);
1281 /* check gate type */
1282 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1283 switch(type) {
1284 case 14: /* 386 interrupt gate */
1285 case 15: /* 386 trap gate */
1286 break;
1287 default:
1288 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1289 break;
1290 }
1291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1292 cpl = env->hflags & HF_CPL_MASK;
1293 /* check privilege if software int */
1294 if (is_int && dpl < cpl)
1295 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1296 /* check valid bit */
1297 if (!(e2 & DESC_P_MASK))
1298 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1299 selector = e1 >> 16;
1300 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1301 ist = e2 & 7;
1302 if ((selector & 0xfffc) == 0)
1303 raise_exception_err(EXCP0D_GPF, 0);
1304
1305 if (load_segment(&e1, &e2, selector) != 0)
1306 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1307 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1308 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1310 if (dpl > cpl)
1311 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1312 if (!(e2 & DESC_P_MASK))
1313 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1314 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1315 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1316 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1317 /* to inner privilege */
1318 if (ist != 0)
1319 esp = get_rsp_from_tss(ist + 3);
1320 else
1321 esp = get_rsp_from_tss(dpl);
1322 esp &= ~0xfLL; /* align stack */
1323 ss = 0;
1324 new_stack = 1;
1325 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1326 /* to same privilege */
1327 if (env->eflags & VM_MASK)
1328 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1329 new_stack = 0;
1330 if (ist != 0)
1331 esp = get_rsp_from_tss(ist + 3);
1332 else
1333 esp = ESP;
1334 esp &= ~0xfLL; /* align stack */
1335 dpl = cpl;
1336 } else {
1337 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1338 new_stack = 0; /* avoid warning */
1339 esp = 0; /* avoid warning */
1340 }
1341
1342 PUSHQ(esp, env->segs[R_SS].selector);
1343 PUSHQ(esp, ESP);
1344 PUSHQ(esp, compute_eflags());
1345 PUSHQ(esp, env->segs[R_CS].selector);
1346 PUSHQ(esp, old_eip);
1347 if (has_error_code) {
1348 PUSHQ(esp, error_code);
1349 }
1350
1351 if (new_stack) {
1352 ss = 0 | dpl;
1353#ifndef VBOX
1354 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1355#else
1356 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1357#endif
1358 }
1359 ESP = esp;
1360
1361 selector = (selector & ~3) | dpl;
1362 cpu_x86_load_seg_cache(env, R_CS, selector,
1363 get_seg_base(e1, e2),
1364 get_seg_limit(e1, e2),
1365 e2);
1366 cpu_x86_set_cpl(env, dpl);
1367 env->eip = offset;
1368
1369 /* interrupt gates clear the IF mask (trap gates do not) */
1370 if ((type & 1) == 0) {
1371 env->eflags &= ~IF_MASK;
1372 }
1373#ifndef VBOX
1374 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1375#else /* VBOX */
1376 /*
1377 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1378 * gets confused by seemingly changed EFLAGS. See #3491 and
1379 * public bug #2341.
1380 */
1381 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1382#endif /* VBOX */
1383}
1384#endif
1385
1386#ifdef TARGET_X86_64
1387#if defined(CONFIG_USER_ONLY)
1388void helper_syscall(int next_eip_addend)
1389{
1390 env->exception_index = EXCP_SYSCALL;
1391 env->exception_next_eip = env->eip + next_eip_addend;
1392 cpu_loop_exit();
1393}
1394#else
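/* SYSCALL: the new CS selector comes from STAR[47:32] (SS is CS + 8); in long
   mode the RFLAGS bits listed in SFMASK (env->fmask) are cleared and RIP is
   taken from LSTAR (64-bit code) or CSTAR (compatibility mode). */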
1395void helper_syscall(int next_eip_addend)
1396{
1397 int selector;
1398
1399 if (!(env->efer & MSR_EFER_SCE)) {
1400 raise_exception_err(EXCP06_ILLOP, 0);
1401 }
1402 selector = (env->star >> 32) & 0xffff;
1403 if (env->hflags & HF_LMA_MASK) {
1404 int code64;
1405
1406 ECX = env->eip + next_eip_addend;
1407 env->regs[11] = compute_eflags();
1408
1409 code64 = env->hflags & HF_CS64_MASK;
1410
1411 cpu_x86_set_cpl(env, 0);
1412 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1413 0, 0xffffffff,
1414 DESC_G_MASK | DESC_P_MASK |
1415 DESC_S_MASK |
1416 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1417 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK |
1421 DESC_W_MASK | DESC_A_MASK);
1422 env->eflags &= ~env->fmask;
1423 load_eflags(env->eflags, 0);
1424 if (code64)
1425 env->eip = env->lstar;
1426 else
1427 env->eip = env->cstar;
1428 } else {
1429 ECX = (uint32_t)(env->eip + next_eip_addend);
1430
1431 cpu_x86_set_cpl(env, 0);
1432 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK |
1436 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1437 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1438 0, 0xffffffff,
1439 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1440 DESC_S_MASK |
1441 DESC_W_MASK | DESC_A_MASK);
1442 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1443 env->eip = (uint32_t)env->star;
1444 }
1445}
1446#endif
1447#endif
1448
1449#ifdef TARGET_X86_64
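/* SYSRET: CS is loaded from STAR[63:48] | 3 (plus 16 for a 64-bit return),
   SS from STAR[63:48] + 8, and the CPL is set to 3. */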
1450void helper_sysret(int dflag)
1451{
1452 int cpl, selector;
1453
1454 if (!(env->efer & MSR_EFER_SCE)) {
1455 raise_exception_err(EXCP06_ILLOP, 0);
1456 }
1457 cpl = env->hflags & HF_CPL_MASK;
1458 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1459 raise_exception_err(EXCP0D_GPF, 0);
1460 }
1461 selector = (env->star >> 48) & 0xffff;
1462 if (env->hflags & HF_LMA_MASK) {
1463 if (dflag == 2) {
1464 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1465 0, 0xffffffff,
1466 DESC_G_MASK | DESC_P_MASK |
1467 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1468 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1469 DESC_L_MASK);
1470 env->eip = ECX;
1471 } else {
1472 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1473 0, 0xffffffff,
1474 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1475 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1476 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1477 env->eip = (uint32_t)ECX;
1478 }
1479 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1480 0, 0xffffffff,
1481 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1482 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1483 DESC_W_MASK | DESC_A_MASK);
1484 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1485 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1486 cpu_x86_set_cpl(env, 3);
1487 } else {
1488 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1489 0, 0xffffffff,
1490 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1491 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1492 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1493 env->eip = (uint32_t)ECX;
1494 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1495 0, 0xffffffff,
1496 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1497 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1498 DESC_W_MASK | DESC_A_MASK);
1499 env->eflags |= IF_MASK;
1500 cpu_x86_set_cpl(env, 3);
1501 }
1502}
1503#endif
1504
1505#ifdef VBOX
1506
1507/**
1508 * Checks and processes external VMM events.
1509 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1510 */
1511void helper_external_event(void)
1512{
1513# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1514 uintptr_t uSP;
1515# ifdef RT_ARCH_AMD64
1516 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1517# else
1518 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1519# endif
1520 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1521# endif
1522 /* Keep in sync with flags checked by gen_check_external_event() */
1523 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1524 {
1525 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1526 ~CPU_INTERRUPT_EXTERNAL_HARD);
1527 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1528 }
1529 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1530 {
1531 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1532 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1533 cpu_exit(env);
1534 }
1535 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1536 {
1537 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1538 ~CPU_INTERRUPT_EXTERNAL_DMA);
1539 remR3DmaRun(env);
1540 }
1541 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1542 {
1543 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1544 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1545 remR3TimersRun(env);
1546 }
1547 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1548 {
1549 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1550 ~CPU_INTERRUPT_EXTERNAL_HARD);
1551 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1552 }
1553}
1554
1555/* helper for recording call instruction addresses for later scanning */
1556void helper_record_call()
1557{
1558 if ( !(env->state & CPU_RAW_RING0)
1559 && (env->cr[0] & CR0_PG_MASK)
1560 && !(env->eflags & X86_EFL_IF))
1561 remR3RecordCall(env);
1562}
1563
1564#endif /* VBOX */
1565
1566/* real mode interrupt */
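/* In real mode each IDT entry is a 4-byte offset:segment pair at linear
   address intno * 4; FLAGS, CS and IP are pushed on the 16-bit stack and
   IF/TF/AC/RF are cleared. */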
1567static void do_interrupt_real(int intno, int is_int, int error_code,
1568 unsigned int next_eip)
1569{
1570 SegmentCache *dt;
1571 target_ulong ptr, ssp;
1572 int selector;
1573 uint32_t offset, esp;
1574 uint32_t old_cs, old_eip;
1575
1576 /* real mode (simpler !) */
1577 dt = &env->idt;
1578#ifndef VBOX
1579 if (intno * 4 + 3 > dt->limit)
1580#else
1581 if ((unsigned)intno * 4 + 3 > dt->limit)
1582#endif
1583 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1584 ptr = dt->base + intno * 4;
1585 offset = lduw_kernel(ptr);
1586 selector = lduw_kernel(ptr + 2);
1587 esp = ESP;
1588 ssp = env->segs[R_SS].base;
1589 if (is_int)
1590 old_eip = next_eip;
1591 else
1592 old_eip = env->eip;
1593 old_cs = env->segs[R_CS].selector;
1594 /* XXX: use SS segment size ? */
1595 PUSHW(ssp, esp, 0xffff, compute_eflags());
1596 PUSHW(ssp, esp, 0xffff, old_cs);
1597 PUSHW(ssp, esp, 0xffff, old_eip);
1598
1599 /* update processor state */
1600 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1601 env->eip = offset;
1602 env->segs[R_CS].selector = selector;
1603 env->segs[R_CS].base = (selector << 4);
1604 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1605}
1606
1607/* fake user mode interrupt */
1608void do_interrupt_user(int intno, int is_int, int error_code,
1609 target_ulong next_eip)
1610{
1611 SegmentCache *dt;
1612 target_ulong ptr;
1613 int dpl, cpl, shift;
1614 uint32_t e2;
1615
1616 dt = &env->idt;
1617 if (env->hflags & HF_LMA_MASK) {
1618 shift = 4;
1619 } else {
1620 shift = 3;
1621 }
1622 ptr = dt->base + (intno << shift);
1623 e2 = ldl_kernel(ptr + 4);
1624
1625 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1626 cpl = env->hflags & HF_CPL_MASK;
1627 /* check privilege if software int */
1628 if (is_int && dpl < cpl)
1629 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1630
1631 /* Since we emulate only user space, we cannot do more than
1632 exit the emulation with the suitable exception and error
1633 code */
1634 if (is_int)
1635 EIP = next_eip;
1636}
1637
1638#if !defined(CONFIG_USER_ONLY)
1639static void handle_even_inj(int intno, int is_int, int error_code,
1640 int is_hw, int rm)
1641{
1642 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1643 if (!(event_inj & SVM_EVTINJ_VALID)) {
1644 int type;
1645 if (is_int)
1646 type = SVM_EVTINJ_TYPE_SOFT;
1647 else
1648 type = SVM_EVTINJ_TYPE_EXEPT;
1649 event_inj = intno | type | SVM_EVTINJ_VALID;
1650 if (!rm && exeption_has_error_code(intno)) {
1651 event_inj |= SVM_EVTINJ_VALID_ERR;
1652 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1653 }
1654 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1655 }
1656}
1657#endif
1658
1659/*
1660 * Begin execution of an interruption. is_int is TRUE if coming from
1661 * the int instruction. next_eip is the EIP value AFTER the interrupt
1662 * instruction. It is only relevant if is_int is TRUE.
1663 */
1664void do_interrupt(int intno, int is_int, int error_code,
1665 target_ulong next_eip, int is_hw)
1666{
1667 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1668 if ((env->cr[0] & CR0_PE_MASK)) {
1669 static int count;
1670 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1671 count, intno, error_code, is_int,
1672 env->hflags & HF_CPL_MASK,
1673 env->segs[R_CS].selector, EIP,
1674 (int)env->segs[R_CS].base + EIP,
1675 env->segs[R_SS].selector, ESP);
1676 if (intno == 0x0e) {
1677 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1678 } else {
1679 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1680 }
1681 qemu_log("\n");
1682 log_cpu_state(env, X86_DUMP_CCOP);
1683#if 0
1684 {
1685 int i;
1686 uint8_t *ptr;
1687 qemu_log(" code=");
1688 ptr = env->segs[R_CS].base + env->eip;
1689 for(i = 0; i < 16; i++) {
1690 qemu_log(" %02x", ldub(ptr + i));
1691 }
1692 qemu_log("\n");
1693 }
1694#endif
1695 count++;
1696 }
1697 }
1698#ifdef VBOX
1699 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1700 if (is_int) {
1701 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1702 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1703 } else {
1704 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1705 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1706 }
1707 }
1708#endif
1709 if (env->cr[0] & CR0_PE_MASK) {
1710#if !defined(CONFIG_USER_ONLY)
1711 if (env->hflags & HF_SVMI_MASK)
1712 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1713#endif
1714#ifdef TARGET_X86_64
1715 if (env->hflags & HF_LMA_MASK) {
1716 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1717 } else
1718#endif
1719 {
1720#ifdef VBOX
1721 /* int xx *, v86 code and VME enabled? */
1722 if ( (env->eflags & VM_MASK)
1723 && (env->cr[4] & CR4_VME_MASK)
1724 && is_int
1725 && !is_hw
1726 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1727 )
1728 do_soft_interrupt_vme(intno, error_code, next_eip);
1729 else
1730#endif /* VBOX */
1731 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1732 }
1733 } else {
1734#if !defined(CONFIG_USER_ONLY)
1735 if (env->hflags & HF_SVMI_MASK)
1736 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1737#endif
1738 do_interrupt_real(intno, is_int, error_code, next_eip);
1739 }
1740
1741#if !defined(CONFIG_USER_ONLY)
1742 if (env->hflags & HF_SVMI_MASK) {
1743 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1744 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1745 }
1746#endif
1747}
1748
1749/* This should come from sysemu.h - if we could include it here... */
1750void qemu_system_reset_request(void);
1751
1752/*
1753 * Check nested exceptions and change to double or triple fault if
1754 * needed. It should only be called if this is not an interrupt.
1755 * Returns the new exception number.
1756 */
1757static int check_exception(int intno, int *error_code)
1758{
1759 int first_contributory = env->old_exception == 0 ||
1760 (env->old_exception >= 10 &&
1761 env->old_exception <= 13);
1762 int second_contributory = intno == 0 ||
1763 (intno >= 10 && intno <= 13);
1764
1765 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1766 env->old_exception, intno);
1767
1768#if !defined(CONFIG_USER_ONLY)
1769 if (env->old_exception == EXCP08_DBLE) {
1770 if (env->hflags & HF_SVMI_MASK)
1771 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1772
1773 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1774
1775# ifndef VBOX
1776 qemu_system_reset_request();
1777 return EXCP_HLT;
1778# else
1779 remR3RaiseRC(env->pVM, VINF_EM_TRIPLE_FAULT);
1780 return EXCP_RC;
1781# endif
1782 }
1783#endif
1784
1785 if ((first_contributory && second_contributory)
1786 || (env->old_exception == EXCP0E_PAGE &&
1787 (second_contributory || (intno == EXCP0E_PAGE)))) {
1788 intno = EXCP08_DBLE;
1789 *error_code = 0;
1790 }
1791
1792 if (second_contributory || (intno == EXCP0E_PAGE) ||
1793 (intno == EXCP08_DBLE))
1794 env->old_exception = intno;
1795
1796 return intno;
1797}
1798
1799/*
1800 * Signal an interruption. It is executed in the main CPU loop.
1801 * is_int is TRUE if coming from the int instruction. next_eip is the
1802 * EIP value AFTER the interrupt instruction. It is only relevant if
1803 * is_int is TRUE.
1804 */
1805static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1806 int next_eip_addend)
1807{
1808#if defined(VBOX) && defined(DEBUG)
1809 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1810#endif
1811 if (!is_int) {
1812 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1813 intno = check_exception(intno, &error_code);
1814 } else {
1815 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1816 }
1817
1818 env->exception_index = intno;
1819 env->error_code = error_code;
1820 env->exception_is_int = is_int;
1821 env->exception_next_eip = env->eip + next_eip_addend;
1822 cpu_loop_exit();
1823}
1824
1825/* shortcuts to generate exceptions */
1826
1827void raise_exception_err(int exception_index, int error_code)
1828{
1829 raise_interrupt(exception_index, 0, error_code, 0);
1830}
1831
1832void raise_exception(int exception_index)
1833{
1834 raise_interrupt(exception_index, 0, 0, 0);
1835}
1836
1837void raise_exception_env(int exception_index, CPUState *nenv)
1838{
1839 env = nenv;
1840 raise_exception(exception_index);
1841}
1842/* SMM support */
1843
1844#if defined(CONFIG_USER_ONLY)
1845
1846void do_smm_enter(void)
1847{
1848}
1849
1850void helper_rsm(void)
1851{
1852}
1853
1854#else
1855
1856#ifdef TARGET_X86_64
1857#define SMM_REVISION_ID 0x00020064
1858#else
1859#define SMM_REVISION_ID 0x00020000
1860#endif
1861
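/* Entering SMM saves the CPU state to the SMRAM state-save area at
   SMBASE + 0x8000 and then restarts execution at CS base = SMBASE,
   EIP = 0x8000, with paging and protection disabled. */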
1862void do_smm_enter(void)
1863{
1864 target_ulong sm_state;
1865 SegmentCache *dt;
1866 int i, offset;
1867
1868 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1869 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1870
1871 env->hflags |= HF_SMM_MASK;
1872 cpu_smm_update(env);
1873
1874 sm_state = env->smbase + 0x8000;
1875
1876#ifdef TARGET_X86_64
1877 for(i = 0; i < 6; i++) {
1878 dt = &env->segs[i];
1879 offset = 0x7e00 + i * 16;
1880 stw_phys(sm_state + offset, dt->selector);
1881 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1882 stl_phys(sm_state + offset + 4, dt->limit);
1883 stq_phys(sm_state + offset + 8, dt->base);
1884 }
1885
1886 stq_phys(sm_state + 0x7e68, env->gdt.base);
1887 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1888
1889 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1890 stq_phys(sm_state + 0x7e78, env->ldt.base);
1891 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1892 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1893
1894 stq_phys(sm_state + 0x7e88, env->idt.base);
1895 stl_phys(sm_state + 0x7e84, env->idt.limit);
1896
1897 stw_phys(sm_state + 0x7e90, env->tr.selector);
1898 stq_phys(sm_state + 0x7e98, env->tr.base);
1899 stl_phys(sm_state + 0x7e94, env->tr.limit);
1900 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1901
1902 stq_phys(sm_state + 0x7ed0, env->efer);
1903
1904 stq_phys(sm_state + 0x7ff8, EAX);
1905 stq_phys(sm_state + 0x7ff0, ECX);
1906 stq_phys(sm_state + 0x7fe8, EDX);
1907 stq_phys(sm_state + 0x7fe0, EBX);
1908 stq_phys(sm_state + 0x7fd8, ESP);
1909 stq_phys(sm_state + 0x7fd0, EBP);
1910 stq_phys(sm_state + 0x7fc8, ESI);
1911 stq_phys(sm_state + 0x7fc0, EDI);
1912 for(i = 8; i < 16; i++)
1913 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1914 stq_phys(sm_state + 0x7f78, env->eip);
1915 stl_phys(sm_state + 0x7f70, compute_eflags());
1916 stl_phys(sm_state + 0x7f68, env->dr[6]);
1917 stl_phys(sm_state + 0x7f60, env->dr[7]);
1918
1919 stl_phys(sm_state + 0x7f48, env->cr[4]);
1920 stl_phys(sm_state + 0x7f50, env->cr[3]);
1921 stl_phys(sm_state + 0x7f58, env->cr[0]);
1922
1923 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1924 stl_phys(sm_state + 0x7f00, env->smbase);
1925#else
1926 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1927 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1928 stl_phys(sm_state + 0x7ff4, compute_eflags());
1929 stl_phys(sm_state + 0x7ff0, env->eip);
1930 stl_phys(sm_state + 0x7fec, EDI);
1931 stl_phys(sm_state + 0x7fe8, ESI);
1932 stl_phys(sm_state + 0x7fe4, EBP);
1933 stl_phys(sm_state + 0x7fe0, ESP);
1934 stl_phys(sm_state + 0x7fdc, EBX);
1935 stl_phys(sm_state + 0x7fd8, EDX);
1936 stl_phys(sm_state + 0x7fd4, ECX);
1937 stl_phys(sm_state + 0x7fd0, EAX);
1938 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1939 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1940
1941 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1942 stl_phys(sm_state + 0x7f64, env->tr.base);
1943 stl_phys(sm_state + 0x7f60, env->tr.limit);
1944 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1945
1946 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1947 stl_phys(sm_state + 0x7f80, env->ldt.base);
1948 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1949 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1950
1951 stl_phys(sm_state + 0x7f74, env->gdt.base);
1952 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1953
1954 stl_phys(sm_state + 0x7f58, env->idt.base);
1955 stl_phys(sm_state + 0x7f54, env->idt.limit);
1956
1957 for(i = 0; i < 6; i++) {
1958 dt = &env->segs[i];
1959 if (i < 3)
1960 offset = 0x7f84 + i * 12;
1961 else
1962 offset = 0x7f2c + (i - 3) * 12;
1963 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1964 stl_phys(sm_state + offset + 8, dt->base);
1965 stl_phys(sm_state + offset + 4, dt->limit);
1966 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1967 }
1968 stl_phys(sm_state + 0x7f14, env->cr[4]);
1969
1970 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1971 stl_phys(sm_state + 0x7ef8, env->smbase);
1972#endif
1973 /* init SMM cpu state */
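    /* The SMI handler starts in a real-mode-like environment: CS base = SMBASE,
     * EIP = 0x8000, flat 4GB-limit segments, paging and protection disabled,
     * EFLAGS and (on 64-bit capable CPUs) EFER cleared. */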
1974
1975#ifdef TARGET_X86_64
1976 cpu_load_efer(env, 0);
1977#endif
1978 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1979 env->eip = 0x00008000;
1980 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1981 0xffffffff, 0);
1982 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1983 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1984 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1985 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1986 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1987
1988 cpu_x86_update_cr0(env,
1989 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1990 cpu_x86_update_cr4(env, 0);
1991 env->dr[7] = 0x00000400;
1992 CC_OP = CC_OP_EFLAGS;
1993}
1994
1995void helper_rsm(void)
1996{
1997#ifdef VBOX
1998 cpu_abort(env, "helper_rsm");
1999#else /* !VBOX */
2000 target_ulong sm_state;
2001 int i, offset;
2002 uint32_t val;
2003
2004 sm_state = env->smbase + 0x8000;
2005#ifdef TARGET_X86_64
2006 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2007
2008 for(i = 0; i < 6; i++) {
2009 offset = 0x7e00 + i * 16;
2010 cpu_x86_load_seg_cache(env, i,
2011 lduw_phys(sm_state + offset),
2012 ldq_phys(sm_state + offset + 8),
2013 ldl_phys(sm_state + offset + 4),
2014 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2015 }
2016
2017 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2018 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2019
2020 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2021 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2022 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2023 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2024#ifdef VBOX
2025 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2026 env->ldt.newselector = 0;
2027#endif
2028
2029 env->idt.base = ldq_phys(sm_state + 0x7e88);
2030 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2031
2032 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2033 env->tr.base = ldq_phys(sm_state + 0x7e98);
2034 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2035 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2036#ifdef VBOX
2037 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2038 env->tr.newselector = 0;
2039#endif
2040
2041 EAX = ldq_phys(sm_state + 0x7ff8);
2042 ECX = ldq_phys(sm_state + 0x7ff0);
2043 EDX = ldq_phys(sm_state + 0x7fe8);
2044 EBX = ldq_phys(sm_state + 0x7fe0);
2045 ESP = ldq_phys(sm_state + 0x7fd8);
2046 EBP = ldq_phys(sm_state + 0x7fd0);
2047 ESI = ldq_phys(sm_state + 0x7fc8);
2048 EDI = ldq_phys(sm_state + 0x7fc0);
2049 for(i = 8; i < 16; i++)
2050 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2051 env->eip = ldq_phys(sm_state + 0x7f78);
2052 load_eflags(ldl_phys(sm_state + 0x7f70),
2053 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2054 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2055 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2056
2057 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2058 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2059 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2060
2061 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2062 if (val & 0x20000) {
2063 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2064 }
2065#else
2066 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2067 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2068 load_eflags(ldl_phys(sm_state + 0x7ff4),
2069 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2070 env->eip = ldl_phys(sm_state + 0x7ff0);
2071 EDI = ldl_phys(sm_state + 0x7fec);
2072 ESI = ldl_phys(sm_state + 0x7fe8);
2073 EBP = ldl_phys(sm_state + 0x7fe4);
2074 ESP = ldl_phys(sm_state + 0x7fe0);
2075 EBX = ldl_phys(sm_state + 0x7fdc);
2076 EDX = ldl_phys(sm_state + 0x7fd8);
2077 ECX = ldl_phys(sm_state + 0x7fd4);
2078 EAX = ldl_phys(sm_state + 0x7fd0);
2079 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2080 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2081
2082 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2083 env->tr.base = ldl_phys(sm_state + 0x7f64);
2084 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2085 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2086#ifdef VBOX
2087 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2088 env->tr.newselector = 0;
2089#endif
2090
2091 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2092 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2093 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2094 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2095#ifdef VBOX
2096 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2097 env->ldt.newselector = 0;
2098#endif
2099
2100 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2101 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2102
2103 env->idt.base = ldl_phys(sm_state + 0x7f58);
2104 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2105
2106 for(i = 0; i < 6; i++) {
2107 if (i < 3)
2108 offset = 0x7f84 + i * 12;
2109 else
2110 offset = 0x7f2c + (i - 3) * 12;
2111 cpu_x86_load_seg_cache(env, i,
2112 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2113 ldl_phys(sm_state + offset + 8),
2114 ldl_phys(sm_state + offset + 4),
2115 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2116 }
2117 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2118
2119 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2120 if (val & 0x20000) {
2121 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2122 }
2123#endif
2124 CC_OP = CC_OP_EFLAGS;
2125 env->hflags &= ~HF_SMM_MASK;
2126 cpu_smm_update(env);
2127
2128 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2129 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2130#endif /* !VBOX */
2131}
2132
2133#endif /* !CONFIG_USER_ONLY */
2134
2135
2136/* division, flags are undefined */
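/* Note that a quotient which does not fit in the destination raises the same
 * #DE exception as a zero divisor on x86, hence EXCP00_DIVZ is reused for the
 * overflow case in the helpers below. */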
2137
2138void helper_divb_AL(target_ulong t0)
2139{
2140 unsigned int num, den, q, r;
2141
2142 num = (EAX & 0xffff);
2143 den = (t0 & 0xff);
2144 if (den == 0) {
2145 raise_exception(EXCP00_DIVZ);
2146 }
2147 q = (num / den);
2148 if (q > 0xff)
2149 raise_exception(EXCP00_DIVZ);
2150 q &= 0xff;
2151 r = (num % den) & 0xff;
2152 EAX = (EAX & ~0xffff) | (r << 8) | q;
2153}
2154
2155void helper_idivb_AL(target_ulong t0)
2156{
2157 int num, den, q, r;
2158
2159 num = (int16_t)EAX;
2160 den = (int8_t)t0;
2161 if (den == 0) {
2162 raise_exception(EXCP00_DIVZ);
2163 }
2164 q = (num / den);
2165 if (q != (int8_t)q)
2166 raise_exception(EXCP00_DIVZ);
2167 q &= 0xff;
2168 r = (num % den) & 0xff;
2169 EAX = (EAX & ~0xffff) | (r << 8) | q;
2170}
2171
2172void helper_divw_AX(target_ulong t0)
2173{
2174 unsigned int num, den, q, r;
2175
2176 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2177 den = (t0 & 0xffff);
2178 if (den == 0) {
2179 raise_exception(EXCP00_DIVZ);
2180 }
2181 q = (num / den);
2182 if (q > 0xffff)
2183 raise_exception(EXCP00_DIVZ);
2184 q &= 0xffff;
2185 r = (num % den) & 0xffff;
2186 EAX = (EAX & ~0xffff) | q;
2187 EDX = (EDX & ~0xffff) | r;
2188}
2189
2190void helper_idivw_AX(target_ulong t0)
2191{
2192 int num, den, q, r;
2193
2194 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2195 den = (int16_t)t0;
2196 if (den == 0) {
2197 raise_exception(EXCP00_DIVZ);
2198 }
2199 q = (num / den);
2200 if (q != (int16_t)q)
2201 raise_exception(EXCP00_DIVZ);
2202 q &= 0xffff;
2203 r = (num % den) & 0xffff;
2204 EAX = (EAX & ~0xffff) | q;
2205 EDX = (EDX & ~0xffff) | r;
2206}
2207
2208void helper_divl_EAX(target_ulong t0)
2209{
2210 unsigned int den, r;
2211 uint64_t num, q;
2212
2213 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2214 den = t0;
2215 if (den == 0) {
2216 raise_exception(EXCP00_DIVZ);
2217 }
2218 q = (num / den);
2219 r = (num % den);
2220 if (q > 0xffffffff)
2221 raise_exception(EXCP00_DIVZ);
2222 EAX = (uint32_t)q;
2223 EDX = (uint32_t)r;
2224}
2225
2226void helper_idivl_EAX(target_ulong t0)
2227{
2228 int den, r;
2229 int64_t num, q;
2230
2231 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2232 den = t0;
2233 if (den == 0) {
2234 raise_exception(EXCP00_DIVZ);
2235 }
2236 q = (num / den);
2237 r = (num % den);
2238 if (q != (int32_t)q)
2239 raise_exception(EXCP00_DIVZ);
2240 EAX = (uint32_t)q;
2241 EDX = (uint32_t)r;
2242}
2243
2244/* bcd */
2245
2246/* XXX: exception */
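/* The XXX above presumably refers to the missing divide-error check:
 * AAM with an immediate operand of 0 must raise #DE. */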
2247void helper_aam(int base)
2248{
2249 int al, ah;
2250 al = EAX & 0xff;
2251 ah = al / base;
2252 al = al % base;
2253 EAX = (EAX & ~0xffff) | al | (ah << 8);
2254 CC_DST = al;
2255}
2256
2257void helper_aad(int base)
2258{
2259 int al, ah;
2260 al = EAX & 0xff;
2261 ah = (EAX >> 8) & 0xff;
2262 al = ((ah * base) + al) & 0xff;
2263 EAX = (EAX & ~0xffff) | al;
2264 CC_DST = al;
2265}
2266
2267void helper_aaa(void)
2268{
2269 int icarry;
2270 int al, ah, af;
2271 int eflags;
2272
2273 eflags = helper_cc_compute_all(CC_OP);
2274 af = eflags & CC_A;
2275 al = EAX & 0xff;
2276 ah = (EAX >> 8) & 0xff;
2277
2278 icarry = (al > 0xf9);
2279 if (((al & 0x0f) > 9 ) || af) {
2280 al = (al + 6) & 0x0f;
2281 ah = (ah + 1 + icarry) & 0xff;
2282 eflags |= CC_C | CC_A;
2283 } else {
2284 eflags &= ~(CC_C | CC_A);
2285 al &= 0x0f;
2286 }
2287 EAX = (EAX & ~0xffff) | al | (ah << 8);
2288 CC_SRC = eflags;
2289}
2290
2291void helper_aas(void)
2292{
2293 int icarry;
2294 int al, ah, af;
2295 int eflags;
2296
2297 eflags = helper_cc_compute_all(CC_OP);
2298 af = eflags & CC_A;
2299 al = EAX & 0xff;
2300 ah = (EAX >> 8) & 0xff;
2301
2302 icarry = (al < 6);
2303 if (((al & 0x0f) > 9 ) || af) {
2304 al = (al - 6) & 0x0f;
2305 ah = (ah - 1 - icarry) & 0xff;
2306 eflags |= CC_C | CC_A;
2307 } else {
2308 eflags &= ~(CC_C | CC_A);
2309 al &= 0x0f;
2310 }
2311 EAX = (EAX & ~0xffff) | al | (ah << 8);
2312 CC_SRC = eflags;
2313}
2314
2315void helper_daa(void)
2316{
2317    int al, old_al, af, cf;
2318    int eflags;
2319
2320    eflags = helper_cc_compute_all(CC_OP);
2321    cf = eflags & CC_C;
2322    af = eflags & CC_A;
2323    old_al = al = EAX & 0xff;
2324
2325    eflags = 0;
2326    if (((al & 0x0f) > 9) || af) {
2327        al = (al + 6) & 0xff;
2328        eflags |= CC_A;
2329    }
2330    if ((old_al > 0x99) || cf) { /* the second adjustment tests the original AL (SDM DAA) */
2331 al = (al + 0x60) & 0xff;
2332 eflags |= CC_C;
2333 }
2334 EAX = (EAX & ~0xff) | al;
2335 /* well, speed is not an issue here, so we compute the flags by hand */
2336 eflags |= (al == 0) << 6; /* zf */
2337 eflags |= parity_table[al]; /* pf */
2338 eflags |= (al & 0x80); /* sf */
2339 CC_SRC = eflags;
2340}
2341
2342void helper_das(void)
2343{
2344 int al, al1, af, cf;
2345 int eflags;
2346
2347 eflags = helper_cc_compute_all(CC_OP);
2348 cf = eflags & CC_C;
2349 af = eflags & CC_A;
2350 al = EAX & 0xff;
2351
2352 eflags = 0;
2353 al1 = al;
2354 if (((al & 0x0f) > 9 ) || af) {
2355 eflags |= CC_A;
2356 if (al < 6 || cf)
2357 eflags |= CC_C;
2358 al = (al - 6) & 0xff;
2359 }
2360 if ((al1 > 0x99) || cf) {
2361 al = (al - 0x60) & 0xff;
2362 eflags |= CC_C;
2363 }
2364 EAX = (EAX & ~0xff) | al;
2365 /* well, speed is not an issue here, so we compute the flags by hand */
2366 eflags |= (al == 0) << 6; /* zf */
2367 eflags |= parity_table[al]; /* pf */
2368 eflags |= (al & 0x80); /* sf */
2369 CC_SRC = eflags;
2370}
2371
2372void helper_into(int next_eip_addend)
2373{
2374 int eflags;
2375 eflags = helper_cc_compute_all(CC_OP);
2376 if (eflags & CC_O) {
2377 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2378 }
2379}
2380
2381void helper_cmpxchg8b(target_ulong a0)
2382{
2383 uint64_t d;
2384 int eflags;
2385
2386 eflags = helper_cc_compute_all(CC_OP);
2387 d = ldq(a0);
2388 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2389 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2390 eflags |= CC_Z;
2391 } else {
2392 /* always do the store */
2393 stq(a0, d);
2394 EDX = (uint32_t)(d >> 32);
2395 EAX = (uint32_t)d;
2396 eflags &= ~CC_Z;
2397 }
2398 CC_SRC = eflags;
2399}
2400
2401#ifdef TARGET_X86_64
2402void helper_cmpxchg16b(target_ulong a0)
2403{
2404 uint64_t d0, d1;
2405 int eflags;
2406
2407 if ((a0 & 0xf) != 0)
2408 raise_exception(EXCP0D_GPF);
2409 eflags = helper_cc_compute_all(CC_OP);
2410 d0 = ldq(a0);
2411 d1 = ldq(a0 + 8);
2412 if (d0 == EAX && d1 == EDX) {
2413 stq(a0, EBX);
2414 stq(a0 + 8, ECX);
2415 eflags |= CC_Z;
2416 } else {
2417 /* always do the store */
2418 stq(a0, d0);
2419 stq(a0 + 8, d1);
2420 EDX = d1;
2421 EAX = d0;
2422 eflags &= ~CC_Z;
2423 }
2424 CC_SRC = eflags;
2425}
2426#endif
2427
2428void helper_single_step(void)
2429{
2430#ifndef CONFIG_USER_ONLY
2431 check_hw_breakpoints(env, 1);
2432 env->dr[6] |= DR6_BS;
2433#endif
2434 raise_exception(EXCP01_DB);
2435}
2436
2437void helper_cpuid(void)
2438{
2439 uint32_t eax, ebx, ecx, edx;
2440
2441 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2442
2443 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2444 EAX = eax;
2445 EBX = ebx;
2446 ECX = ecx;
2447 EDX = edx;
2448}
2449
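/* ENTER with a non-zero nesting level: per the SDM pseudocode, level-1 frame
 * pointers are copied from the previous frame into the new frame's display,
 * and the new frame pointer (passed in t1, presumably frame_temp) is pushed
 * as the last display entry. */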
2450void helper_enter_level(int level, int data32, target_ulong t1)
2451{
2452 target_ulong ssp;
2453 uint32_t esp_mask, esp, ebp;
2454
2455 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2456 ssp = env->segs[R_SS].base;
2457 ebp = EBP;
2458 esp = ESP;
2459 if (data32) {
2460 /* 32 bit */
2461 esp -= 4;
2462 while (--level) {
2463 esp -= 4;
2464 ebp -= 4;
2465 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2466 }
2467 esp -= 4;
2468 stl(ssp + (esp & esp_mask), t1);
2469 } else {
2470 /* 16 bit */
2471 esp -= 2;
2472 while (--level) {
2473 esp -= 2;
2474 ebp -= 2;
2475 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2476 }
2477 esp -= 2;
2478 stw(ssp + (esp & esp_mask), t1);
2479 }
2480}
2481
2482#ifdef TARGET_X86_64
2483void helper_enter64_level(int level, int data64, target_ulong t1)
2484{
2485 target_ulong esp, ebp;
2486 ebp = EBP;
2487 esp = ESP;
2488
2489 if (data64) {
2490 /* 64 bit */
2491 esp -= 8;
2492 while (--level) {
2493 esp -= 8;
2494 ebp -= 8;
2495 stq(esp, ldq(ebp));
2496 }
2497 esp -= 8;
2498 stq(esp, t1);
2499 } else {
2500 /* 16 bit */
2501 esp -= 2;
2502 while (--level) {
2503 esp -= 2;
2504 ebp -= 2;
2505 stw(esp, lduw(ebp));
2506 }
2507 esp -= 2;
2508 stw(esp, t1);
2509 }
2510}
2511#endif
2512
2513void helper_lldt(int selector)
2514{
2515 SegmentCache *dt;
2516 uint32_t e1, e2;
2517#ifndef VBOX
2518 int index, entry_limit;
2519#else
2520 unsigned int index, entry_limit;
2521#endif
2522 target_ulong ptr;
2523
2524#ifdef VBOX
2525 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2526 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2527#endif
2528
2529 selector &= 0xffff;
2530 if ((selector & 0xfffc) == 0) {
2531 /* XXX: NULL selector case: invalid LDT */
2532 env->ldt.base = 0;
2533 env->ldt.limit = 0;
2534#ifdef VBOX
2535 env->ldt.flags = DESC_INTEL_UNUSABLE;
2536 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2537 env->ldt.newselector = 0;
2538#endif
2539 } else {
2540 if (selector & 0x4)
2541 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2542 dt = &env->gdt;
2543 index = selector & ~7;
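        /* In long mode, LDT and TSS descriptors are 16 bytes, so the last byte
         * of the entry lies at index + 15 instead of index + 7. */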
2544#ifdef TARGET_X86_64
2545 if (env->hflags & HF_LMA_MASK)
2546 entry_limit = 15;
2547 else
2548#endif
2549 entry_limit = 7;
2550 if ((index + entry_limit) > dt->limit)
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 ptr = dt->base + index;
2553 e1 = ldl_kernel(ptr);
2554 e2 = ldl_kernel(ptr + 4);
2555 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2556 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2557 if (!(e2 & DESC_P_MASK))
2558 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2559#ifdef TARGET_X86_64
2560 if (env->hflags & HF_LMA_MASK) {
2561 uint32_t e3;
2562 e3 = ldl_kernel(ptr + 8);
2563 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2564 env->ldt.base |= (target_ulong)e3 << 32;
2565 } else
2566#endif
2567 {
2568 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2569 }
2570 }
2571 env->ldt.selector = selector;
2572#ifdef VBOX
2573 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2574 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2575#endif
2576}
2577
2578void helper_ltr(int selector)
2579{
2580 SegmentCache *dt;
2581 uint32_t e1, e2;
2582#ifndef VBOX
2583 int index, type, entry_limit;
2584#else
2585 unsigned int index;
2586 int type, entry_limit;
2587#endif
2588 target_ulong ptr;
2589
2590#ifdef VBOX
2591 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2592 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2593 env->tr.flags, (RTSEL)(selector & 0xffff)));
2594#endif
2595 selector &= 0xffff;
2596 if ((selector & 0xfffc) == 0) {
2597 /* NULL selector case: invalid TR */
2598#ifdef VBOX
2599 raise_exception_err(EXCP0A_TSS, 0);
2600#else
2601 env->tr.base = 0;
2602 env->tr.limit = 0;
2603 env->tr.flags = 0;
2604#endif
2605 } else {
2606 if (selector & 0x4)
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 dt = &env->gdt;
2609 index = selector & ~7;
2610#ifdef TARGET_X86_64
2611 if (env->hflags & HF_LMA_MASK)
2612 entry_limit = 15;
2613 else
2614#endif
2615 entry_limit = 7;
2616 if ((index + entry_limit) > dt->limit)
2617 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2618 ptr = dt->base + index;
2619 e1 = ldl_kernel(ptr);
2620 e2 = ldl_kernel(ptr + 4);
2621 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2622 if ((e2 & DESC_S_MASK) ||
2623 (type != 1 && type != 9))
2624 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2625 if (!(e2 & DESC_P_MASK))
2626 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2627#ifdef TARGET_X86_64
2628 if (env->hflags & HF_LMA_MASK) {
2629 uint32_t e3, e4;
2630 e3 = ldl_kernel(ptr + 8);
2631 e4 = ldl_kernel(ptr + 12);
2632 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 load_seg_cache_raw_dt(&env->tr, e1, e2);
2635 env->tr.base |= (target_ulong)e3 << 32;
2636 } else
2637#endif
2638 {
2639 load_seg_cache_raw_dt(&env->tr, e1, e2);
2640 }
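        /* Loading TR marks the TSS busy: set the busy bit both in the cached
         * flags and in the GDT descriptor itself. */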
2641 env->tr.flags |= DESC_TSS_BUSY_MASK;
2642 e2 |= DESC_TSS_BUSY_MASK;
2643 stl_kernel(ptr + 4, e2);
2644 }
2645 env->tr.selector = selector;
2646#ifdef VBOX
2647 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2648 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2649 env->tr.flags, (RTSEL)(selector & 0xffff)));
2650#endif
2651}
2652
2653/* Only works in protected mode, not VM86 mode. seg_reg must be != R_CS. */
2654void helper_load_seg(int seg_reg, int selector)
2655{
2656 uint32_t e1, e2;
2657 int cpl, dpl, rpl;
2658 SegmentCache *dt;
2659#ifndef VBOX
2660 int index;
2661#else
2662 unsigned int index;
2663#endif
2664 target_ulong ptr;
2665
2666 selector &= 0xffff;
2667 cpl = env->hflags & HF_CPL_MASK;
2668#ifdef VBOX
2669
2670 /* Trying to load a selector with CPL=1? */
2671 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2672 {
2673 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2674 selector = selector & 0xfffc;
2675 }
2676#endif /* VBOX */
2677 if ((selector & 0xfffc) == 0) {
2678 /* null selector case */
2679#ifndef VBOX
2680 if (seg_reg == R_SS
2681#ifdef TARGET_X86_64
2682 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2683#endif
2684 )
2685 raise_exception_err(EXCP0D_GPF, 0);
2686 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2687#else
2688 if (seg_reg == R_SS) {
2689 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2690 raise_exception_err(EXCP0D_GPF, 0);
2691 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2692 } else {
2693 e2 = DESC_INTEL_UNUSABLE;
2694 }
2695 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2696#endif
2697 } else {
2698
2699 if (selector & 0x4)
2700 dt = &env->ldt;
2701 else
2702 dt = &env->gdt;
2703 index = selector & ~7;
2704 if ((index + 7) > dt->limit)
2705 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2706 ptr = dt->base + index;
2707 e1 = ldl_kernel(ptr);
2708 e2 = ldl_kernel(ptr + 4);
2709
2710 if (!(e2 & DESC_S_MASK))
2711 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2712 rpl = selector & 3;
2713 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2714 if (seg_reg == R_SS) {
2715 /* must be writable segment */
2716 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2718 if (rpl != cpl || dpl != cpl)
2719 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2720 } else {
2721 /* must be readable segment */
2722 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2723 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2724
2725 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2726 /* if not conforming code, test rights */
2727 if (dpl < cpl || dpl < rpl)
2728 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2729 }
2730 }
2731
2732 if (!(e2 & DESC_P_MASK)) {
2733 if (seg_reg == R_SS)
2734 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2735 else
2736 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2737 }
2738
2739 /* set the access bit if not already set */
2740 if (!(e2 & DESC_A_MASK)) {
2741 e2 |= DESC_A_MASK;
2742 stl_kernel(ptr + 4, e2);
2743 }
2744
2745 cpu_x86_load_seg_cache(env, seg_reg, selector,
2746 get_seg_base(e1, e2),
2747 get_seg_limit(e1, e2),
2748 e2);
2749#if 0
2750 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2751 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2752#endif
2753 }
2754}
2755
2756/* protected mode jump */
2757void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2758 int next_eip_addend)
2759{
2760 int gate_cs, type;
2761 uint32_t e1, e2, cpl, dpl, rpl, limit;
2762 target_ulong next_eip;
2763
2764#ifdef VBOX /** @todo Why do we do this? */
2765 e1 = e2 = 0;
2766#endif
2767 if ((new_cs & 0xfffc) == 0)
2768 raise_exception_err(EXCP0D_GPF, 0);
2769 if (load_segment(&e1, &e2, new_cs) != 0)
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 cpl = env->hflags & HF_CPL_MASK;
2772 if (e2 & DESC_S_MASK) {
2773 if (!(e2 & DESC_CS_MASK))
2774 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2775 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2776 if (e2 & DESC_C_MASK) {
2777 /* conforming code segment */
2778 if (dpl > cpl)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 } else {
2781 /* non conforming code segment */
2782 rpl = new_cs & 3;
2783 if (rpl > cpl)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 if (dpl != cpl)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 }
2788 if (!(e2 & DESC_P_MASK))
2789 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2790 limit = get_seg_limit(e1, e2);
2791 if (new_eip > limit &&
2792 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2793 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2794#ifdef VBOX
2795 if (!(e2 & DESC_A_MASK))
2796 e2 = set_segment_accessed(new_cs, e2);
2797#endif
2798 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2799 get_seg_base(e1, e2), limit, e2);
2800 EIP = new_eip;
2801 } else {
2802 /* jump to call or task gate */
2803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2804 rpl = new_cs & 3;
2805 cpl = env->hflags & HF_CPL_MASK;
2806 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
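        /* Far JMP through a system descriptor: a TSS selector or task gate
         * causes a task switch, while a call gate merely redirects to the
         * gate's target CS:EIP - a JMP never switches stacks or copies
         * parameters, unlike a far CALL. */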
2807 switch(type) {
2808 case 1: /* 286 TSS */
2809 case 9: /* 386 TSS */
2810 case 5: /* task gate */
2811 if (dpl < cpl || dpl < rpl)
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 next_eip = env->eip + next_eip_addend;
2814 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2815 CC_OP = CC_OP_EFLAGS;
2816 break;
2817 case 4: /* 286 call gate */
2818 case 12: /* 386 call gate */
2819 if ((dpl < cpl) || (dpl < rpl))
2820 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2821 if (!(e2 & DESC_P_MASK))
2822 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2823 gate_cs = e1 >> 16;
2824 new_eip = (e1 & 0xffff);
2825 if (type == 12)
2826 new_eip |= (e2 & 0xffff0000);
2827 if (load_segment(&e1, &e2, gate_cs) != 0)
2828 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 /* must be code segment */
2831 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2832 (DESC_S_MASK | DESC_CS_MASK)))
2833 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2834 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2835 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2836 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2837 if (!(e2 & DESC_P_MASK))
2838#ifdef VBOX /* See page 3-514 of 253666.pdf */
2839 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2840#else
2841 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2842#endif
2843 limit = get_seg_limit(e1, e2);
2844 if (new_eip > limit)
2845 raise_exception_err(EXCP0D_GPF, 0);
2846 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2847 get_seg_base(e1, e2), limit, e2);
2848 EIP = new_eip;
2849 break;
2850 default:
2851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2852 break;
2853 }
2854 }
2855}
2856
2857/* real mode call */
2858void helper_lcall_real(int new_cs, target_ulong new_eip1,
2859 int shift, int next_eip)
2860{
2861 int new_eip;
2862 uint32_t esp, esp_mask;
2863 target_ulong ssp;
2864
2865 new_eip = new_eip1;
2866 esp = ESP;
2867 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2868 ssp = env->segs[R_SS].base;
2869 if (shift) {
2870 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2871 PUSHL(ssp, esp, esp_mask, next_eip);
2872 } else {
2873 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2874 PUSHW(ssp, esp, esp_mask, next_eip);
2875 }
2876
2877 SET_ESP(esp, esp_mask);
2878 env->eip = new_eip;
2879 env->segs[R_CS].selector = new_cs;
2880 env->segs[R_CS].base = (new_cs << 4);
2881}
2882
2883/* protected mode call */
2884void helper_lcall_protected(int new_cs, target_ulong new_eip,
2885 int shift, int next_eip_addend)
2886{
2887 int new_stack, i;
2888 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2889 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2890 uint32_t val, limit, old_sp_mask;
2891 target_ulong ssp, old_ssp, next_eip;
2892
2893#ifdef VBOX /** @todo Why do we do this? */
2894 e1 = e2 = 0;
2895#endif
2896 next_eip = env->eip + next_eip_addend;
2897 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2898 LOG_PCALL_STATE(env);
2899 if ((new_cs & 0xfffc) == 0)
2900 raise_exception_err(EXCP0D_GPF, 0);
2901 if (load_segment(&e1, &e2, new_cs) != 0)
2902 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2903 cpl = env->hflags & HF_CPL_MASK;
2904 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2905 if (e2 & DESC_S_MASK) {
2906 if (!(e2 & DESC_CS_MASK))
2907 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2909 if (e2 & DESC_C_MASK) {
2910 /* conforming code segment */
2911 if (dpl > cpl)
2912 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2913 } else {
2914 /* non conforming code segment */
2915 rpl = new_cs & 3;
2916 if (rpl > cpl)
2917 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2918 if (dpl != cpl)
2919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2920 }
2921 if (!(e2 & DESC_P_MASK))
2922 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2923#ifdef VBOX
2924 if (!(e2 & DESC_A_MASK))
2925 e2 = set_segment_accessed(new_cs, e2);
2926#endif
2927
2928#ifdef TARGET_X86_64
2929 /* XXX: check 16/32 bit cases in long mode */
2930 if (shift == 2) {
2931 target_ulong rsp;
2932 /* 64 bit case */
2933 rsp = ESP;
2934 PUSHQ(rsp, env->segs[R_CS].selector);
2935 PUSHQ(rsp, next_eip);
2936 /* from this point, not restartable */
2937 ESP = rsp;
2938 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2939 get_seg_base(e1, e2),
2940 get_seg_limit(e1, e2), e2);
2941 EIP = new_eip;
2942 } else
2943#endif
2944 {
2945 sp = ESP;
2946 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2947 ssp = env->segs[R_SS].base;
2948 if (shift) {
2949 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2950 PUSHL(ssp, sp, sp_mask, next_eip);
2951 } else {
2952 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2953 PUSHW(ssp, sp, sp_mask, next_eip);
2954 }
2955
2956 limit = get_seg_limit(e1, e2);
2957 if (new_eip > limit)
2958 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2959 /* from this point, not restartable */
2960 SET_ESP(sp, sp_mask);
2961 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2962 get_seg_base(e1, e2), limit, e2);
2963 EIP = new_eip;
2964 }
2965 } else {
2966 /* check gate type */
2967 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2968 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2969 rpl = new_cs & 3;
2970 switch(type) {
2971 case 1: /* available 286 TSS */
2972 case 9: /* available 386 TSS */
2973 case 5: /* task gate */
2974 if (dpl < cpl || dpl < rpl)
2975 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2976 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2977 CC_OP = CC_OP_EFLAGS;
2978 return;
2979 case 4: /* 286 call gate */
2980 case 12: /* 386 call gate */
2981 break;
2982 default:
2983 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2984 break;
2985 }
2986 shift = type >> 3;
2987
2988 if (dpl < cpl || dpl < rpl)
2989 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2990 /* check valid bit */
2991 if (!(e2 & DESC_P_MASK))
2992 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2993 selector = e1 >> 16;
2994 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2995 param_count = e2 & 0x1f;
2996 if ((selector & 0xfffc) == 0)
2997 raise_exception_err(EXCP0D_GPF, 0);
2998
2999 if (load_segment(&e1, &e2, selector) != 0)
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3002 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3003 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3004 if (dpl > cpl)
3005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3006 if (!(e2 & DESC_P_MASK))
3007 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3008
3009 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3010 /* to inner privilege */
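            /* More privileged call through a call gate: the new SS:ESP comes
             * from the TSS for the target privilege level; after validating
             * the stack segment, the old SS:ESP are pushed on the new stack
             * and param_count parameters are copied over from the caller's
             * stack before the return CS:EIP is pushed below. */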
3011 get_ss_esp_from_tss(&ss, &sp, dpl);
3012 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3013 ss, sp, param_count, ESP);
3014 if ((ss & 0xfffc) == 0)
3015 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3016 if ((ss & 3) != dpl)
3017 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3018 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3019 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3020 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3021 if (ss_dpl != dpl)
3022 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3023 if (!(ss_e2 & DESC_S_MASK) ||
3024 (ss_e2 & DESC_CS_MASK) ||
3025 !(ss_e2 & DESC_W_MASK))
3026 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3027 if (!(ss_e2 & DESC_P_MASK))
3028#ifdef VBOX /* See page 3-99 of 253666.pdf */
3029 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3030#else
3031 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3032#endif
3033
3034 // push_size = ((param_count * 2) + 8) << shift;
3035
3036 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3037 old_ssp = env->segs[R_SS].base;
3038
3039 sp_mask = get_sp_mask(ss_e2);
3040 ssp = get_seg_base(ss_e1, ss_e2);
3041 if (shift) {
3042 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3043 PUSHL(ssp, sp, sp_mask, ESP);
3044 for(i = param_count - 1; i >= 0; i--) {
3045 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3046 PUSHL(ssp, sp, sp_mask, val);
3047 }
3048 } else {
3049 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3050 PUSHW(ssp, sp, sp_mask, ESP);
3051 for(i = param_count - 1; i >= 0; i--) {
3052 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3053 PUSHW(ssp, sp, sp_mask, val);
3054 }
3055 }
3056 new_stack = 1;
3057 } else {
3058 /* to same privilege */
3059 sp = ESP;
3060 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3061 ssp = env->segs[R_SS].base;
3062 // push_size = (4 << shift);
3063 new_stack = 0;
3064 }
3065
3066 if (shift) {
3067 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3068 PUSHL(ssp, sp, sp_mask, next_eip);
3069 } else {
3070 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3071 PUSHW(ssp, sp, sp_mask, next_eip);
3072 }
3073
3074 /* from this point, not restartable */
3075
3076 if (new_stack) {
3077 ss = (ss & ~3) | dpl;
3078 cpu_x86_load_seg_cache(env, R_SS, ss,
3079 ssp,
3080 get_seg_limit(ss_e1, ss_e2),
3081 ss_e2);
3082 }
3083
3084 selector = (selector & ~3) | dpl;
3085 cpu_x86_load_seg_cache(env, R_CS, selector,
3086 get_seg_base(e1, e2),
3087 get_seg_limit(e1, e2),
3088 e2);
3089 cpu_x86_set_cpl(env, dpl);
3090 SET_ESP(sp, sp_mask);
3091 EIP = offset;
3092 }
3093}
3094
3095/* real and vm86 mode iret */
3096void helper_iret_real(int shift)
3097{
3098 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3099 target_ulong ssp;
3100 int eflags_mask;
3101#ifdef VBOX
3102 bool fVME = false;
3103
3104 remR3TrapClear(env->pVM);
3105#endif /* VBOX */
3106
3107 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3108 sp = ESP;
3109 ssp = env->segs[R_SS].base;
3110 if (shift == 1) {
3111 /* 32 bits */
3112 POPL(ssp, sp, sp_mask, new_eip);
3113 POPL(ssp, sp, sp_mask, new_cs);
3114 new_cs &= 0xffff;
3115 POPL(ssp, sp, sp_mask, new_eflags);
3116 } else {
3117 /* 16 bits */
3118 POPW(ssp, sp, sp_mask, new_eip);
3119 POPW(ssp, sp, sp_mask, new_cs);
3120 POPW(ssp, sp, sp_mask, new_eflags);
3121 }
3122#ifdef VBOX
3123 if ( (env->eflags & VM_MASK)
3124 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3125 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3126 {
3127 fVME = true;
3128 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3129 /* if TF will be set -> #GP */
3130 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3131 || (new_eflags & TF_MASK))
3132 raise_exception(EXCP0D_GPF);
3133 }
3134#endif /* VBOX */
3135 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3136 env->segs[R_CS].selector = new_cs;
3137 env->segs[R_CS].base = (new_cs << 4);
3138 env->eip = new_eip;
3139#ifdef VBOX
3140 if (fVME)
3141 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3142 else
3143#endif
3144 if (env->eflags & VM_MASK)
3145 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3146 else
3147 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3148 if (shift == 0)
3149 eflags_mask &= 0xffff;
3150 load_eflags(new_eflags, eflags_mask);
3151 env->hflags2 &= ~HF2_NMI_MASK;
3152#ifdef VBOX
3153 if (fVME)
3154 {
3155 if (new_eflags & IF_MASK)
3156 env->eflags |= VIF_MASK;
3157 else
3158 env->eflags &= ~VIF_MASK;
3159 }
3160#endif /* VBOX */
3161}
3162
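/* On return to an outer privilege level, data segment registers holding a
 * descriptor whose DPL is lower than the new CPL must be loaded with the null
 * selector so the less privileged code cannot keep using them. */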
3163static inline void validate_seg(int seg_reg, int cpl)
3164{
3165 int dpl;
3166 uint32_t e2;
3167
3168 /* XXX: on x86_64, we do not want to nullify FS and GS because
3169 they may still contain a valid base. I would be interested to
3170 know how a real x86_64 CPU behaves */
3171 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3172 (env->segs[seg_reg].selector & 0xfffc) == 0)
3173 return;
3174
3175 e2 = env->segs[seg_reg].flags;
3176 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3177 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3178 /* data or non conforming code segment */
3179 if (dpl < cpl) {
3180 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3181 }
3182 }
3183}
3184
3185/* protected mode iret */
3186static inline void helper_ret_protected(int shift, int is_iret, int addend)
3187{
3188 uint32_t new_cs, new_eflags, new_ss;
3189 uint32_t new_es, new_ds, new_fs, new_gs;
3190 uint32_t e1, e2, ss_e1, ss_e2;
3191 int cpl, dpl, rpl, eflags_mask, iopl;
3192 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3193
3194#ifdef VBOX /** @todo Why do we do this? */
3195 ss_e1 = ss_e2 = e1 = e2 = 0;
3196#endif
3197
3198#ifdef TARGET_X86_64
3199 if (shift == 2)
3200 sp_mask = -1;
3201 else
3202#endif
3203 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3204 sp = ESP;
3205 ssp = env->segs[R_SS].base;
3206 new_eflags = 0; /* avoid warning */
3207#ifdef TARGET_X86_64
3208 if (shift == 2) {
3209 POPQ(sp, new_eip);
3210 POPQ(sp, new_cs);
3211 new_cs &= 0xffff;
3212 if (is_iret) {
3213 POPQ(sp, new_eflags);
3214 }
3215 } else
3216#endif
3217 if (shift == 1) {
3218 /* 32 bits */
3219 POPL(ssp, sp, sp_mask, new_eip);
3220 POPL(ssp, sp, sp_mask, new_cs);
3221 new_cs &= 0xffff;
3222 if (is_iret) {
3223 POPL(ssp, sp, sp_mask, new_eflags);
3224#define LOG_GROUP LOG_GROUP_REM
3225#if defined(VBOX) && defined(DEBUG)
3226 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3227 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3228 Log(("iret: new EFLAGS %08X\n", new_eflags));
3229 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3230#endif
3231 if (new_eflags & VM_MASK)
3232 goto return_to_vm86;
3233 }
3234#ifdef VBOX
3235 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3236 {
3237 if ( !EMIsRawRing1Enabled(env->pVM)
3238 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3239 {
3240 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3241 new_cs = new_cs & 0xfffc;
3242 }
3243 else
3244 {
3245 /* Ugly assumption: assume a genuine switch to ring-1. */
3246 Log(("Genuine switch to ring-1 (iret)\n"));
3247 }
3248 }
3249 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3250 {
3251 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3252 new_cs = (new_cs & 0xfffc) | 1;
3253 }
3254#endif
3255 } else {
3256 /* 16 bits */
3257 POPW(ssp, sp, sp_mask, new_eip);
3258 POPW(ssp, sp, sp_mask, new_cs);
3259 if (is_iret)
3260 POPW(ssp, sp, sp_mask, new_eflags);
3261 }
3262 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3263 new_cs, new_eip, shift, addend);
3264 LOG_PCALL_STATE(env);
3265 if ((new_cs & 0xfffc) == 0)
3266 {
3267#if defined(VBOX) && defined(DEBUG)
3268        Log(("(new_cs & 0xfffc) == 0\n"));
3269#endif
3270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3271 }
3272 if (load_segment(&e1, &e2, new_cs) != 0)
3273 {
3274#if defined(VBOX) && defined(DEBUG)
3275 Log(("load_segment failed\n"));
3276#endif
3277 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3278 }
3279 if (!(e2 & DESC_S_MASK) ||
3280 !(e2 & DESC_CS_MASK))
3281 {
3282#if defined(VBOX) && defined(DEBUG)
3283 Log(("e2 mask %08x\n", e2));
3284#endif
3285 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3286 }
3287 cpl = env->hflags & HF_CPL_MASK;
3288 rpl = new_cs & 3;
3289 if (rpl < cpl)
3290 {
3291#if defined(VBOX) && defined(DEBUG)
3292 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3293#endif
3294 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3295 }
3296 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3297
3298 if (e2 & DESC_C_MASK) {
3299 if (dpl > rpl)
3300 {
3301#if defined(VBOX) && defined(DEBUG)
3302 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3303#endif
3304 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3305 }
3306 } else {
3307 if (dpl != rpl)
3308 {
3309#if defined(VBOX) && defined(DEBUG)
3310 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3311#endif
3312 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3313 }
3314 }
3315 if (!(e2 & DESC_P_MASK))
3316 {
3317#if defined(VBOX) && defined(DEBUG)
3318 Log(("DESC_P_MASK e2=%08x\n", e2));
3319#endif
3320 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3321 }
3322
3323 sp += addend;
3324 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3325 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3326 /* return to same privilege level */
3327#ifdef VBOX
3328 if (!(e2 & DESC_A_MASK))
3329 e2 = set_segment_accessed(new_cs, e2);
3330#endif
3331 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3332 get_seg_base(e1, e2),
3333 get_seg_limit(e1, e2),
3334 e2);
3335 } else {
3336 /* return to different privilege level */
3337#ifdef TARGET_X86_64
3338 if (shift == 2) {
3339 POPQ(sp, new_esp);
3340 POPQ(sp, new_ss);
3341 new_ss &= 0xffff;
3342 } else
3343#endif
3344 if (shift == 1) {
3345 /* 32 bits */
3346 POPL(ssp, sp, sp_mask, new_esp);
3347 POPL(ssp, sp, sp_mask, new_ss);
3348 new_ss &= 0xffff;
3349 } else {
3350 /* 16 bits */
3351 POPW(ssp, sp, sp_mask, new_esp);
3352 POPW(ssp, sp, sp_mask, new_ss);
3353 }
3354 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3355 new_ss, new_esp);
3356 if ((new_ss & 0xfffc) == 0) {
3357#ifdef TARGET_X86_64
3358            /* NULL ss is allowed in long mode if cpl != 3 */
3359# ifndef VBOX
3360 /* XXX: test CS64 ? */
3361 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3362 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3363 0, 0xffffffff,
3364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3365 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3366 DESC_W_MASK | DESC_A_MASK);
3367 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3368 } else
3369# else /* VBOX */
3370 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3371 if (!(e2 & DESC_A_MASK))
3372 e2 = set_segment_accessed(new_cs, e2);
3373 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3374 0, 0xffffffff,
3375 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3376 ss_e2 = DESC_B_MASK; /* not really used */
3377 } else
3378# endif
3379#endif
3380 {
3381#if defined(VBOX) && defined(DEBUG)
3382 Log(("NULL ss, rpl=%d\n", rpl));
3383#endif
3384 raise_exception_err(EXCP0D_GPF, 0);
3385 }
3386 } else {
3387 if ((new_ss & 3) != rpl)
3388 {
3389#if defined(VBOX) && defined(DEBUG)
3390 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3391#endif
3392 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3393 }
3394 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3395 {
3396#if defined(VBOX) && defined(DEBUG)
3397 Log(("new_ss=%x load error\n", new_ss));
3398#endif
3399 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3400 }
3401 if (!(ss_e2 & DESC_S_MASK) ||
3402 (ss_e2 & DESC_CS_MASK) ||
3403 !(ss_e2 & DESC_W_MASK))
3404 {
3405#if defined(VBOX) && defined(DEBUG)
3406 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3407#endif
3408 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3409 }
3410 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3411 if (dpl != rpl)
3412 {
3413#if defined(VBOX) && defined(DEBUG)
3414 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3415#endif
3416 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3417 }
3418 if (!(ss_e2 & DESC_P_MASK))
3419 {
3420#if defined(VBOX) && defined(DEBUG)
3421 Log(("new_ss=%#x #NP\n", new_ss));
3422#endif
3423 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3424 }
3425#ifdef VBOX
3426 if (!(e2 & DESC_A_MASK))
3427 e2 = set_segment_accessed(new_cs, e2);
3428 if (!(ss_e2 & DESC_A_MASK))
3429 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3430#endif
3431 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3432 get_seg_base(ss_e1, ss_e2),
3433 get_seg_limit(ss_e1, ss_e2),
3434 ss_e2);
3435 }
3436
3437 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3438 get_seg_base(e1, e2),
3439 get_seg_limit(e1, e2),
3440 e2);
3441 cpu_x86_set_cpl(env, rpl);
3442 sp = new_esp;
3443#ifdef TARGET_X86_64
3444 if (env->hflags & HF_CS64_MASK)
3445 sp_mask = -1;
3446 else
3447#endif
3448 sp_mask = get_sp_mask(ss_e2);
3449
3450 /* validate data segments */
3451 validate_seg(R_ES, rpl);
3452 validate_seg(R_DS, rpl);
3453 validate_seg(R_FS, rpl);
3454 validate_seg(R_GS, rpl);
3455
3456 sp += addend;
3457 }
3458 SET_ESP(sp, sp_mask);
3459 env->eip = new_eip;
3460 if (is_iret) {
3461 /* NOTE: 'cpl' is the _old_ CPL */
3462 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3463 if (cpl == 0)
3464#ifdef VBOX
3465 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3466#else
3467 eflags_mask |= IOPL_MASK;
3468#endif
3469 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3470 if (cpl <= iopl)
3471 eflags_mask |= IF_MASK;
3472 if (shift == 0)
3473 eflags_mask &= 0xffff;
3474 load_eflags(new_eflags, eflags_mask);
3475 }
3476 return;
3477
3478 return_to_vm86:
3479 POPL(ssp, sp, sp_mask, new_esp);
3480 POPL(ssp, sp, sp_mask, new_ss);
3481 POPL(ssp, sp, sp_mask, new_es);
3482 POPL(ssp, sp, sp_mask, new_ds);
3483 POPL(ssp, sp, sp_mask, new_fs);
3484 POPL(ssp, sp, sp_mask, new_gs);
3485
3486 /* modify processor state */
3487 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3488 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3489 load_seg_vm(R_CS, new_cs & 0xffff);
3490 cpu_x86_set_cpl(env, 3);
3491 load_seg_vm(R_SS, new_ss & 0xffff);
3492 load_seg_vm(R_ES, new_es & 0xffff);
3493 load_seg_vm(R_DS, new_ds & 0xffff);
3494 load_seg_vm(R_FS, new_fs & 0xffff);
3495 load_seg_vm(R_GS, new_gs & 0xffff);
3496
3497 env->eip = new_eip & 0xffff;
3498 ESP = new_esp;
3499}
3500
3501void helper_iret_protected(int shift, int next_eip)
3502{
3503 int tss_selector, type;
3504 uint32_t e1, e2;
3505
3506#ifdef VBOX
3507 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3508 e1 = e2 = 0; /** @todo Why do we do this? */
3509 remR3TrapClear(env->pVM);
3510#endif
3511
3512 /* specific case for TSS */
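    /* With EFLAGS.NT set, IRET returns to the previous task: the back-link
     * selector is read from offset 0 of the current TSS and a task switch is
     * performed instead of a normal stack return. */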
3513 if (env->eflags & NT_MASK) {
3514#ifdef TARGET_X86_64
3515 if (env->hflags & HF_LMA_MASK)
3516 {
3517#if defined(VBOX) && defined(DEBUG)
3518 Log(("eflags.NT=1 on iret in long mode\n"));
3519#endif
3520 raise_exception_err(EXCP0D_GPF, 0);
3521 }
3522#endif
3523 tss_selector = lduw_kernel(env->tr.base + 0);
3524 if (tss_selector & 4)
3525 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3526 if (load_segment(&e1, &e2, tss_selector) != 0)
3527 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3528 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3529        /* NOTE: the 0x17 mask checks the S bit and the busy-TSS type bits at once (busy 286 TSS = 3, busy 386 TSS = 0xB) */
3530 if (type != 3)
3531 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3532 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3533 } else {
3534 helper_ret_protected(shift, 1, 0);
3535 }
3536 env->hflags2 &= ~HF2_NMI_MASK;
3537}
3538
3539void helper_lret_protected(int shift, int addend)
3540{
3541 helper_ret_protected(shift, 0, addend);
3542}
3543
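/* SYSENTER loads a flat ring-0 CS from IA32_SYSENTER_CS (SS = CS + 8), sets
 * ESP = IA32_SYSENTER_ESP and jumps to IA32_SYSENTER_EIP. No return address is
 * saved; the OS is expected to return with SYSEXIT using ECX/EDX. */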
3544void helper_sysenter(void)
3545{
3546 if (env->sysenter_cs == 0) {
3547 raise_exception_err(EXCP0D_GPF, 0);
3548 }
3549 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3550 cpu_x86_set_cpl(env, 0);
3551
3552#ifdef TARGET_X86_64
3553 if (env->hflags & HF_LMA_MASK) {
3554 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3555 0, 0xffffffff,
3556 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3557 DESC_S_MASK |
3558 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3559 } else
3560#endif
3561 {
3562 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3563 0, 0xffffffff,
3564 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3565 DESC_S_MASK |
3566 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3567 }
3568 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3569 0, 0xffffffff,
3570 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3571 DESC_S_MASK |
3572 DESC_W_MASK | DESC_A_MASK);
3573 ESP = env->sysenter_esp;
3574 EIP = env->sysenter_eip;
3575}
3576
3577void helper_sysexit(int dflag)
3578{
3579 int cpl;
3580
3581 cpl = env->hflags & HF_CPL_MASK;
3582 if (env->sysenter_cs == 0 || cpl != 0) {
3583 raise_exception_err(EXCP0D_GPF, 0);
3584 }
3585 cpu_x86_set_cpl(env, 3);
3586#ifdef TARGET_X86_64
3587 if (dflag == 2) {
3588 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3589 0, 0xffffffff,
3590 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3591 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3592 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3593 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3594 0, 0xffffffff,
3595 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3596 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3597 DESC_W_MASK | DESC_A_MASK);
3598 } else
3599#endif
3600 {
3601 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3602 0, 0xffffffff,
3603 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3604 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3605 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3606 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3607 0, 0xffffffff,
3608 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3609 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3610 DESC_W_MASK | DESC_A_MASK);
3611 }
3612 ESP = ECX;
3613 EIP = EDX;
3614}
3615
3616#if defined(CONFIG_USER_ONLY)
3617target_ulong helper_read_crN(int reg)
3618{
3619 return 0;
3620}
3621
3622void helper_write_crN(int reg, target_ulong t0)
3623{
3624}
3625
3626void helper_movl_drN_T0(int reg, target_ulong t0)
3627{
3628}
3629#else
3630target_ulong helper_read_crN(int reg)
3631{
3632 target_ulong val;
3633
3634 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3635 switch(reg) {
3636 default:
3637 val = env->cr[reg];
3638 break;
3639 case 8:
3640 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3641#ifndef VBOX
3642 val = cpu_get_apic_tpr(env->apic_state);
3643#else /* VBOX */
3644 val = cpu_get_apic_tpr(env);
3645#endif /* VBOX */
3646 } else {
3647 val = env->v_tpr;
3648 }
3649 break;
3650 }
3651 return val;
3652}
3653
3654void helper_write_crN(int reg, target_ulong t0)
3655{
3656 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3657 switch(reg) {
3658 case 0:
3659 cpu_x86_update_cr0(env, t0);
3660 break;
3661 case 3:
3662 cpu_x86_update_cr3(env, t0);
3663 break;
3664 case 4:
3665 cpu_x86_update_cr4(env, t0);
3666 break;
3667 case 8:
3668 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3669#ifndef VBOX
3670 cpu_set_apic_tpr(env->apic_state, t0);
3671#else /* VBOX */
3672 cpu_set_apic_tpr(env, t0);
3673#endif /* VBOX */
3674 }
3675 env->v_tpr = t0 & 0x0f;
3676 break;
3677 default:
3678 env->cr[reg] = t0;
3679 break;
3680 }
3681}
3682
3683void helper_movl_drN_T0(int reg, target_ulong t0)
3684{
3685 int i;
3686
3687 if (reg < 4) {
3688 hw_breakpoint_remove(env, reg);
3689 env->dr[reg] = t0;
3690 hw_breakpoint_insert(env, reg);
3691# ifndef VBOX
3692 } else if (reg == 7) {
3693# else
3694 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3695 if (t0 & X86_DR7_MBZ_MASK)
3696 raise_exception_err(EXCP0D_GPF, 0);
3697 t0 |= X86_DR7_RA1_MASK;
3698 t0 &= ~X86_DR7_RAZ_MASK;
3699# endif
3700 for (i = 0; i < 4; i++)
3701 hw_breakpoint_remove(env, i);
3702 env->dr[7] = t0;
3703 for (i = 0; i < 4; i++)
3704 hw_breakpoint_insert(env, i);
3705 } else {
3706# ifndef VBOX
3707 env->dr[reg] = t0;
3708# else
3709 if (t0 & X86_DR6_MBZ_MASK)
3710 raise_exception_err(EXCP0D_GPF, 0);
3711 t0 |= X86_DR6_RA1_MASK;
3712 t0 &= ~X86_DR6_RAZ_MASK;
3713 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3714# endif
3715 }
3716}
3717#endif
3718
3719void helper_lmsw(target_ulong t0)
3720{
3721    /* Only the 4 lower bits of CR0 are modified. PE cannot be cleared
3722       once it has been set. */
3723 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3724 helper_write_crN(0, t0);
3725}
3726
3727void helper_clts(void)
3728{
3729 env->cr[0] &= ~CR0_TS_MASK;
3730 env->hflags &= ~HF_TS_MASK;
3731}
3732
3733void helper_invlpg(target_ulong addr)
3734{
3735 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3736 tlb_flush_page(env, addr);
3737}
3738
3739void helper_rdtsc(void)
3740{
3741 uint64_t val;
3742
3743 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3744 raise_exception(EXCP0D_GPF);
3745 }
3746 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3747
3748 val = cpu_get_tsc(env) + env->tsc_offset;
3749 EAX = (uint32_t)(val);
3750 EDX = (uint32_t)(val >> 32);
3751}
3752
3753void helper_rdtscp(void)
3754{
3755 helper_rdtsc();
3756#ifndef VBOX
3757 ECX = (uint32_t)(env->tsc_aux);
3758#else /* VBOX */
3759 uint64_t val;
3760 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3761 ECX = (uint32_t)(val);
3762 else
3763 ECX = 0;
3764#endif /* VBOX */
3765}
3766
3767void helper_rdpmc(void)
3768{
3769#ifdef VBOX
3770 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3771 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3772 raise_exception(EXCP0D_GPF);
3773 }
3774 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3775 EAX = 0;
3776 EDX = 0;
3777#else /* !VBOX */
3778 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3779 raise_exception(EXCP0D_GPF);
3780 }
3781 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3782
3783 /* currently unimplemented */
3784 raise_exception_err(EXCP06_ILLOP, 0);
3785#endif /* !VBOX */
3786}
3787
3788#if defined(CONFIG_USER_ONLY)
3789void helper_wrmsr(void)
3790{
3791}
3792
3793void helper_rdmsr(void)
3794{
3795}
3796#else
3797void helper_wrmsr(void)
3798{
3799 uint64_t val;
3800
3801 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3802
3803 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3804
3805 switch((uint32_t)ECX) {
3806 case MSR_IA32_SYSENTER_CS:
3807 env->sysenter_cs = val & 0xffff;
3808 break;
3809 case MSR_IA32_SYSENTER_ESP:
3810 env->sysenter_esp = val;
3811 break;
3812 case MSR_IA32_SYSENTER_EIP:
3813 env->sysenter_eip = val;
3814 break;
3815 case MSR_IA32_APICBASE:
3816# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3817 cpu_set_apic_base(env->apic_state, val);
3818# endif
3819 break;
3820 case MSR_EFER:
3821 {
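            /* Only EFER bits backed by the corresponding CPUID feature flags
             * are writable; all other bits keep their previous value. */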
3822 uint64_t update_mask;
3823 update_mask = 0;
3824 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3825 update_mask |= MSR_EFER_SCE;
3826 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3827 update_mask |= MSR_EFER_LME;
3828 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3829 update_mask |= MSR_EFER_FFXSR;
3830 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3831 update_mask |= MSR_EFER_NXE;
3832 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3833 update_mask |= MSR_EFER_SVME;
3834 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3835 update_mask |= MSR_EFER_FFXSR;
3836 cpu_load_efer(env, (env->efer & ~update_mask) |
3837 (val & update_mask));
3838 }
3839 break;
3840 case MSR_STAR:
3841 env->star = val;
3842 break;
3843 case MSR_PAT:
3844 env->pat = val;
3845 break;
3846 case MSR_VM_HSAVE_PA:
3847 env->vm_hsave = val;
3848 break;
3849#ifdef TARGET_X86_64
3850 case MSR_LSTAR:
3851 env->lstar = val;
3852 break;
3853 case MSR_CSTAR:
3854 env->cstar = val;
3855 break;
3856 case MSR_FMASK:
3857 env->fmask = val;
3858 break;
3859 case MSR_FSBASE:
3860 env->segs[R_FS].base = val;
3861 break;
3862 case MSR_GSBASE:
3863 env->segs[R_GS].base = val;
3864 break;
3865 case MSR_KERNELGSBASE:
3866 env->kernelgsbase = val;
3867 break;
3868#endif
3869# ifndef VBOX
3870 case MSR_MTRRphysBase(0):
3871 case MSR_MTRRphysBase(1):
3872 case MSR_MTRRphysBase(2):
3873 case MSR_MTRRphysBase(3):
3874 case MSR_MTRRphysBase(4):
3875 case MSR_MTRRphysBase(5):
3876 case MSR_MTRRphysBase(6):
3877 case MSR_MTRRphysBase(7):
3878 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3879 break;
3880 case MSR_MTRRphysMask(0):
3881 case MSR_MTRRphysMask(1):
3882 case MSR_MTRRphysMask(2):
3883 case MSR_MTRRphysMask(3):
3884 case MSR_MTRRphysMask(4):
3885 case MSR_MTRRphysMask(5):
3886 case MSR_MTRRphysMask(6):
3887 case MSR_MTRRphysMask(7):
3888 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3889 break;
3890 case MSR_MTRRfix64K_00000:
3891 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3892 break;
3893 case MSR_MTRRfix16K_80000:
3894 case MSR_MTRRfix16K_A0000:
3895 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3896 break;
3897 case MSR_MTRRfix4K_C0000:
3898 case MSR_MTRRfix4K_C8000:
3899 case MSR_MTRRfix4K_D0000:
3900 case MSR_MTRRfix4K_D8000:
3901 case MSR_MTRRfix4K_E0000:
3902 case MSR_MTRRfix4K_E8000:
3903 case MSR_MTRRfix4K_F0000:
3904 case MSR_MTRRfix4K_F8000:
3905 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3906 break;
3907 case MSR_MTRRdefType:
3908 env->mtrr_deftype = val;
3909 break;
3910 case MSR_MCG_STATUS:
3911 env->mcg_status = val;
3912 break;
3913 case MSR_MCG_CTL:
3914 if ((env->mcg_cap & MCG_CTL_P)
3915 && (val == 0 || val == ~(uint64_t)0))
3916 env->mcg_ctl = val;
3917 break;
3918 case MSR_TSC_AUX:
3919 env->tsc_aux = val;
3920 break;
3921# endif /* !VBOX */
3922 default:
3923# ifndef VBOX
3924 if ((uint32_t)ECX >= MSR_MC0_CTL
3925 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3926 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3927 if ((offset & 0x3) != 0
3928 || (val == 0 || val == ~(uint64_t)0))
3929 env->mce_banks[offset] = val;
3930 break;
3931 }
3932 /* XXX: exception ? */
3933# endif
3934 break;
3935 }
3936
3937# ifdef VBOX
3938 /* call CPUM. */
3939 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3940 {
3941 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3942 }
3943# endif
3944}
3945
3946void helper_rdmsr(void)
3947{
3948 uint64_t val;
3949
3950 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3951
3952 switch((uint32_t)ECX) {
3953 case MSR_IA32_SYSENTER_CS:
3954 val = env->sysenter_cs;
3955 break;
3956 case MSR_IA32_SYSENTER_ESP:
3957 val = env->sysenter_esp;
3958 break;
3959 case MSR_IA32_SYSENTER_EIP:
3960 val = env->sysenter_eip;
3961 break;
3962 case MSR_IA32_APICBASE:
3963#ifndef VBOX
3964 val = cpu_get_apic_base(env->apic_state);
3965#else /* VBOX */
3966 val = cpu_get_apic_base(env);
3967#endif /* VBOX */
3968 break;
3969 case MSR_EFER:
3970 val = env->efer;
3971 break;
3972 case MSR_STAR:
3973 val = env->star;
3974 break;
3975 case MSR_PAT:
3976 val = env->pat;
3977 break;
3978 case MSR_VM_HSAVE_PA:
3979 val = env->vm_hsave;
3980 break;
3981# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3982 case MSR_IA32_PERF_STATUS:
3983 /* tsc_increment_by_tick */
3984 val = 1000ULL;
3985 /* CPU multiplier */
3986 val |= (((uint64_t)4ULL) << 40);
3987 break;
3988# endif /* !VBOX */
3989#ifdef TARGET_X86_64
3990 case MSR_LSTAR:
3991 val = env->lstar;
3992 break;
3993 case MSR_CSTAR:
3994 val = env->cstar;
3995 break;
3996 case MSR_FMASK:
3997 val = env->fmask;
3998 break;
3999 case MSR_FSBASE:
4000 val = env->segs[R_FS].base;
4001 break;
4002 case MSR_GSBASE:
4003 val = env->segs[R_GS].base;
4004 break;
4005 case MSR_KERNELGSBASE:
4006 val = env->kernelgsbase;
4007 break;
4008# ifndef VBOX
4009 case MSR_TSC_AUX:
4010 val = env->tsc_aux;
4011 break;
4012# endif /*!VBOX*/
4013#endif
4014# ifndef VBOX
4015 case MSR_MTRRphysBase(0):
4016 case MSR_MTRRphysBase(1):
4017 case MSR_MTRRphysBase(2):
4018 case MSR_MTRRphysBase(3):
4019 case MSR_MTRRphysBase(4):
4020 case MSR_MTRRphysBase(5):
4021 case MSR_MTRRphysBase(6):
4022 case MSR_MTRRphysBase(7):
4023 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4024 break;
4025 case MSR_MTRRphysMask(0):
4026 case MSR_MTRRphysMask(1):
4027 case MSR_MTRRphysMask(2):
4028 case MSR_MTRRphysMask(3):
4029 case MSR_MTRRphysMask(4):
4030 case MSR_MTRRphysMask(5):
4031 case MSR_MTRRphysMask(6):
4032 case MSR_MTRRphysMask(7):
4033 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4034 break;
4035 case MSR_MTRRfix64K_00000:
4036 val = env->mtrr_fixed[0];
4037 break;
4038 case MSR_MTRRfix16K_80000:
4039 case MSR_MTRRfix16K_A0000:
4040 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4041 break;
4042 case MSR_MTRRfix4K_C0000:
4043 case MSR_MTRRfix4K_C8000:
4044 case MSR_MTRRfix4K_D0000:
4045 case MSR_MTRRfix4K_D8000:
4046 case MSR_MTRRfix4K_E0000:
4047 case MSR_MTRRfix4K_E8000:
4048 case MSR_MTRRfix4K_F0000:
4049 case MSR_MTRRfix4K_F8000:
4050 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4051 break;
4052 case MSR_MTRRdefType:
4053 val = env->mtrr_deftype;
4054 break;
4055 case MSR_MTRRcap:
4056 if (env->cpuid_features & CPUID_MTRR)
4057 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4058 else
4059 /* XXX: exception ? */
4060 val = 0;
4061 break;
4062 case MSR_MCG_CAP:
4063 val = env->mcg_cap;
4064 break;
4065 case MSR_MCG_CTL:
4066 if (env->mcg_cap & MCG_CTL_P)
4067 val = env->mcg_ctl;
4068 else
4069 val = 0;
4070 break;
4071 case MSR_MCG_STATUS:
4072 val = env->mcg_status;
4073 break;
4074# endif /* !VBOX */
4075 default:
4076# ifndef VBOX
4077 if ((uint32_t)ECX >= MSR_MC0_CTL
4078 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
4079 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4080 val = env->mce_banks[offset];
4081 break;
4082 }
4083 /* XXX: exception ? */
4084 val = 0;
4085# else /* VBOX */
4086 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4087 {
4088 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4089 val = 0;
4090 }
4091# endif /* VBOX */
4092 break;
4093 }
4094 EAX = (uint32_t)(val);
4095 EDX = (uint32_t)(val >> 32);
4096
4097# ifdef VBOX_STRICT
4098 if ((uint32_t)ECX != MSR_IA32_TSC) {
4099 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4100 val = 0;
4101 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4102 }
4103# endif
4104}
4105#endif
4106
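/* LSL: loads the limit of the descriptor referenced by the selector, provided the
   descriptor type is allowed and visible at the current privilege level.  Success is
   reported by setting ZF (via CC_SRC); on failure ZF is cleared and 0 is returned. */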
4107target_ulong helper_lsl(target_ulong selector1)
4108{
4109 unsigned int limit;
4110 uint32_t e1, e2, eflags, selector;
4111 int rpl, dpl, cpl, type;
4112
4113 selector = selector1 & 0xffff;
4114 eflags = helper_cc_compute_all(CC_OP);
4115 if ((selector & 0xfffc) == 0)
4116 goto fail;
4117 if (load_segment(&e1, &e2, selector) != 0)
4118 goto fail;
4119 rpl = selector & 3;
4120 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4121 cpl = env->hflags & HF_CPL_MASK;
4122 if (e2 & DESC_S_MASK) {
4123 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4124 /* conforming */
4125 } else {
4126 if (dpl < cpl || dpl < rpl)
4127 goto fail;
4128 }
4129 } else {
4130 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4131 switch(type) {
4132 case 1:
4133 case 2:
4134 case 3:
4135 case 9:
4136 case 11:
4137 break;
4138 default:
4139 goto fail;
4140 }
4141 if (dpl < cpl || dpl < rpl) {
4142 fail:
4143 CC_SRC = eflags & ~CC_Z;
4144 return 0;
4145 }
4146 }
4147 limit = get_seg_limit(e1, e2);
4148 CC_SRC = eflags | CC_Z;
4149 return limit;
4150}
4151
4152target_ulong helper_lar(target_ulong selector1)
4153{
4154 uint32_t e1, e2, eflags, selector;
4155 int rpl, dpl, cpl, type;
4156
4157 selector = selector1 & 0xffff;
4158 eflags = helper_cc_compute_all(CC_OP);
4159 if ((selector & 0xfffc) == 0)
4160 goto fail;
4161 if (load_segment(&e1, &e2, selector) != 0)
4162 goto fail;
4163 rpl = selector & 3;
4164 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4165 cpl = env->hflags & HF_CPL_MASK;
4166 if (e2 & DESC_S_MASK) {
4167 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4168 /* conforming */
4169 } else {
4170 if (dpl < cpl || dpl < rpl)
4171 goto fail;
4172 }
4173 } else {
4174 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4175 switch(type) {
4176 case 1:
4177 case 2:
4178 case 3:
4179 case 4:
4180 case 5:
4181 case 9:
4182 case 11:
4183 case 12:
4184 break;
4185 default:
4186 goto fail;
4187 }
4188 if (dpl < cpl || dpl < rpl) {
4189 fail:
4190 CC_SRC = eflags & ~CC_Z;
4191 return 0;
4192 }
4193 }
4194 CC_SRC = eflags | CC_Z;
4195#ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4196 return e2 & 0x00ffff00;
4197#else
4198 return e2 & 0x00f0ff00;
4199#endif
4200}
4201
4202void helper_verr(target_ulong selector1)
4203{
4204 uint32_t e1, e2, eflags, selector;
4205 int rpl, dpl, cpl;
4206
4207 selector = selector1 & 0xffff;
4208 eflags = helper_cc_compute_all(CC_OP);
4209 if ((selector & 0xfffc) == 0)
4210 goto fail;
4211 if (load_segment(&e1, &e2, selector) != 0)
4212 goto fail;
4213 if (!(e2 & DESC_S_MASK))
4214 goto fail;
4215 rpl = selector & 3;
4216 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4217 cpl = env->hflags & HF_CPL_MASK;
4218 if (e2 & DESC_CS_MASK) {
4219 if (!(e2 & DESC_R_MASK))
4220 goto fail;
4221 if (!(e2 & DESC_C_MASK)) {
4222 if (dpl < cpl || dpl < rpl)
4223 goto fail;
4224 }
4225 } else {
4226 if (dpl < cpl || dpl < rpl) {
4227 fail:
4228 CC_SRC = eflags & ~CC_Z;
4229 return;
4230 }
4231 }
4232 CC_SRC = eflags | CC_Z;
4233}
4234
4235void helper_verw(target_ulong selector1)
4236{
4237 uint32_t e1, e2, eflags, selector;
4238 int rpl, dpl, cpl;
4239
4240 selector = selector1 & 0xffff;
4241 eflags = helper_cc_compute_all(CC_OP);
4242 if ((selector & 0xfffc) == 0)
4243 goto fail;
4244 if (load_segment(&e1, &e2, selector) != 0)
4245 goto fail;
4246 if (!(e2 & DESC_S_MASK))
4247 goto fail;
4248 rpl = selector & 3;
4249 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4250 cpl = env->hflags & HF_CPL_MASK;
4251 if (e2 & DESC_CS_MASK) {
4252 goto fail;
4253 } else {
4254 if (dpl < cpl || dpl < rpl)
4255 goto fail;
4256 if (!(e2 & DESC_W_MASK)) {
4257 fail:
4258 CC_SRC = eflags & ~CC_Z;
4259 return;
4260 }
4261 }
4262 CC_SRC = eflags | CC_Z;
4263}
4264
4265/* x87 FPU helpers */
4266
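/* Records an x87 exception in the status word; if the exception is not masked in the
   control word, the summary and busy bits are set as well so that the fault is raised
   on the next waiting FP instruction. */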
4267static void fpu_set_exception(int mask)
4268{
4269 env->fpus |= mask;
4270 if (env->fpus & (~env->fpuc & FPUC_EM))
4271 env->fpus |= FPUS_SE | FPUS_B;
4272}
4273
4274static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4275{
4276 if (b == 0.0)
4277 fpu_set_exception(FPUS_ZE);
4278 return a / b;
4279}
4280
4281static void fpu_raise_exception(void)
4282{
4283 if (env->cr[0] & CR0_NE_MASK) {
4284 raise_exception(EXCP10_COPR);
4285 }
4286#if !defined(CONFIG_USER_ONLY)
4287 else {
4288 cpu_set_ferr(env);
4289 }
4290#endif
4291}
4292
4293void helper_flds_FT0(uint32_t val)
4294{
4295 union {
4296 float32 f;
4297 uint32_t i;
4298 } u;
4299 u.i = val;
4300 FT0 = float32_to_floatx(u.f, &env->fp_status);
4301}
4302
4303void helper_fldl_FT0(uint64_t val)
4304{
4305 union {
4306 float64 f;
4307 uint64_t i;
4308 } u;
4309 u.i = val;
4310 FT0 = float64_to_floatx(u.f, &env->fp_status);
4311}
4312
4313void helper_fildl_FT0(int32_t val)
4314{
4315 FT0 = int32_to_floatx(val, &env->fp_status);
4316}
4317
4318void helper_flds_ST0(uint32_t val)
4319{
4320 int new_fpstt;
4321 union {
4322 float32 f;
4323 uint32_t i;
4324 } u;
4325 new_fpstt = (env->fpstt - 1) & 7;
4326 u.i = val;
4327 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4328 env->fpstt = new_fpstt;
4329 env->fptags[new_fpstt] = 0; /* validate stack entry */
4330}
4331
4332void helper_fldl_ST0(uint64_t val)
4333{
4334 int new_fpstt;
4335 union {
4336 float64 f;
4337 uint64_t i;
4338 } u;
4339 new_fpstt = (env->fpstt - 1) & 7;
4340 u.i = val;
4341 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4342 env->fpstt = new_fpstt;
4343 env->fptags[new_fpstt] = 0; /* validate stack entry */
4344}
4345
4346void helper_fildl_ST0(int32_t val)
4347{
4348 int new_fpstt;
4349 new_fpstt = (env->fpstt - 1) & 7;
4350 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4351 env->fpstt = new_fpstt;
4352 env->fptags[new_fpstt] = 0; /* validate stack entry */
4353}
4354
4355void helper_fildll_ST0(int64_t val)
4356{
4357 int new_fpstt;
4358 new_fpstt = (env->fpstt - 1) & 7;
4359 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4360 env->fpstt = new_fpstt;
4361 env->fptags[new_fpstt] = 0; /* validate stack entry */
4362}
4363
4364#ifndef VBOX
4365uint32_t helper_fsts_ST0(void)
4366#else
4367RTCCUINTREG helper_fsts_ST0(void)
4368#endif
4369{
4370 union {
4371 float32 f;
4372 uint32_t i;
4373 } u;
4374 u.f = floatx_to_float32(ST0, &env->fp_status);
4375 return u.i;
4376}
4377
4378uint64_t helper_fstl_ST0(void)
4379{
4380 union {
4381 float64 f;
4382 uint64_t i;
4383 } u;
4384 u.f = floatx_to_float64(ST0, &env->fp_status);
4385 return u.i;
4386}
4387
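/* FIST/FISTT to 16 bits: values that do not fit into int16_t are stored as the x87
   integer indefinite value (-32768). */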
4388#ifndef VBOX
4389int32_t helper_fist_ST0(void)
4390#else
4391RTCCINTREG helper_fist_ST0(void)
4392#endif
4393{
4394 int32_t val;
4395 val = floatx_to_int32(ST0, &env->fp_status);
4396 if (val != (int16_t)val)
4397 val = -32768;
4398 return val;
4399}
4400
4401#ifndef VBOX
4402int32_t helper_fistl_ST0(void)
4403#else
4404RTCCINTREG helper_fistl_ST0(void)
4405#endif
4406{
4407 int32_t val;
4408 val = floatx_to_int32(ST0, &env->fp_status);
4409 return val;
4410}
4411
4412int64_t helper_fistll_ST0(void)
4413{
4414 int64_t val;
4415 val = floatx_to_int64(ST0, &env->fp_status);
4416 return val;
4417}
4418
4419#ifndef VBOX
4420int32_t helper_fistt_ST0(void)
4421#else
4422RTCCINTREG helper_fistt_ST0(void)
4423#endif
4424{
4425 int32_t val;
4426 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4427 if (val != (int16_t)val)
4428 val = -32768;
4429 return val;
4430}
4431
4432#ifndef VBOX
4433int32_t helper_fisttl_ST0(void)
4434#else
4435RTCCINTREG helper_fisttl_ST0(void)
4436#endif
4437{
4438 int32_t val;
4439 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4440 return val;
4441}
4442
4443int64_t helper_fisttll_ST0(void)
4444{
4445 int64_t val;
4446 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4447 return val;
4448}
4449
4450void helper_fldt_ST0(target_ulong ptr)
4451{
4452 int new_fpstt;
4453 new_fpstt = (env->fpstt - 1) & 7;
4454 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4455 env->fpstt = new_fpstt;
4456 env->fptags[new_fpstt] = 0; /* validate stack entry */
4457}
4458
4459void helper_fstt_ST0(target_ulong ptr)
4460{
4461 helper_fstt(ST0, ptr);
4462}
4463
4464void helper_fpush(void)
4465{
4466 fpush();
4467}
4468
4469void helper_fpop(void)
4470{
4471 fpop();
4472}
4473
4474void helper_fdecstp(void)
4475{
4476 env->fpstt = (env->fpstt - 1) & 7;
4477 env->fpus &= (~0x4700);
4478}
4479
4480void helper_fincstp(void)
4481{
4482 env->fpstt = (env->fpstt + 1) & 7;
4483 env->fpus &= (~0x4700);
4484}
4485
4486/* FPU move */
4487
4488void helper_ffree_STN(int st_index)
4489{
4490 env->fptags[(env->fpstt + st_index) & 7] = 1;
4491}
4492
4493void helper_fmov_ST0_FT0(void)
4494{
4495 ST0 = FT0;
4496}
4497
4498void helper_fmov_FT0_STN(int st_index)
4499{
4500 FT0 = ST(st_index);
4501}
4502
4503void helper_fmov_ST0_STN(int st_index)
4504{
4505 ST0 = ST(st_index);
4506}
4507
4508void helper_fmov_STN_ST0(int st_index)
4509{
4510 ST(st_index) = ST0;
4511}
4512
4513void helper_fxchg_ST0_STN(int st_index)
4514{
4515 CPU86_LDouble tmp;
4516 tmp = ST(st_index);
4517 ST(st_index) = ST0;
4518 ST0 = tmp;
4519}
4520
4521/* FPU operations */
4522
4523static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4524
4525void helper_fcom_ST0_FT0(void)
4526{
4527 int ret;
4528
4529 ret = floatx_compare(ST0, FT0, &env->fp_status);
4530 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4531}
4532
4533void helper_fucom_ST0_FT0(void)
4534{
4535 int ret;
4536
4537 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4538 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4539}
4540
4541static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4542
4543void helper_fcomi_ST0_FT0(void)
4544{
4545 int eflags;
4546 int ret;
4547
4548 ret = floatx_compare(ST0, FT0, &env->fp_status);
4549 eflags = helper_cc_compute_all(CC_OP);
4550 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4551 CC_SRC = eflags;
4552}
4553
4554void helper_fucomi_ST0_FT0(void)
4555{
4556 int eflags;
4557 int ret;
4558
4559 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4560 eflags = helper_cc_compute_all(CC_OP);
4561 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4562 CC_SRC = eflags;
4563}
4564
4565void helper_fadd_ST0_FT0(void)
4566{
4567 ST0 += FT0;
4568}
4569
4570void helper_fmul_ST0_FT0(void)
4571{
4572 ST0 *= FT0;
4573}
4574
4575void helper_fsub_ST0_FT0(void)
4576{
4577 ST0 -= FT0;
4578}
4579
4580void helper_fsubr_ST0_FT0(void)
4581{
4582 ST0 = FT0 - ST0;
4583}
4584
4585void helper_fdiv_ST0_FT0(void)
4586{
4587 ST0 = helper_fdiv(ST0, FT0);
4588}
4589
4590void helper_fdivr_ST0_FT0(void)
4591{
4592 ST0 = helper_fdiv(FT0, ST0);
4593}
4594
4595/* fp operations between STN and ST0 */
4596
4597void helper_fadd_STN_ST0(int st_index)
4598{
4599 ST(st_index) += ST0;
4600}
4601
4602void helper_fmul_STN_ST0(int st_index)
4603{
4604 ST(st_index) *= ST0;
4605}
4606
4607void helper_fsub_STN_ST0(int st_index)
4608{
4609 ST(st_index) -= ST0;
4610}
4611
4612void helper_fsubr_STN_ST0(int st_index)
4613{
4614 CPU86_LDouble *p;
4615 p = &ST(st_index);
4616 *p = ST0 - *p;
4617}
4618
4619void helper_fdiv_STN_ST0(int st_index)
4620{
4621 CPU86_LDouble *p;
4622 p = &ST(st_index);
4623 *p = helper_fdiv(*p, ST0);
4624}
4625
4626void helper_fdivr_STN_ST0(int st_index)
4627{
4628 CPU86_LDouble *p;
4629 p = &ST(st_index);
4630 *p = helper_fdiv(ST0, *p);
4631}
4632
4633/* misc FPU operations */
4634void helper_fchs_ST0(void)
4635{
4636 ST0 = floatx_chs(ST0);
4637}
4638
4639void helper_fabs_ST0(void)
4640{
4641 ST0 = floatx_abs(ST0);
4642}
4643
4644void helper_fld1_ST0(void)
4645{
4646 ST0 = f15rk[1];
4647}
4648
4649void helper_fldl2t_ST0(void)
4650{
4651 ST0 = f15rk[6];
4652}
4653
4654void helper_fldl2e_ST0(void)
4655{
4656 ST0 = f15rk[5];
4657}
4658
4659void helper_fldpi_ST0(void)
4660{
4661 ST0 = f15rk[2];
4662}
4663
4664void helper_fldlg2_ST0(void)
4665{
4666 ST0 = f15rk[3];
4667}
4668
4669void helper_fldln2_ST0(void)
4670{
4671 ST0 = f15rk[4];
4672}
4673
4674void helper_fldz_ST0(void)
4675{
4676 ST0 = f15rk[0];
4677}
4678
4679void helper_fldz_FT0(void)
4680{
4681 FT0 = f15rk[0];
4682}
4683
4684#ifndef VBOX
4685uint32_t helper_fnstsw(void)
4686#else
4687RTCCUINTREG helper_fnstsw(void)
4688#endif
4689{
4690 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4691}
4692
4693#ifndef VBOX
4694uint32_t helper_fnstcw(void)
4695#else
4696RTCCUINTREG helper_fnstcw(void)
4697#endif
4698{
4699 return env->fpuc;
4700}
4701
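/* Propagates the x87 control word into the softfloat status: FCW bits 10-11 (RC)
   select the rounding mode and, when 80-bit doubles are used, bits 8-9 (PC) select
   the rounding precision (32/64/80 bits). */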
4702static void update_fp_status(void)
4703{
4704 int rnd_type;
4705
4706 /* set rounding mode */
4707 switch(env->fpuc & RC_MASK) {
4708 default:
4709 case RC_NEAR:
4710 rnd_type = float_round_nearest_even;
4711 break;
4712 case RC_DOWN:
4713 rnd_type = float_round_down;
4714 break;
4715 case RC_UP:
4716 rnd_type = float_round_up;
4717 break;
4718 case RC_CHOP:
4719 rnd_type = float_round_to_zero;
4720 break;
4721 }
4722 set_float_rounding_mode(rnd_type, &env->fp_status);
4723#ifdef FLOATX80
4724 switch((env->fpuc >> 8) & 3) {
4725 case 0:
4726 rnd_type = 32;
4727 break;
4728 case 2:
4729 rnd_type = 64;
4730 break;
4731 case 3:
4732 default:
4733 rnd_type = 80;
4734 break;
4735 }
4736 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4737#endif
4738}
4739
4740void helper_fldcw(uint32_t val)
4741{
4742 env->fpuc = val;
4743 update_fp_status();
4744}
4745
4746void helper_fclex(void)
4747{
4748 env->fpus &= 0x7f00;
4749}
4750
4751void helper_fwait(void)
4752{
4753 if (env->fpus & FPUS_SE)
4754 fpu_raise_exception();
4755}
4756
4757void helper_fninit(void)
4758{
4759 env->fpus = 0;
4760 env->fpstt = 0;
4761 env->fpuc = 0x37f;
4762 env->fptags[0] = 1;
4763 env->fptags[1] = 1;
4764 env->fptags[2] = 1;
4765 env->fptags[3] = 1;
4766 env->fptags[4] = 1;
4767 env->fptags[5] = 1;
4768 env->fptags[6] = 1;
4769 env->fptags[7] = 1;
4770}
4771
4772/* BCD ops */
4773
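/* FBLD/FBSTP use a 10-byte packed BCD operand: bytes 0..8 hold 18 BCD digits
   (two per byte, least significant byte first), bit 7 of byte 9 is the sign. */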
4774void helper_fbld_ST0(target_ulong ptr)
4775{
4776 CPU86_LDouble tmp;
4777 uint64_t val;
4778 unsigned int v;
4779 int i;
4780
4781 val = 0;
4782 for(i = 8; i >= 0; i--) {
4783 v = ldub(ptr + i);
4784 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4785 }
4786 tmp = val;
4787 if (ldub(ptr + 9) & 0x80)
4788 tmp = -tmp;
4789 fpush();
4790 ST0 = tmp;
4791}
4792
4793void helper_fbst_ST0(target_ulong ptr)
4794{
4795 int v;
4796 target_ulong mem_ref, mem_end;
4797 int64_t val;
4798
4799 val = floatx_to_int64(ST0, &env->fp_status);
4800 mem_ref = ptr;
4801 mem_end = mem_ref + 9;
4802 if (val < 0) {
4803 stb(mem_end, 0x80);
4804 val = -val;
4805 } else {
4806 stb(mem_end, 0x00);
4807 }
4808 while (mem_ref < mem_end) {
4809 if (val == 0)
4810 break;
4811 v = val % 100;
4812 val = val / 100;
4813 v = ((v / 10) << 4) | (v % 10);
4814 stb(mem_ref++, v);
4815 }
4816 while (mem_ref < mem_end) {
4817 stb(mem_ref++, 0);
4818 }
4819}
4820
4821void helper_f2xm1(void)
4822{
4823 ST0 = pow(2.0,ST0) - 1.0;
4824}
4825
4826void helper_fyl2x(void)
4827{
4828 CPU86_LDouble fptemp;
4829
4830 fptemp = ST0;
4831 if (fptemp>0.0){
4832 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4833 ST1 *= fptemp;
4834 fpop();
4835 } else {
4836 env->fpus &= (~0x4700);
4837 env->fpus |= 0x400;
4838 }
4839}
4840
4841void helper_fptan(void)
4842{
4843 CPU86_LDouble fptemp;
4844
4845 fptemp = ST0;
4846 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4847 env->fpus |= 0x400;
4848 } else {
4849 ST0 = tan(fptemp);
4850 fpush();
4851 ST0 = 1.0;
4852 env->fpus &= (~0x400); /* C2 <-- 0 */
4853 /* the above code is for |arg| < 2**52 only */
4854 }
4855}
4856
4857void helper_fpatan(void)
4858{
4859 CPU86_LDouble fptemp, fpsrcop;
4860
4861 fpsrcop = ST1;
4862 fptemp = ST0;
4863 ST1 = atan2(fpsrcop,fptemp);
4864 fpop();
4865}
4866
4867void helper_fxtract(void)
4868{
4869 CPU86_LDoubleU temp;
4870 unsigned int expdif;
4871
4872 temp.d = ST0;
4873 expdif = EXPD(temp) - EXPBIAS;
4874 /* DP exponent bias */
4875 ST0 = expdif;
4876 fpush();
4877 BIASEXPONENT(temp);
4878 ST0 = temp.d;
4879}
4880
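/* FPREM1/FPREM: partial remainder of ST0 by ST1.  When the exponent difference is
   small enough the reduction completes: C2 is cleared and the three low quotient bits
   are reported in C0/C3/C1.  Otherwise only a partial reduction is performed, C2 is
   set and the guest is expected to iterate.  FPREM1 rounds the quotient to nearest
   (IEEE remainder) while FPREM truncates it towards zero. */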
4881void helper_fprem1(void)
4882{
4883 CPU86_LDouble dblq, fpsrcop, fptemp;
4884 CPU86_LDoubleU fpsrcop1, fptemp1;
4885 int expdif;
4886 signed long long int q;
4887
4888#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4889 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4890#else
4891 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4892#endif
4893 ST0 = 0.0 / 0.0; /* NaN */
4894 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4895 return;
4896 }
4897
4898 fpsrcop = ST0;
4899 fptemp = ST1;
4900 fpsrcop1.d = fpsrcop;
4901 fptemp1.d = fptemp;
4902 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4903
4904 if (expdif < 0) {
4905 /* optimisation? taken from the AMD docs */
4906 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4907 /* ST0 is unchanged */
4908 return;
4909 }
4910
4911 if (expdif < 53) {
4912 dblq = fpsrcop / fptemp;
4913 /* round dblq towards nearest integer */
4914 dblq = rint(dblq);
4915 ST0 = fpsrcop - fptemp * dblq;
4916
4917 /* convert dblq to q by truncating towards zero */
4918 if (dblq < 0.0)
4919 q = (signed long long int)(-dblq);
4920 else
4921 q = (signed long long int)dblq;
4922
4923 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4924 /* (C0,C3,C1) <-- (q2,q1,q0) */
4925 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4926 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4927 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4928 } else {
4929 env->fpus |= 0x400; /* C2 <-- 1 */
4930 fptemp = pow(2.0, expdif - 50);
4931 fpsrcop = (ST0 / ST1) / fptemp;
4932 /* fpsrcop = integer obtained by chopping */
4933 fpsrcop = (fpsrcop < 0.0) ?
4934 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4935 ST0 -= (ST1 * fpsrcop * fptemp);
4936 }
4937}
4938
4939void helper_fprem(void)
4940{
4941 CPU86_LDouble dblq, fpsrcop, fptemp;
4942 CPU86_LDoubleU fpsrcop1, fptemp1;
4943 int expdif;
4944 signed long long int q;
4945
4946#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4947 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4948#else
4949 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4950#endif
4951 ST0 = 0.0 / 0.0; /* NaN */
4952 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4953 return;
4954 }
4955
4956 fpsrcop = (CPU86_LDouble)ST0;
4957 fptemp = (CPU86_LDouble)ST1;
4958 fpsrcop1.d = fpsrcop;
4959 fptemp1.d = fptemp;
4960 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4961
4962 if (expdif < 0) {
4963 /* optimisation? taken from the AMD docs */
4964 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4965 /* ST0 is unchanged */
4966 return;
4967 }
4968
4969 if ( expdif < 53 ) {
4970 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4971 /* round dblq towards zero */
4972 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4973 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4974
4975 /* convert dblq to q by truncating towards zero */
4976 if (dblq < 0.0)
4977 q = (signed long long int)(-dblq);
4978 else
4979 q = (signed long long int)dblq;
4980
4981 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4982 /* (C0,C3,C1) <-- (q2,q1,q0) */
4983 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4984 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4985 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4986 } else {
4987 int N = 32 + (expdif % 32); /* as per AMD docs */
4988 env->fpus |= 0x400; /* C2 <-- 1 */
4989 fptemp = pow(2.0, (double)(expdif - N));
4990 fpsrcop = (ST0 / ST1) / fptemp;
4991 /* fpsrcop = integer obtained by chopping */
4992 fpsrcop = (fpsrcop < 0.0) ?
4993 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4994 ST0 -= (ST1 * fpsrcop * fptemp);
4995 }
4996}
4997
4998void helper_fyl2xp1(void)
4999{
5000 CPU86_LDouble fptemp;
5001
5002 fptemp = ST0;
5003 if ((fptemp+1.0)>0.0) {
5004 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5005 ST1 *= fptemp;
5006 fpop();
5007 } else {
5008 env->fpus &= (~0x4700);
5009 env->fpus |= 0x400;
5010 }
5011}
5012
5013void helper_fsqrt(void)
5014{
5015 CPU86_LDouble fptemp;
5016
5017 fptemp = ST0;
5018 if (fptemp<0.0) {
5019 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5020 env->fpus |= 0x400;
5021 }
5022 ST0 = sqrt(fptemp);
5023}
5024
5025void helper_fsincos(void)
5026{
5027 CPU86_LDouble fptemp;
5028
5029 fptemp = ST0;
5030 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5031 env->fpus |= 0x400;
5032 } else {
5033 ST0 = sin(fptemp);
5034 fpush();
5035 ST0 = cos(fptemp);
5036 env->fpus &= (~0x400); /* C2 <-- 0 */
5037 /* the above code is for |arg| < 2**63 only */
5038 }
5039}
5040
5041void helper_frndint(void)
5042{
5043 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5044}
5045
5046void helper_fscale(void)
5047{
5048 ST0 = ldexp (ST0, (int)(ST1));
5049}
5050
5051void helper_fsin(void)
5052{
5053 CPU86_LDouble fptemp;
5054
5055 fptemp = ST0;
5056 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5057 env->fpus |= 0x400;
5058 } else {
5059 ST0 = sin(fptemp);
5060 env->fpus &= (~0x400); /* C2 <-- 0 */
5061 /* the above code is for |arg| < 2**53 only */
5062 }
5063}
5064
5065void helper_fcos(void)
5066{
5067 CPU86_LDouble fptemp;
5068
5069 fptemp = ST0;
5070 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5071 env->fpus |= 0x400;
5072 } else {
5073 ST0 = cos(fptemp);
5074 env->fpus &= (~0x400); /* C2 <-- 0 */
5075 /* the above code is for |arg| < 2**63 only */
5076 }
5077}
5078
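/* FXAM: classify ST0 into C3/C2/C0 (NaN=C0, normal=C2, infinity=C2+C0, zero=C3,
   denormal=C3+C2) and report the sign in C1.  Empty registers are not detected,
   see the XXX below. */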
5079void helper_fxam_ST0(void)
5080{
5081 CPU86_LDoubleU temp;
5082 int expdif;
5083
5084 temp.d = ST0;
5085
5086 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5087 if (SIGND(temp))
5088 env->fpus |= 0x200; /* C1 <-- 1 */
5089
5090 /* XXX: test fptags too */
5091 expdif = EXPD(temp);
5092 if (expdif == MAXEXPD) {
5093#ifdef USE_X86LDOUBLE
5094 if (MANTD(temp) == 0x8000000000000000ULL)
5095#else
5096 if (MANTD(temp) == 0)
5097#endif
5098 env->fpus |= 0x500 /*Infinity*/;
5099 else
5100 env->fpus |= 0x100 /*NaN*/;
5101 } else if (expdif == 0) {
5102 if (MANTD(temp) == 0)
5103 env->fpus |= 0x4000 /*Zero*/;
5104 else
5105 env->fpus |= 0x4400 /*Denormal*/;
5106 } else {
5107 env->fpus |= 0x400;
5108 }
5109}
5110
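/* FNSTENV: write the x87 environment image (control/status/tag words).  The full tag
   word uses two bits per register: 0=valid, 1=zero, 2=special (NaN/infinity/denormal),
   3=empty.  Instruction and operand pointers are not tracked here and are stored as
   zero. */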
5111void helper_fstenv(target_ulong ptr, int data32)
5112{
5113 int fpus, fptag, exp, i;
5114 uint64_t mant;
5115 CPU86_LDoubleU tmp;
5116
5117 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5118 fptag = 0;
5119 for (i=7; i>=0; i--) {
5120 fptag <<= 2;
5121 if (env->fptags[i]) {
5122 fptag |= 3;
5123 } else {
5124 tmp.d = env->fpregs[i].d;
5125 exp = EXPD(tmp);
5126 mant = MANTD(tmp);
5127 if (exp == 0 && mant == 0) {
5128 /* zero */
5129 fptag |= 1;
5130 } else if (exp == 0 || exp == MAXEXPD
5131#ifdef USE_X86LDOUBLE
5132 || (mant & (1LL << 63)) == 0
5133#endif
5134 ) {
5135 /* NaNs, infinity, denormal */
5136 fptag |= 2;
5137 }
5138 }
5139 }
5140 if (data32) {
5141 /* 32 bit */
5142 stl(ptr, env->fpuc);
5143 stl(ptr + 4, fpus);
5144 stl(ptr + 8, fptag);
5145 stl(ptr + 12, 0); /* fpip */
5146 stl(ptr + 16, 0); /* fpcs */
5147 stl(ptr + 20, 0); /* fpoo */
5148 stl(ptr + 24, 0); /* fpos */
5149 } else {
5150 /* 16 bit */
5151 stw(ptr, env->fpuc);
5152 stw(ptr + 2, fpus);
5153 stw(ptr + 4, fptag);
5154 stw(ptr + 6, 0);
5155 stw(ptr + 8, 0);
5156 stw(ptr + 10, 0);
5157 stw(ptr + 12, 0);
5158 }
5159}
5160
5161void helper_fldenv(target_ulong ptr, int data32)
5162{
5163 int i, fpus, fptag;
5164
5165 if (data32) {
5166 env->fpuc = lduw(ptr);
5167 fpus = lduw(ptr + 4);
5168 fptag = lduw(ptr + 8);
5169 }
5170 else {
5171 env->fpuc = lduw(ptr);
5172 fpus = lduw(ptr + 2);
5173 fptag = lduw(ptr + 4);
5174 }
5175 env->fpstt = (fpus >> 11) & 7;
5176 env->fpus = fpus & ~0x3800;
5177 for(i = 0;i < 8; i++) {
5178 env->fptags[i] = ((fptag & 3) == 3);
5179 fptag >>= 2;
5180 }
5181}
5182
5183void helper_fsave(target_ulong ptr, int data32)
5184{
5185 CPU86_LDouble tmp;
5186 int i;
5187
5188 helper_fstenv(ptr, data32);
5189
5190 ptr += (14 << data32);
5191 for(i = 0;i < 8; i++) {
5192 tmp = ST(i);
5193 helper_fstt(tmp, ptr);
5194 ptr += 10;
5195 }
5196
5197 /* fninit */
5198 env->fpus = 0;
5199 env->fpstt = 0;
5200 env->fpuc = 0x37f;
5201 env->fptags[0] = 1;
5202 env->fptags[1] = 1;
5203 env->fptags[2] = 1;
5204 env->fptags[3] = 1;
5205 env->fptags[4] = 1;
5206 env->fptags[5] = 1;
5207 env->fptags[6] = 1;
5208 env->fptags[7] = 1;
5209}
5210
5211void helper_frstor(target_ulong ptr, int data32)
5212{
5213 CPU86_LDouble tmp;
5214 int i;
5215
5216 helper_fldenv(ptr, data32);
5217 ptr += (14 << data32);
5218
5219 for(i = 0;i < 8; i++) {
5220 tmp = helper_fldt(ptr);
5221 ST(i) = tmp;
5222 ptr += 10;
5223 }
5224}
5225
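/* FXSAVE: 512-byte, 16-byte aligned image.  The tag word is stored in abridged form
   (one bit per register, 1=valid), the FP registers go to offset 0x20 in 16-byte
   slots and the XMM registers to offset 0xa0 - unless the fast-FXSAVE optimization
   (EFER.FFXSR, 64-bit mode, CPL 0) lets them be skipped. */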
5226void helper_fxsave(target_ulong ptr, int data64)
5227{
5228 int fpus, fptag, i, nb_xmm_regs;
5229 CPU86_LDouble tmp;
5230 target_ulong addr;
5231
5232 /* The operand must be 16 byte aligned */
5233 if (ptr & 0xf) {
5234 raise_exception(EXCP0D_GPF);
5235 }
5236
5237 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5238 fptag = 0;
5239 for(i = 0; i < 8; i++) {
5240 fptag |= (env->fptags[i] << i);
5241 }
5242 stw(ptr, env->fpuc);
5243 stw(ptr + 2, fpus);
5244 stw(ptr + 4, fptag ^ 0xff);
5245#ifdef TARGET_X86_64
5246 if (data64) {
5247 stq(ptr + 0x08, 0); /* rip */
5248 stq(ptr + 0x10, 0); /* rdp */
5249 } else
5250#endif
5251 {
5252 stl(ptr + 0x08, 0); /* eip */
5253 stl(ptr + 0x0c, 0); /* sel */
5254 stl(ptr + 0x10, 0); /* dp */
5255 stl(ptr + 0x14, 0); /* sel */
5256 }
5257
5258 addr = ptr + 0x20;
5259 for(i = 0;i < 8; i++) {
5260 tmp = ST(i);
5261 helper_fstt(tmp, addr);
5262 addr += 16;
5263 }
5264
5265 if (env->cr[4] & CR4_OSFXSR_MASK) {
5266 /* XXX: finish it */
5267 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5268 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5269 if (env->hflags & HF_CS64_MASK)
5270 nb_xmm_regs = 16;
5271 else
5272 nb_xmm_regs = 8;
5273 addr = ptr + 0xa0;
5274 /* Fast FXSAVE leaves out the XMM registers */
5275 if (!(env->efer & MSR_EFER_FFXSR)
5276 || (env->hflags & HF_CPL_MASK)
5277 || !(env->hflags & HF_LMA_MASK)) {
5278 for(i = 0; i < nb_xmm_regs; i++) {
5279 stq(addr, env->xmm_regs[i].XMM_Q(0));
5280 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5281 addr += 16;
5282 }
5283 }
5284 }
5285}
5286
5287void helper_fxrstor(target_ulong ptr, int data64)
5288{
5289 int i, fpus, fptag, nb_xmm_regs;
5290 CPU86_LDouble tmp;
5291 target_ulong addr;
5292
5293 /* The operand must be 16 byte aligned */
5294 if (ptr & 0xf) {
5295 raise_exception(EXCP0D_GPF);
5296 }
5297
5298 env->fpuc = lduw(ptr);
5299 fpus = lduw(ptr + 2);
5300 fptag = lduw(ptr + 4);
5301 env->fpstt = (fpus >> 11) & 7;
5302 env->fpus = fpus & ~0x3800;
5303 fptag ^= 0xff;
5304 for(i = 0;i < 8; i++) {
5305 env->fptags[i] = ((fptag >> i) & 1);
5306 }
5307
5308 addr = ptr + 0x20;
5309 for(i = 0;i < 8; i++) {
5310 tmp = helper_fldt(addr);
5311 ST(i) = tmp;
5312 addr += 16;
5313 }
5314
5315 if (env->cr[4] & CR4_OSFXSR_MASK) {
5316 /* XXX: finish it */
5317 env->mxcsr = ldl(ptr + 0x18);
5318 //ldl(ptr + 0x1c);
5319 if (env->hflags & HF_CS64_MASK)
5320 nb_xmm_regs = 16;
5321 else
5322 nb_xmm_regs = 8;
5323 addr = ptr + 0xa0;
5324 /* Fast FXRSTOR leaves out the XMM registers */
5325 if (!(env->efer & MSR_EFER_FFXSR)
5326 || (env->hflags & HF_CPL_MASK)
5327 || !(env->hflags & HF_LMA_MASK)) {
5328 for(i = 0; i < nb_xmm_regs; i++) {
5329#if !defined(VBOX) || __GNUC__ < 4
5330 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5331 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5332#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5333# if 1
5334 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5335 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5336 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5337 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5338# else
5339 /* this works fine on Mac OS X, gcc 4.0.1 */
5340 uint64_t u64 = ldq(addr);
5341 env->xmm_regs[i].XMM_Q(0) = u64;
5342 u64 = ldq(addr + 8);
5343 env->xmm_regs[i].XMM_Q(1) = u64;
5344# endif
5345#endif
5346 addr += 16;
5347 }
5348 }
5349 }
5350}
5351
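/* Conversion between the 80-bit extended memory format and the internal FP
   representation.  Without USE_X86LDOUBLE the internal format is a 64-bit double,
   so precision is lost and unnormals are not handled specially. */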
5352#ifndef USE_X86LDOUBLE
5353
5354void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5355{
5356 CPU86_LDoubleU temp;
5357 int e;
5358
5359 temp.d = f;
5360 /* mantissa */
5361 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5362 /* exponent + sign */
5363 e = EXPD(temp) - EXPBIAS + 16383;
5364 e |= SIGND(temp) >> 16;
5365 *pexp = e;
5366}
5367
5368CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5369{
5370 CPU86_LDoubleU temp;
5371 int e;
5372 uint64_t ll;
5373
5374 /* XXX: handle overflow ? */
5375 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5376 e |= (upper >> 4) & 0x800; /* sign */
5377 ll = (mant >> 11) & ((1LL << 52) - 1);
5378#ifdef __arm__
5379 temp.l.upper = (e << 20) | (ll >> 32);
5380 temp.l.lower = ll;
5381#else
5382 temp.ll = ll | ((uint64_t)e << 52);
5383#endif
5384 return temp.d;
5385}
5386
5387#else
5388
5389void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5390{
5391 CPU86_LDoubleU temp;
5392
5393 temp.d = f;
5394 *pmant = temp.l.lower;
5395 *pexp = temp.l.upper;
5396}
5397
5398CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5399{
5400 CPU86_LDoubleU temp;
5401
5402 temp.l.upper = upper;
5403 temp.l.lower = mant;
5404 return temp.d;
5405}
5406#endif
5407
5408#ifdef TARGET_X86_64
5409
5410//#define DEBUG_MULDIV
5411
5412static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5413{
5414 *plow += a;
5415 /* carry test */
5416 if (*plow < a)
5417 (*phigh)++;
5418 *phigh += b;
5419}
5420
5421static void neg128(uint64_t *plow, uint64_t *phigh)
5422{
5423 *plow = ~ *plow;
5424 *phigh = ~ *phigh;
5425 add128(plow, phigh, 1, 0);
5426}
5427
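/* 128-by-64 bit unsigned division used by the 64-bit DIV/IDIV helpers.  When the
   high half of the dividend is non-zero, a simple bit-at-a-time restoring division
   is used; the quotient is returned in *plow and the remainder in *phigh. */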
5428/* return TRUE if overflow */
5429static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5430{
5431 uint64_t q, r, a1, a0;
5432 int i, qb, ab;
5433
5434 a0 = *plow;
5435 a1 = *phigh;
5436 if (a1 == 0) {
5437 q = a0 / b;
5438 r = a0 % b;
5439 *plow = q;
5440 *phigh = r;
5441 } else {
5442 if (a1 >= b)
5443 return 1;
5444 /* XXX: use a better algorithm */
5445 for(i = 0; i < 64; i++) {
5446 ab = a1 >> 63;
5447 a1 = (a1 << 1) | (a0 >> 63);
5448 if (ab || a1 >= b) {
5449 a1 -= b;
5450 qb = 1;
5451 } else {
5452 qb = 0;
5453 }
5454 a0 = (a0 << 1) | qb;
5455 }
5456#if defined(DEBUG_MULDIV)
5457 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5458 *phigh, *plow, b, a0, a1);
5459#endif
5460 *plow = a0;
5461 *phigh = a1;
5462 }
5463 return 0;
5464}
5465
5466/* return TRUE if overflow */
5467static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5468{
5469 int sa, sb;
5470 sa = ((int64_t)*phigh < 0);
5471 if (sa)
5472 neg128(plow, phigh);
5473 sb = (b < 0);
5474 if (sb)
5475 b = -b;
5476 if (div64(plow, phigh, b) != 0)
5477 return 1;
5478 if (sa ^ sb) {
5479 if (*plow > (1ULL << 63))
5480 return 1;
5481 *plow = - *plow;
5482 } else {
5483 if (*plow >= (1ULL << 63))
5484 return 1;
5485 }
5486 if (sa)
5487 *phigh = - *phigh;
5488 return 0;
5489}
5490
5491void helper_mulq_EAX_T0(target_ulong t0)
5492{
5493 uint64_t r0, r1;
5494
5495 mulu64(&r0, &r1, EAX, t0);
5496 EAX = r0;
5497 EDX = r1;
5498 CC_DST = r0;
5499 CC_SRC = r1;
5500}
5501
5502void helper_imulq_EAX_T0(target_ulong t0)
5503{
5504 uint64_t r0, r1;
5505
5506 muls64(&r0, &r1, EAX, t0);
5507 EAX = r0;
5508 EDX = r1;
5509 CC_DST = r0;
5510 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5511}
5512
5513target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5514{
5515 uint64_t r0, r1;
5516
5517 muls64(&r0, &r1, t0, t1);
5518 CC_DST = r0;
5519 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5520 return r0;
5521}
5522
5523void helper_divq_EAX(target_ulong t0)
5524{
5525 uint64_t r0, r1;
5526 if (t0 == 0) {
5527 raise_exception(EXCP00_DIVZ);
5528 }
5529 r0 = EAX;
5530 r1 = EDX;
5531 if (div64(&r0, &r1, t0))
5532 raise_exception(EXCP00_DIVZ);
5533 EAX = r0;
5534 EDX = r1;
5535}
5536
5537void helper_idivq_EAX(target_ulong t0)
5538{
5539 uint64_t r0, r1;
5540 if (t0 == 0) {
5541 raise_exception(EXCP00_DIVZ);
5542 }
5543 r0 = EAX;
5544 r1 = EDX;
5545 if (idiv64(&r0, &r1, t0))
5546 raise_exception(EXCP00_DIVZ);
5547 EAX = r0;
5548 EDX = r1;
5549}
5550#endif
5551
5552static void do_hlt(void)
5553{
5554 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5555 env->halted = 1;
5556 env->exception_index = EXCP_HLT;
5557 cpu_loop_exit();
5558}
5559
5560void helper_hlt(int next_eip_addend)
5561{
5562 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5563 EIP += next_eip_addend;
5564
5565 do_hlt();
5566}
5567
5568void helper_monitor(target_ulong ptr)
5569{
5570#ifdef VBOX
5571 if ((uint32_t)ECX > 1)
5572 raise_exception(EXCP0D_GPF);
5573#else /* !VBOX */
5574 if ((uint32_t)ECX != 0)
5575 raise_exception(EXCP0D_GPF);
5576#endif /* !VBOX */
5577 /* XXX: store address ? */
5578 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5579}
5580
5581void helper_mwait(int next_eip_addend)
5582{
5583 if ((uint32_t)ECX != 0)
5584 raise_exception(EXCP0D_GPF);
5585#ifdef VBOX
5586 helper_hlt(next_eip_addend);
5587#else /* !VBOX */
5588 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5589 EIP += next_eip_addend;
5590
5591 /* XXX: not complete but not completely erroneous */
5592 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5593 /* more than one CPU: do not sleep because another CPU may
5594 wake this one */
5595 } else {
5596 do_hlt();
5597 }
5598#endif /* !VBOX */
5599}
5600
5601void helper_debug(void)
5602{
5603 env->exception_index = EXCP_DEBUG;
5604 cpu_loop_exit();
5605}
5606
5607void helper_reset_rf(void)
5608{
5609 env->eflags &= ~RF_MASK;
5610}
5611
5612void helper_raise_interrupt(int intno, int next_eip_addend)
5613{
5614 raise_interrupt(intno, 1, 0, next_eip_addend);
5615}
5616
5617void helper_raise_exception(int exception_index)
5618{
5619 raise_exception(exception_index);
5620}
5621
5622void helper_cli(void)
5623{
5624 env->eflags &= ~IF_MASK;
5625}
5626
5627void helper_sti(void)
5628{
5629 env->eflags |= IF_MASK;
5630}
5631
5632#ifdef VBOX
5633void helper_cli_vme(void)
5634{
5635 env->eflags &= ~VIF_MASK;
5636}
5637
5638void helper_sti_vme(void)
5639{
5640 /* First check, then change eflags according to the AMD manual */
5641 if (env->eflags & VIP_MASK) {
5642 raise_exception(EXCP0D_GPF);
5643 }
5644 env->eflags |= VIF_MASK;
5645}
5646#endif /* VBOX */
5647
5648#if 0
5649/* vm86plus instructions */
5650void helper_cli_vm(void)
5651{
5652 env->eflags &= ~VIF_MASK;
5653}
5654
5655void helper_sti_vm(void)
5656{
5657 env->eflags |= VIF_MASK;
5658 if (env->eflags & VIP_MASK) {
5659 raise_exception(EXCP0D_GPF);
5660 }
5661}
5662#endif
5663
5664void helper_set_inhibit_irq(void)
5665{
5666 env->hflags |= HF_INHIBIT_IRQ_MASK;
5667}
5668
5669void helper_reset_inhibit_irq(void)
5670{
5671 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5672}
5673
5674void helper_boundw(target_ulong a0, int v)
5675{
5676 int low, high;
5677 low = ldsw(a0);
5678 high = ldsw(a0 + 2);
5679 v = (int16_t)v;
5680 if (v < low || v > high) {
5681 raise_exception(EXCP05_BOUND);
5682 }
5683}
5684
5685void helper_boundl(target_ulong a0, int v)
5686{
5687 int low, high;
5688 low = ldl(a0);
5689 high = ldl(a0 + 4);
5690 if (v < low || v > high) {
5691 raise_exception(EXCP05_BOUND);
5692 }
5693}
5694
5695static float approx_rsqrt(float a)
5696{
5697 return 1.0 / sqrt(a);
5698}
5699
5700static float approx_rcp(float a)
5701{
5702 return 1.0 / a;
5703}
5704
5705#if !defined(CONFIG_USER_ONLY)
5706
5707#define MMUSUFFIX _mmu
5708
5709#define SHIFT 0
5710#include "softmmu_template.h"
5711
5712#define SHIFT 1
5713#include "softmmu_template.h"
5714
5715#define SHIFT 2
5716#include "softmmu_template.h"
5717
5718#define SHIFT 3
5719#include "softmmu_template.h"
5720
5721#endif
5722
5723#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5724/* This code assumes that a real physical address always fits into a host CPU register,
5725 which is wrong in general but true for our current use cases. */
5726RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5727{
5728 return remR3PhysReadS8(addr);
5729}
5730RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5731{
5732 return remR3PhysReadU8(addr);
5733}
5734void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5735{
5736 remR3PhysWriteU8(addr, val);
5737}
5738RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5739{
5740 return remR3PhysReadS16(addr);
5741}
5742RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5743{
5744 return remR3PhysReadU16(addr);
5745}
5746void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5747{
5748 remR3PhysWriteU16(addr, val);
5749}
5750RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5751{
5752 return remR3PhysReadS32(addr);
5753}
5754RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5755{
5756 return remR3PhysReadU32(addr);
5757}
5758void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5759{
5760 remR3PhysWriteU32(addr, val);
5761}
5762uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5763{
5764 return remR3PhysReadU64(addr);
5765}
5766void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5767{
5768 remR3PhysWriteU64(addr, val);
5769}
5770#endif /* VBOX */
5771
5772#if !defined(CONFIG_USER_ONLY)
5773/* try to fill the TLB and return an exception if error. If retaddr is
5774 NULL, it means that the function was called in C code (i.e. not
5775 from generated code or from helper.c) */
5776/* XXX: fix it to restore all registers */
5777void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5778{
5779 TranslationBlock *tb;
5780 int ret;
5781 uintptr_t pc;
5782 CPUX86State *saved_env;
5783
5784 /* XXX: hack to restore env in all cases, even if not called from
5785 generated code */
5786 saved_env = env;
5787 env = cpu_single_env;
5788
5789 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5790 if (ret) {
5791 if (retaddr) {
5792 /* now we have a real cpu fault */
5793 pc = (uintptr_t)retaddr;
5794 tb = tb_find_pc(pc);
5795 if (tb) {
5796 /* the PC is inside the translated code. It means that we have
5797 a virtual CPU fault */
5798 cpu_restore_state(tb, env, pc, NULL);
5799 }
5800 }
5801 raise_exception_err(env->exception_index, env->error_code);
5802 }
5803 env = saved_env;
5804}
5805#endif
5806
5807#ifdef VBOX
5808
5809/**
5810 * Correctly computes the eflags.
5811 * @returns eflags.
5812 * @param env1 CPU environment.
5813 */
5814uint32_t raw_compute_eflags(CPUX86State *env1)
5815{
5816 CPUX86State *savedenv = env;
5817 uint32_t efl;
5818 env = env1;
5819 efl = compute_eflags();
5820 env = savedenv;
5821 return efl;
5822}
5823
5824/**
5825 * Reads byte from virtual address in guest memory area.
5826 * XXX: is it working for any addresses? swapped out pages?
5827 * @returns read data byte.
5828 * @param env1 CPU environment.
5829 * @param pvAddr GC Virtual address.
5830 */
5831uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5832{
5833 CPUX86State *savedenv = env;
5834 uint8_t u8;
5835 env = env1;
5836 u8 = ldub_kernel(addr);
5837 env = savedenv;
5838 return u8;
5839}
5840
5841/**
5842 * Reads 16-bit word from virtual address in guest memory area.
5843 * XXX: is it working for any addresses? swapped out pages?
5844 * @returns read data word.
5845 * @param env1 CPU environment.
5846 * @param pvAddr GC Virtual address.
5847 */
5848uint16_t read_word(CPUX86State *env1, target_ulong addr)
5849{
5850 CPUX86State *savedenv = env;
5851 uint16_t u16;
5852 env = env1;
5853 u16 = lduw_kernel(addr);
5854 env = savedenv;
5855 return u16;
5856}
5857
5858/**
5859 * Reads 32-bit dword from virtual address in guest memory area.
5860 * XXX: is it working for any addresses? swapped out pages?
5861 * @returns read data dword.
5862 * @param env1 CPU environment.
5863 * @param pvAddr GC Virtual address.
5864 */
5865uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5866{
5867 CPUX86State *savedenv = env;
5868 uint32_t u32;
5869 env = env1;
5870 u32 = ldl_kernel(addr);
5871 env = savedenv;
5872 return u32;
5873}
5874
5875/**
5876 * Writes byte to virtual address in guest memory area.
5877 * XXX: is it working for any addresses? swapped out pages?
5878 *
5879 * @param env1 CPU environment.
5880 * @param pvAddr GC Virtual address.
5881 * @param val byte value
5882 */
5883void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5884{
5885 CPUX86State *savedenv = env;
5886 env = env1;
5887 stb(addr, val);
5888 env = savedenv;
5889}
5890
5891void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5892{
5893 CPUX86State *savedenv = env;
5894 env = env1;
5895 stw(addr, val);
5896 env = savedenv;
5897}
5898
5899void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5900{
5901 CPUX86State *savedenv = env;
5902 env = env1;
5903 stl(addr, val);
5904 env = savedenv;
5905}
5906
5907/**
5908 * Correctly loads selector into segment register with updating internal
5909 * qemu data/caches.
5910 * @param env1 CPU environment.
5911 * @param seg_reg Segment register.
5912 * @param selector Selector to load.
5913 */
5914void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5915{
5916 CPUX86State *savedenv = env;
5917#ifdef FORCE_SEGMENT_SYNC
5918 jmp_buf old_buf;
5919#endif
5920
5921 env = env1;
5922
5923 if ( env->eflags & X86_EFL_VM
5924 || !(env->cr[0] & X86_CR0_PE))
5925 {
5926 load_seg_vm(seg_reg, selector);
5927
5928 env = savedenv;
5929
5930 /* Successful sync. */
5931 Assert(env1->segs[seg_reg].newselector == 0);
5932 }
5933 else
5934 {
5935 /* For some reason this works even without saving/restoring the jump buffer, and since
5936 this code is time critical, we skip that. */
5937#ifdef FORCE_SEGMENT_SYNC
5938 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5939#endif
5940 if (setjmp(env1->jmp_env) == 0)
5941 {
5942 if (seg_reg == R_CS)
5943 {
5944 uint32_t e1, e2;
5945 e1 = e2 = 0;
5946 load_segment(&e1, &e2, selector);
5947 cpu_x86_load_seg_cache(env, R_CS, selector,
5948 get_seg_base(e1, e2),
5949 get_seg_limit(e1, e2),
5950 e2);
5951 }
5952 else
5953 helper_load_seg(seg_reg, selector);
5954 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5955 loading null (0) selectors, which in turn led to subtle problems like #3588. */
5956
5957 env = savedenv;
5958
5959 /* Successful sync. */
5960 Assert(env1->segs[seg_reg].newselector == 0);
5961 }
5962 else
5963 {
5964 env = savedenv;
5965
5966 /* Postpone sync until the guest uses the selector. */
5967 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5968 env1->segs[seg_reg].newselector = selector;
5969 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5970 env1->exception_index = -1;
5971 env1->error_code = 0;
5972 env1->old_exception = -1;
5973 }
5974#ifdef FORCE_SEGMENT_SYNC
5975 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5976#endif
5977 }
5978
5979}
5980
5981DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5982{
5983 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5984}
5985
5986
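/**
 * Emulates a single guest instruction by generating a one-instruction translation
 * block, executing it and tearing it down again.  The loop below re-executes while
 * EIP is unchanged (REP prefixes), and one more instruction is emulated when
 * interrupt inhibition is pending so the shadowed instruction gets handled too.
 *
 * @returns 0.
 * @param   env1    CPU environment.
 */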
5987int emulate_single_instr(CPUX86State *env1)
5988{
5989 TranslationBlock *tb;
5990 TranslationBlock *current;
5991 int flags;
5992 uint8_t *tc_ptr;
5993 target_ulong old_eip;
5994
5995 /* ensures env is loaded! */
5996 CPUX86State *savedenv = env;
5997 env = env1;
5998
5999 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
6000
6001 current = env->current_tb;
6002 env->current_tb = NULL;
6003 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6004
6005 /*
6006 * Translate only one instruction.
6007 */
6008 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6009 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6010 env->segs[R_CS].base, flags, 0);
6011
6012 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6013
6014
6015 /* tb_link_phys: */
6016 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6017 tb->jmp_next[0] = NULL;
6018 tb->jmp_next[1] = NULL;
6019 Assert(tb->jmp_next[0] == NULL);
6020 Assert(tb->jmp_next[1] == NULL);
6021 if (tb->tb_next_offset[0] != 0xffff)
6022 tb_reset_jump(tb, 0);
6023 if (tb->tb_next_offset[1] != 0xffff)
6024 tb_reset_jump(tb, 1);
6025
6026 /*
6027 * Execute it using emulation
6028 */
6029 old_eip = env->eip;
6030 env->current_tb = tb;
6031
6032 /*
6033 * EIP remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
6034 * Perhaps not a very safe hack.
6035 */
6036 while (old_eip == env->eip)
6037 {
6038 tc_ptr = tb->tc_ptr;
6039
6040#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6041 int fake_ret;
6042 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6043#else
6044 tcg_qemu_tb_exec(tc_ptr);
6045#endif
6046
6047 /*
6048 * Exit once we detect an external interrupt and interrupts are enabled
6049 */
6050 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6051 || ( (env->eflags & IF_MASK)
6052 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6053 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6054 )
6055 {
6056 break;
6057 }
6058 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6059 tlb_flush(env, true);
6060 }
6061 }
6062 env->current_tb = current;
6063
6064 tb_phys_invalidate(tb, -1);
6065 tb_free(tb);
6066/*
6067 Assert(tb->tb_next_offset[0] == 0xffff);
6068 Assert(tb->tb_next_offset[1] == 0xffff);
6069 Assert(tb->tb_next[0] == 0xffff);
6070 Assert(tb->tb_next[1] == 0xffff);
6071 Assert(tb->jmp_next[0] == NULL);
6072 Assert(tb->jmp_next[1] == NULL);
6073 Assert(tb->jmp_first == NULL); */
6074
6075 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6076
6077 /*
6078 * Execute the next instruction when we encounter instruction fusing.
6079 */
6080 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6081 {
6082 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6083 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6084 emulate_single_instr(env);
6085 }
6086
6087 env = savedenv;
6088 return 0;
6089}
6090
6091/**
6092 * Correctly loads a new ldtr selector.
6093 *
6094 * @param env1 CPU environment.
6095 * @param selector Selector to load.
6096 */
6097void sync_ldtr(CPUX86State *env1, int selector)
6098{
6099 CPUX86State *saved_env = env;
6100 if (setjmp(env1->jmp_env) == 0)
6101 {
6102 env = env1;
6103 helper_lldt(selector);
6104 env = saved_env;
6105 }
6106 else
6107 {
6108 env = saved_env;
6109#ifdef VBOX_STRICT
6110 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6111#endif
6112 }
6113}
6114
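/**
 * Fetches SS:ESP for the given privilege level from the current TSS (16-bit and
 * 32-bit TSS formats) without raising guest exceptions.
 *
 * @returns 1 on success, 0 if the TSS limit is too small.
 * @param   env1        CPU environment.
 * @param   ss_ptr      Where to return the stack selector.
 * @param   esp_ptr     Where to return the stack pointer.
 * @param   dpl         Privilege level of the stack to fetch.
 */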
6115int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6116 uint32_t *esp_ptr, int dpl)
6117{
6118 int type, index, shift;
6119
6120 CPUX86State *savedenv = env;
6121 env = env1;
6122
6123 if (!(env->tr.flags & DESC_P_MASK))
6124 cpu_abort(env, "invalid tss");
6125 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6126 if ((type & 7) != 3)
6127 cpu_abort(env, "invalid tss type %d", type);
6128 shift = type >> 3;
6129 index = (dpl * 4 + 2) << shift;
6130 if (index + (4 << shift) - 1 > env->tr.limit)
6131 {
6132 env = savedenv;
6133 return 0;
6134 }
6135 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6136
6137 if (shift == 0) {
6138 *esp_ptr = lduw_kernel(env->tr.base + index);
6139 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6140 } else {
6141 *esp_ptr = ldl_kernel(env->tr.base + index);
6142 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6143 }
6144
6145 env = savedenv;
6146 return 1;
6147}
6148
6149//*****************************************************************************
6150// Needs to be at the bottom of the file (overriding macros)
6151
6152static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6153{
6154#ifdef USE_X86LDOUBLE
6155 CPU86_LDoubleU tmp;
6156 tmp.l.lower = *(uint64_t const *)ptr;
6157 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6158 return tmp.d;
6159#else
6160# error "Busted FPU saving/restoring!"
6161 return *(CPU86_LDouble *)ptr;
6162#endif
6163}
6164
6165static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6166{
6167#ifdef USE_X86LDOUBLE
6168 CPU86_LDoubleU tmp;
6169 tmp.d = f;
6170 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6171 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6172 *(uint16_t *)(ptr + 10) = 0;
6173 *(uint32_t *)(ptr + 12) = 0;
6174 AssertCompile(sizeof(long double) > 8);
6175#else
6176# error "Busted FPU saving/restoring!"
6177 *(CPU86_LDouble *)ptr = f;
6178#endif
6179}
6180
6181#undef stw
6182#undef stl
6183#undef stq
6184#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6185#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6186#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6187
6188//*****************************************************************************
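/* Exports the recompiler FPU/SSE state to the raw save area at ptr, using the FXSAVE
   image layout when the guest CPU reports FXSR and the legacy FSAVE layout otherwise.
   Note that stw/stl/stq have been redefined above to direct host stores. */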
6189void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6190{
6191 int fpus, fptag, i, nb_xmm_regs;
6192 CPU86_LDouble tmp;
6193 uint8_t *addr;
6194 int data64 = !!(env->hflags & HF_LMA_MASK);
6195
6196 if (env->cpuid_features & CPUID_FXSR)
6197 {
6198 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6199 fptag = 0;
6200 for(i = 0; i < 8; i++) {
6201 fptag |= (env->fptags[i] << i);
6202 }
6203 stw(ptr, env->fpuc);
6204 stw(ptr + 2, fpus);
6205 stw(ptr + 4, fptag ^ 0xff);
6206
6207 addr = ptr + 0x20;
6208 for(i = 0;i < 8; i++) {
6209 tmp = ST(i);
6210 helper_fstt_raw(tmp, addr);
6211 addr += 16;
6212 }
6213
6214 if (env->cr[4] & CR4_OSFXSR_MASK) {
6215 /* XXX: finish it */
6216 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6217 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6218 nb_xmm_regs = 8 << data64;
6219 addr = ptr + 0xa0;
6220 for(i = 0; i < nb_xmm_regs; i++) {
6221#if __GNUC__ < 4
6222 stq(addr, env->xmm_regs[i].XMM_Q(0));
6223 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6224#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6225 stl(addr, env->xmm_regs[i].XMM_L(0));
6226 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6227 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6228 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6229#endif
6230 addr += 16;
6231 }
6232 }
6233 }
6234 else
6235 {
6236 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6237 int fptag;
6238
6239 fp->FCW = env->fpuc;
6240 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6241 fptag = 0;
6242 for (i=7; i>=0; i--) {
6243 fptag <<= 2;
6244 if (env->fptags[i]) {
6245 fptag |= 3;
6246 } else {
6247 /* the FPU automatically computes it */
6248 }
6249 }
6250 fp->FTW = fptag;
6251
6252 for(i = 0;i < 8; i++) {
6253 tmp = ST(i);
6254 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6255 }
6256 }
6257}
6258
6259//*****************************************************************************
6260#undef lduw
6261#undef ldl
6262#undef ldq
6263#define lduw(a) *(uint16_t *)(a)
6264#define ldl(a) *(uint32_t *)(a)
6265#define ldq(a) *(uint64_t *)(a)
6266//*****************************************************************************
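/* Imports the FPU/SSE state from the raw save area at ptr into the recompiler state;
   the mirror image of restore_raw_fp_state() above, with lduw/ldl/ldq redefined to
   direct host loads. */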
6267void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6268{
6269 int i, fpus, fptag, nb_xmm_regs;
6270 CPU86_LDouble tmp;
6271 uint8_t *addr;
6272 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6273
6274 if (env->cpuid_features & CPUID_FXSR)
6275 {
6276 env->fpuc = lduw(ptr);
6277 fpus = lduw(ptr + 2);
6278 fptag = lduw(ptr + 4);
6279 env->fpstt = (fpus >> 11) & 7;
6280 env->fpus = fpus & ~0x3800;
6281 fptag ^= 0xff;
6282 for(i = 0;i < 8; i++) {
6283 env->fptags[i] = ((fptag >> i) & 1);
6284 }
6285
6286 addr = ptr + 0x20;
6287 for(i = 0;i < 8; i++) {
6288 tmp = helper_fldt_raw(addr);
6289 ST(i) = tmp;
6290 addr += 16;
6291 }
6292
6293 if (env->cr[4] & CR4_OSFXSR_MASK) {
6294 /* XXX: finish it, endianness */
6295 env->mxcsr = ldl(ptr + 0x18);
6296 //ldl(ptr + 0x1c);
6297 nb_xmm_regs = 8 << data64;
6298 addr = ptr + 0xa0;
6299 for(i = 0; i < nb_xmm_regs; i++) {
6300#if HC_ARCH_BITS == 32
6301 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6302 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6303 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6304 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6305 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6306#else
6307 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6308 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6309#endif
6310 addr += 16;
6311 }
6312 }
6313 }
6314 else
6315 {
6316 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6317 int fptag;
6318
6319 env->fpuc = fp->FCW;
6320 env->fpstt = (fp->FSW >> 11) & 7;
6321 env->fpus = fp->FSW & ~0x3800;
6322 fptag = fp->FTW;
6323 for(i = 0;i < 8; i++) {
6324 env->fptags[i] = ((fptag & 3) == 3);
6325 fptag >>= 2;
6326 }
6327
6328 for(i = 0;i < 8; i++) {
6329 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6330 ST(i) = tmp;
6331 }
6332 }
6333}
6334//*****************************************************************************
6335//*****************************************************************************
6336
6337#endif /* VBOX */
6338
6339/* Secure Virtual Machine helpers */
6340
6341#if defined(CONFIG_USER_ONLY)
6342
6343void helper_vmrun(int aflag, int next_eip_addend)
6344{
6345}
6346void helper_vmmcall(void)
6347{
6348}
6349void helper_vmload(int aflag)
6350{
6351}
6352void helper_vmsave(int aflag)
6353{
6354}
6355void helper_stgi(void)
6356{
6357}
6358void helper_clgi(void)
6359{
6360}
6361void helper_skinit(void)
6362{
6363}
6364void helper_invlpga(int aflag)
6365{
6366}
6367void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6368{
6369}
6370void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6371{
6372}
6373
6374void helper_svm_check_io(uint32_t port, uint32_t param,
6375 uint32_t next_eip_addend)
6376{
6377}
6378#else
6379
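/* The VMCB keeps segment attributes in a packed 12-bit form: descriptor
   flag bits 8..15 map to attrib bits 0..7 and flag bits 20..23 to attrib
   bits 8..11, which is what the shifting in the two helpers below does. */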
6380static inline void svm_save_seg(target_phys_addr_t addr,
6381 const SegmentCache *sc)
6382{
6383 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6384 sc->selector);
6385 stq_phys(addr + offsetof(struct vmcb_seg, base),
6386 sc->base);
6387 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6388 sc->limit);
6389 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6390 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6391}
6392
6393static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6394{
6395 unsigned int flags;
6396
6397 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6398 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6399 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6400 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6401 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6402}
6403
6404static inline void svm_load_seg_cache(target_phys_addr_t addr,
6405 CPUState *env, int seg_reg)
6406{
6407 SegmentCache sc1, *sc = &sc1;
6408 svm_load_seg(addr, sc);
6409 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6410 sc->base, sc->limit, sc->flags);
6411}
6412
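/* VMRUN: save the host (hypervisor) context to the hsave area, load the
   guest context and intercept controls from the VMCB addressed by rAX,
   and optionally inject the event described by EVENTINJ before resuming
   the guest. */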
6413void helper_vmrun(int aflag, int next_eip_addend)
6414{
6415 target_ulong addr;
6416 uint32_t event_inj;
6417 uint32_t int_ctl;
6418
6419 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6420
6421 if (aflag == 2)
6422 addr = EAX;
6423 else
6424 addr = (uint32_t)EAX;
6425
6426 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6427
6428 env->vm_vmcb = addr;
6429
6430 /* save the current CPU state in the hsave page */
6431 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6432 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6433
6434 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6435 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6436
6437 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6438 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6439 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6440 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6441 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6442 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6443
6444 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6445 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6446
6447 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6448 &env->segs[R_ES]);
6449 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6450 &env->segs[R_CS]);
6451 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6452 &env->segs[R_SS]);
6453 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6454 &env->segs[R_DS]);
6455
6456 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6457 EIP + next_eip_addend);
6458 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6459 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6460
6461 /* load the interception bitmaps so we do not need to access the
6462 vmcb in svm mode */
6463 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6464 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6465 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6466 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6467 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6468 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6469
6470 /* enable intercepts */
6471 env->hflags |= HF_SVMI_MASK;
6472
6473 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6474
6475 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6476 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6477
6478 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6479 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6480
6481 /* clear exit_info_2 so we behave like the real hardware */
6482 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6483
6484 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6485 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6486 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6487 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6488 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6489 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6490 if (int_ctl & V_INTR_MASKING_MASK) {
6491 env->v_tpr = int_ctl & V_TPR_MASK;
6492 env->hflags2 |= HF2_VINTR_MASK;
6493 if (env->eflags & IF_MASK)
6494 env->hflags2 |= HF2_HIF_MASK;
6495 }
6496
6497 cpu_load_efer(env,
6498 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6499 env->eflags = 0;
6500 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6501 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6502 CC_OP = CC_OP_EFLAGS;
6503
6504 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6505 env, R_ES);
6506 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6507 env, R_CS);
6508 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6509 env, R_SS);
6510 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6511 env, R_DS);
6512
6513 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6514 env->eip = EIP;
6515 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6516 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6517 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6518 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6519 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6520
6521 /* FIXME: guest state consistency checks */
6522
6523 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6524 case TLB_CONTROL_DO_NOTHING:
6525 break;
6526 case TLB_CONTROL_FLUSH_ALL_ASID:
6527 /* FIXME: this is not 100% correct but should work for now */
6528 tlb_flush(env, 1);
6529 break;
6530 }
6531
6532 env->hflags2 |= HF2_GIF_MASK;
6533
6534 if (int_ctl & V_IRQ_MASK) {
6535 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6536 }
6537
6538 /* maybe we need to inject an event */
6539 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6540 if (event_inj & SVM_EVTINJ_VALID) {
6541 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6542 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6543 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6544
6545 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6546 /* FIXME: need to implement valid_err */
6547 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6548 case SVM_EVTINJ_TYPE_INTR:
6549 env->exception_index = vector;
6550 env->error_code = event_inj_err;
6551 env->exception_is_int = 0;
6552 env->exception_next_eip = -1;
6553 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6554 /* XXX: is it always correct? */
6555 do_interrupt(vector, 0, 0, 0, 1);
6556 break;
6557 case SVM_EVTINJ_TYPE_NMI:
6558 env->exception_index = EXCP02_NMI;
6559 env->error_code = event_inj_err;
6560 env->exception_is_int = 0;
6561 env->exception_next_eip = EIP;
6562 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6563 cpu_loop_exit();
6564 break;
6565 case SVM_EVTINJ_TYPE_EXEPT:
6566 env->exception_index = vector;
6567 env->error_code = event_inj_err;
6568 env->exception_is_int = 0;
6569 env->exception_next_eip = -1;
6570 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6571 cpu_loop_exit();
6572 break;
6573 case SVM_EVTINJ_TYPE_SOFT:
6574 env->exception_index = vector;
6575 env->error_code = event_inj_err;
6576 env->exception_is_int = 1;
6577 env->exception_next_eip = EIP;
6578 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6579 cpu_loop_exit();
6580 break;
6581 }
6582 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6583 }
6584}
6585
6586void helper_vmmcall(void)
6587{
6588 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6589 raise_exception(EXCP06_ILLOP);
6590}
6591
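/* VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT do not switch:
   FS, GS, TR, LDTR, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the
   SYSENTER MSRs, using the VMCB addressed by rAX. */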
6592void helper_vmload(int aflag)
6593{
6594 target_ulong addr;
6595 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6596
6597 if (aflag == 2)
6598 addr = EAX;
6599 else
6600 addr = (uint32_t)EAX;
6601
6602 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6603 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6604 env->segs[R_FS].base);
6605
6606 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6607 env, R_FS);
6608 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6609 env, R_GS);
6610 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6611 &env->tr);
6612 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6613 &env->ldt);
6614
6615#ifdef TARGET_X86_64
6616 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6617 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6618 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6619 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6620#endif
6621 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6622 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6623 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6624 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6625}
6626
6627void helper_vmsave(int aflag)
6628{
6629 target_ulong addr;
6630 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6631
6632 if (aflag == 2)
6633 addr = EAX;
6634 else
6635 addr = (uint32_t)EAX;
6636
6637 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6638 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6639 env->segs[R_FS].base);
6640
6641 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6642 &env->segs[R_FS]);
6643 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6644 &env->segs[R_GS]);
6645 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6646 &env->tr);
6647 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6648 &env->ldt);
6649
6650#ifdef TARGET_X86_64
6651 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6652 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6653 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6654 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6655#endif
6656 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6657 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6658 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6659 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6660}
6661
6662void helper_stgi(void)
6663{
6664 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6665 env->hflags2 |= HF2_GIF_MASK;
6666}
6667
6668void helper_clgi(void)
6669{
6670 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6671 env->hflags2 &= ~HF2_GIF_MASK;
6672}
6673
6674void helper_skinit(void)
6675{
6676 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6677 /* XXX: not implemented */
6678 raise_exception(EXCP06_ILLOP);
6679}
6680
6681void helper_invlpga(int aflag)
6682{
6683 target_ulong addr;
6684 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6685
6686 if (aflag == 2)
6687 addr = EAX;
6688 else
6689 addr = (uint32_t)EAX;
6690
6691 /* XXX: could use the ASID to see if it is needed to do the
6692 flush */
6693 tlb_flush_page(env, addr);
6694}
6695
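/* Check whether the given intercept is enabled in the active VMCB and, if
   so, emulate a #VMEXIT with the supplied exit code and exit_info_1. */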
6696void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6697{
6698 if (likely(!(env->hflags & HF_SVMI_MASK)))
6699 return;
6700#ifndef VBOX
6701 switch(type) {
6702 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6703 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6704 helper_vmexit(type, param);
6705 }
6706 break;
6707 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6708 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6709 helper_vmexit(type, param);
6710 }
6711 break;
6712 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6713 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6714 helper_vmexit(type, param);
6715 }
6716 break;
6717 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6718 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6719 helper_vmexit(type, param);
6720 }
6721 break;
6722 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6723 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6724 helper_vmexit(type, param);
6725 }
6726 break;
6727 case SVM_EXIT_MSR:
6728 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6729 /* FIXME: this should be read in at vmrun (faster this way?) */
6730 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6731 uint32_t t0, t1;
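        /* Each MSR occupies two bits in the permission map (even bit =
           read intercept, odd bit = write intercept); t1 is the byte
           offset into the map and t0 the bit offset within that byte.
           E.g. MSR 0xc0000080 (EFER) lands at byte 2080, bit 0. */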
6732 switch((uint32_t)ECX) {
6733 case 0 ... 0x1fff:
6734 t0 = (ECX * 2) % 8;
6735 t1 = (ECX * 2) / 8;
6736 break;
6737 case 0xc0000000 ... 0xc0001fff:
6738 t0 = (8192 + ECX - 0xc0000000) * 2;
6739 t1 = (t0 / 8);
6740 t0 %= 8;
6741 break;
6742 case 0xc0010000 ... 0xc0011fff:
6743 t0 = (16384 + ECX - 0xc0010000) * 2;
6744 t1 = (t0 / 8);
6745 t0 %= 8;
6746 break;
6747 default:
6748 helper_vmexit(type, param);
6749 t0 = 0;
6750 t1 = 0;
6751 break;
6752 }
6753 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6754 helper_vmexit(type, param);
6755 }
6756 break;
6757 default:
6758 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6759 helper_vmexit(type, param);
6760 }
6761 break;
6762 }
6763#else /* VBOX */
6764 AssertMsgFailed(("We shouldn't be here; HM handles SVM intercepts differently!"));
6765#endif /* VBOX */
6766}
6767
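/* The I/O permission map holds one intercept bit per port.  Bits 4..6 of
   'param' are the SZ8/SZ16/SZ32 flags of the IOIO exit info; 'mask' turns
   that one-hot size flag into a run of 1, 2 or 4 bits so a multi-byte
   access is intercepted if any of the ports it touches is. */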
6768void helper_svm_check_io(uint32_t port, uint32_t param,
6769 uint32_t next_eip_addend)
6770{
6771 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6772 /* FIXME: this should be read in at vmrun (faster this way?) */
6773 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6774 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6775 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6776 /* next EIP */
6777 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6778 env->eip + next_eip_addend);
6779 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6780 }
6781 }
6782}
6783
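/* #VMEXIT: store the current guest state plus the exit code/info back into
   the VMCB, reload the host context from the hsave area and return to the
   hypervisor by leaving the cpu loop. */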
6784/* Note: currently only 32 bits of exit_code are used */
6785void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6786{
6787 uint32_t int_ctl;
6788
6789 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6790 exit_code, exit_info_1,
6791 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6792 EIP);
6793
6794 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6795 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6796 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6797 } else {
6798 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6799 }
6800
6801 /* Save the VM state in the vmcb */
6802 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6803 &env->segs[R_ES]);
6804 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6805 &env->segs[R_CS]);
6806 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6807 &env->segs[R_SS]);
6808 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6809 &env->segs[R_DS]);
6810
6811 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6812 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6813
6814 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6815 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6816
6817 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6820 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6821 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6822
6823 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6824 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6825 int_ctl |= env->v_tpr & V_TPR_MASK;
6826 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6827 int_ctl |= V_IRQ_MASK;
6828 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6829
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6832 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6833 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6834 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6835 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6836 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6837
6838 /* Reload the host state from vm_hsave */
6839 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6840 env->hflags &= ~HF_SVMI_MASK;
6841 env->intercept = 0;
6842 env->intercept_exceptions = 0;
6843 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6844 env->tsc_offset = 0;
6845
6846 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6847 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6848
6849 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6850 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6851
6852 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6853 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6854 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6855 /* we need to set the efer after the crs so the hidden flags get
6856 set properly */
6857 cpu_load_efer(env,
6858 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6859 env->eflags = 0;
6860 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6861 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6862 CC_OP = CC_OP_EFLAGS;
6863
6864 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6865 env, R_ES);
6866 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6867 env, R_CS);
6868 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6869 env, R_SS);
6870 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6871 env, R_DS);
6872
6873 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6874 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6875 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6876
6877 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6878 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6879
6880 /* other setups */
6881 cpu_x86_set_cpl(env, 0);
6882 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6883 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6884
6885 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6886 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6887 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6888 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6889 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6890
6891 env->hflags2 &= ~HF2_GIF_MASK;
6892 /* FIXME: Resets the current ASID register to zero (host ASID). */
6893
6894 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6895
6896 /* Clears the TSC_OFFSET inside the processor. */
6897
6898 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6899 from the page table indicated by the host's CR3. If the PDPEs contain
6900 illegal state, the processor causes a shutdown. */
6901
6902 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6903 env->cr[0] |= CR0_PE_MASK;
6904 env->eflags &= ~VM_MASK;
6905
6906 /* Disables all breakpoints in the host DR7 register. */
6907
6908 /* Checks the reloaded host state for consistency. */
6909
6910 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6911 host's code segment or non-canonical (in the case of long mode), a
6912 #GP fault is delivered inside the host. */
6913
6914 /* remove any pending exception */
6915 env->exception_index = -1;
6916 env->error_code = 0;
6917 env->old_exception = -1;
6918
6919 cpu_loop_exit();
6920}
6921
6922#endif
6923
6924/* MMX/SSE */
6925/* XXX: optimize by storing fpstt and fptags in the static cpu state */
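/* env->fptags[] holds one byte per x87 register (0 = valid, 1 = empty).
   Entering MMX mode marks all eight registers valid and resets the stack
   top; EMMS marks them all empty again.  The 32-bit stores below set four
   tag bytes at a time. */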
6926void helper_enter_mmx(void)
6927{
6928 env->fpstt = 0;
6929 *(uint32_t *)(env->fptags) = 0;
6930 *(uint32_t *)(env->fptags + 4) = 0;
6931}
6932
6933void helper_emms(void)
6934{
6935 /* set to empty state */
6936 *(uint32_t *)(env->fptags) = 0x01010101;
6937 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6938}
6939
6940/* XXX: suppress */
6941void helper_movq(void *d, void *s)
6942{
6943 *(uint64_t *)d = *(uint64_t *)s;
6944}
6945
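/* Instantiate the MMX (SHIFT 0, 64-bit) and SSE (SHIFT 1, 128-bit) variants
   of the vector helpers, followed by the 8/16/32(/64)-bit instantiations of
   the generic ALU helper templates. */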
6946#define SHIFT 0
6947#include "ops_sse.h"
6948
6949#define SHIFT 1
6950#include "ops_sse.h"
6951
6952#define SHIFT 0
6953#include "helper_template.h"
6954#undef SHIFT
6955
6956#define SHIFT 1
6957#include "helper_template.h"
6958#undef SHIFT
6959
6960#define SHIFT 2
6961#include "helper_template.h"
6962#undef SHIFT
6963
6964#ifdef TARGET_X86_64
6965
6966#define SHIFT 3
6967#include "helper_template.h"
6968#undef SHIFT
6969
6970#endif
6971
6972/* bit operations */
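/* helper_bsf returns the index of the least significant set bit.  The loop
   would not terminate for a zero operand; the translator is expected to
   branch around the BSF/BSR helpers when the source is zero. */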
6973target_ulong helper_bsf(target_ulong t0)
6974{
6975 int count;
6976 target_ulong res;
6977
6978 res = t0;
6979 count = 0;
6980 while ((res & 1) == 0) {
6981 count++;
6982 res >>= 1;
6983 }
6984 return count;
6985}
6986
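/* With wordsize > 0 this implements LZCNT for an operand of that many bits
   (returning wordsize for a zero input); with wordsize == 0 it returns the
   bit index of the most significant set bit, as needed by BSR.
   E.g. helper_lzcnt(0x00f0, 16) == 8. */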
6987target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6988{
6989 int count;
6990 target_ulong res, mask;
6991
6992 if (wordsize > 0 && t0 == 0) {
6993 return wordsize;
6994 }
6995 res = t0;
6996 count = TARGET_LONG_BITS - 1;
6997 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6998 while ((res & mask) == 0) {
6999 count--;
7000 res <<= 1;
7001 }
7002 if (wordsize > 0) {
7003 return wordsize - 1 - count;
7004 }
7005 return count;
7006}
7007
7008target_ulong helper_bsr(target_ulong t0)
7009{
7010 return helper_lzcnt(t0, 0);
7011}
7012
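/* Lazy flag evaluation: CC_OP records which operation last set the flags
   and CC_SRC/CC_DST hold its operands/result.  The dispatchers below
   materialise either the full set of arithmetic EFLAGS or just CF from
   that recorded state. */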
7013static int compute_all_eflags(void)
7014{
7015 return CC_SRC;
7016}
7017
7018static int compute_c_eflags(void)
7019{
7020 return CC_SRC & CC_C;
7021}
7022
7023uint32_t helper_cc_compute_all(int op)
7024{
7025 switch (op) {
7026 default: /* should never happen */ return 0;
7027
7028 case CC_OP_EFLAGS: return compute_all_eflags();
7029
7030 case CC_OP_MULB: return compute_all_mulb();
7031 case CC_OP_MULW: return compute_all_mulw();
7032 case CC_OP_MULL: return compute_all_mull();
7033
7034 case CC_OP_ADDB: return compute_all_addb();
7035 case CC_OP_ADDW: return compute_all_addw();
7036 case CC_OP_ADDL: return compute_all_addl();
7037
7038 case CC_OP_ADCB: return compute_all_adcb();
7039 case CC_OP_ADCW: return compute_all_adcw();
7040 case CC_OP_ADCL: return compute_all_adcl();
7041
7042 case CC_OP_SUBB: return compute_all_subb();
7043 case CC_OP_SUBW: return compute_all_subw();
7044 case CC_OP_SUBL: return compute_all_subl();
7045
7046 case CC_OP_SBBB: return compute_all_sbbb();
7047 case CC_OP_SBBW: return compute_all_sbbw();
7048 case CC_OP_SBBL: return compute_all_sbbl();
7049
7050 case CC_OP_LOGICB: return compute_all_logicb();
7051 case CC_OP_LOGICW: return compute_all_logicw();
7052 case CC_OP_LOGICL: return compute_all_logicl();
7053
7054 case CC_OP_INCB: return compute_all_incb();
7055 case CC_OP_INCW: return compute_all_incw();
7056 case CC_OP_INCL: return compute_all_incl();
7057
7058 case CC_OP_DECB: return compute_all_decb();
7059 case CC_OP_DECW: return compute_all_decw();
7060 case CC_OP_DECL: return compute_all_decl();
7061
7062 case CC_OP_SHLB: return compute_all_shlb();
7063 case CC_OP_SHLW: return compute_all_shlw();
7064 case CC_OP_SHLL: return compute_all_shll();
7065
7066 case CC_OP_SARB: return compute_all_sarb();
7067 case CC_OP_SARW: return compute_all_sarw();
7068 case CC_OP_SARL: return compute_all_sarl();
7069
7070#ifdef TARGET_X86_64
7071 case CC_OP_MULQ: return compute_all_mulq();
7072
7073 case CC_OP_ADDQ: return compute_all_addq();
7074
7075 case CC_OP_ADCQ: return compute_all_adcq();
7076
7077 case CC_OP_SUBQ: return compute_all_subq();
7078
7079 case CC_OP_SBBQ: return compute_all_sbbq();
7080
7081 case CC_OP_LOGICQ: return compute_all_logicq();
7082
7083 case CC_OP_INCQ: return compute_all_incq();
7084
7085 case CC_OP_DECQ: return compute_all_decq();
7086
7087 case CC_OP_SHLQ: return compute_all_shlq();
7088
7089 case CC_OP_SARQ: return compute_all_sarq();
7090#endif
7091 }
7092}
7093
7094uint32_t helper_cc_compute_c(int op)
7095{
7096 switch (op) {
7097 default: /* should never happen */ return 0;
7098
7099 case CC_OP_EFLAGS: return compute_c_eflags();
7100
7101 case CC_OP_MULB: return compute_c_mull();
7102 case CC_OP_MULW: return compute_c_mull();
7103 case CC_OP_MULL: return compute_c_mull();
7104
7105 case CC_OP_ADDB: return compute_c_addb();
7106 case CC_OP_ADDW: return compute_c_addw();
7107 case CC_OP_ADDL: return compute_c_addl();
7108
7109 case CC_OP_ADCB: return compute_c_adcb();
7110 case CC_OP_ADCW: return compute_c_adcw();
7111 case CC_OP_ADCL: return compute_c_adcl();
7112
7113 case CC_OP_SUBB: return compute_c_subb();
7114 case CC_OP_SUBW: return compute_c_subw();
7115 case CC_OP_SUBL: return compute_c_subl();
7116
7117 case CC_OP_SBBB: return compute_c_sbbb();
7118 case CC_OP_SBBW: return compute_c_sbbw();
7119 case CC_OP_SBBL: return compute_c_sbbl();
7120
7121 case CC_OP_LOGICB: return compute_c_logicb();
7122 case CC_OP_LOGICW: return compute_c_logicw();
7123 case CC_OP_LOGICL: return compute_c_logicl();
7124
7125 case CC_OP_INCB: return compute_c_incl();
7126 case CC_OP_INCW: return compute_c_incl();
7127 case CC_OP_INCL: return compute_c_incl();
7128
7129 case CC_OP_DECB: return compute_c_incl();
7130 case CC_OP_DECW: return compute_c_incl();
7131 case CC_OP_DECL: return compute_c_incl();
7132
7133 case CC_OP_SHLB: return compute_c_shlb();
7134 case CC_OP_SHLW: return compute_c_shlw();
7135 case CC_OP_SHLL: return compute_c_shll();
7136
7137 case CC_OP_SARB: return compute_c_sarl();
7138 case CC_OP_SARW: return compute_c_sarl();
7139 case CC_OP_SARL: return compute_c_sarl();
7140
7141#ifdef TARGET_X86_64
7142 case CC_OP_MULQ: return compute_c_mull();
7143
7144 case CC_OP_ADDQ: return compute_c_addq();
7145
7146 case CC_OP_ADCQ: return compute_c_adcq();
7147
7148 case CC_OP_SUBQ: return compute_c_subq();
7149
7150 case CC_OP_SBBQ: return compute_c_sbbq();
7151
7152 case CC_OP_LOGICQ: return compute_c_logicq();
7153
7154 case CC_OP_INCQ: return compute_c_incl();
7155
7156 case CC_OP_DECQ: return compute_c_incl();
7157
7158 case CC_OP_SHLQ: return compute_c_shlq();
7159
7160 case CC_OP_SARQ: return compute_c_sarl();
7161#endif
7162 }
7163}